linux/drivers/infiniband/hw/hfi1/hfi.h
#ifndef _HFI1_KERNEL_H
#define _HFI1_KERNEL_H
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/completion.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/xarray.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include <linux/rhashtable.h>
#include <rdma/rdma_vt.h>

#include "chip_registers.h"
#include "common.h"
#include "opfn.h"
#include "verbs.h"
#include "pio.h"
#include "chip.h"
#include "mad.h"
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"
#include "msix.h"

/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U

/* don't care about this except printing */
#define HFI1_CHIP_VERS_MIN 0U

/* The Organization Unique Identifier (Mfg code), and its position in GUID */
#define HFI1_OUI 0x001175
#define HFI1_OUI_LSB 40

#define DROP_PACKET_OFF         0
#define DROP_PACKET_ON          1

#define NEIGHBOR_TYPE_HFI               0
#define NEIGHBOR_TYPE_SWITCH    1

#define HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES 5

extern unsigned long hfi1_cap_mask;
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
#define HFI1_CAP_UGET_MASK(mask, cap) \
        (((mask) >> HFI1_CAP_USER_SHIFT) & HFI1_CAP_##cap)
#define HFI1_CAP_KGET(cap) (HFI1_CAP_KGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_UGET(cap) (HFI1_CAP_UGET_MASK(hfi1_cap_mask, cap))
#define HFI1_CAP_IS_KSET(cap) (!!HFI1_CAP_KGET(cap))
#define HFI1_CAP_IS_USET(cap) (!!HFI1_CAP_UGET(cap))
#define HFI1_MISC_GET() ((hfi1_cap_mask >> HFI1_CAP_MISC_SHIFT) & \
                        HFI1_CAP_MISC_MASK)
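
/*
 * Illustrative sketch of how the accessors above are used (not new
 * driver API): the low bits of hfi1_cap_mask hold kernel capabilities
 * and the bits above HFI1_CAP_USER_SHIFT hold user capabilities, e.g.:
 *
 *      if (HFI1_CAP_IS_KSET(DMA_RTAIL))
 *              ...     kernel half has DMA_RTAIL set
 *      if (HFI1_CAP_IS_USET(SDMA))
 *              ...     user half has SDMA set
 */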
/* Offline Disabled Reason is 4-bits */
#define HFI1_ODR_MASK(rsn) ((rsn) & OPA_PI_MASK_OFFLINE_REASON)

/*
 * Control context is always 0 and handles the error packets.
 * It also handles the VL15 and multicast packets.
 */
#define HFI1_CTRL_CTXT    0

/*
 * Driver context will store software counters for each of the events
 * associated with these status registers
 */
#define NUM_CCE_ERR_STATUS_COUNTERS 41
#define NUM_RCV_ERR_STATUS_COUNTERS 64
#define NUM_MISC_ERR_STATUS_COUNTERS 13
#define NUM_SEND_PIO_ERR_STATUS_COUNTERS 36
#define NUM_SEND_DMA_ERR_STATUS_COUNTERS 4
#define NUM_SEND_EGRESS_ERR_STATUS_COUNTERS 64
#define NUM_SEND_ERR_STATUS_COUNTERS 3
#define NUM_SEND_CTXT_ERR_STATUS_COUNTERS 5
#define NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS 24

/*
 * per-driver stats that are either not device- or port-specific, or
 * are summed over all of the devices and ports.
 * They are described by name via the ipathfs filesystem, so layout
 * and number of elements can change without breaking compatibility.
 * If members are added or deleted, hfi1_statnames[] in debugfs.c must
 * change to match.
 */
struct hfi1_ib_stats {
        __u64 sps_ints; /* number of interrupts handled */
        __u64 sps_errints; /* number of error interrupts */
        __u64 sps_txerrs; /* tx-related packet errors */
        __u64 sps_rcverrs; /* non-crc rcv packet errors */
        __u64 sps_hwerrs; /* hardware errors reported (parity, etc.) */
        __u64 sps_nopiobufs; /* no pio bufs avail from kernel */
        __u64 sps_ctxts; /* number of contexts currently open */
        __u64 sps_lenerrs; /* number of kernel packets where RHF != LRH len */
        __u64 sps_buffull;
        __u64 sps_hdrfull;
};

extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;

extern int num_driver_cntrs;

/*
 * First-cut criterion for "device is active" is
 * two thousand dwords combined Tx, Rx traffic per
 * 5-second interval. SMA packets are 64 dwords,
 * and occur "a few per second", presumably each way.
 */
#define HFI1_TRAFFIC_ACTIVE_THRESHOLD (2000)

/*
 * Below contains all data related to a single context (formerly called port).
 */

struct hfi1_opcode_stats_perctx;

struct ctxt_eager_bufs {
        struct eager_buffer {
                void *addr;
                dma_addr_t dma;
                ssize_t len;
        } *buffers;
        struct {
                void *addr;
                dma_addr_t dma;
        } *rcvtids;
        u32 size;                /* total size of eager buffers */
        u32 rcvtid_size;         /* size of each eager rcv tid */
        u16 count;               /* size of buffers array */
        u16 numbufs;             /* number of buffers allocated */
        u16 alloced;             /* number of rcvarray entries used */
        u16 threshold;           /* head update threshold */
};

struct exp_tid_set {
        struct list_head list;
        u32 count;
};

struct hfi1_ctxtdata;
typedef int (*intr_handler)(struct hfi1_ctxtdata *rcd, int data);
typedef void (*rhf_rcv_function_ptr)(struct hfi1_packet *packet);

struct tid_queue {
        struct list_head queue_head;
                        /* queue head for QP TID resource waiters */
        u32 enqueue;    /* count of tid enqueues */
        u32 dequeue;    /* count of tid dequeues */
};
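
/*
 * A sketch of how the counters are read (illustrative; the driver does
 * not define this helper): both counts free-run, so the current queue
 * depth is their wrap-safe unsigned difference:
 *
 *      u32 depth = queue->enqueue - queue->dequeue;
 */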

struct hfi1_ctxtdata {
        /* rcvhdrq base, needs mmap before useful */
        void *rcvhdrq;
        /* kernel virtual address where hdrqtail is updated */
        volatile __le64 *rcvhdrtail_kvaddr;
        /* so functions that need physical port can get it easily */
        struct hfi1_pportdata *ppd;
        /* so file ops can get at unit */
        struct hfi1_devdata *dd;
        /* this receive context's assigned PIO ACK send context */
        struct send_context *sc;
        /* per context recv functions */
        const rhf_rcv_function_ptr *rhf_rcv_function_map;
        /*
         * The interrupt handler for a particular receive context can vary
         * throughout its lifetime. This is not a lock-protected data member,
         * so it must be updated atomically and the prev and new values must
         * always be valid. Worst case is we process an extra interrupt and up
         * to 64 packets with the wrong interrupt handler.
         */
        intr_handler do_interrupt;
        /** fast handler after autoactive */
        intr_handler fast_handler;
        /** slow handler */
        intr_handler slow_handler;
        /* napi pointer associated with netdev */
        struct napi_struct *napi;
        /* verbs rx_stats per rcd */
        struct hfi1_opcode_stats_perctx *opstats;
        /* clear interrupt mask */
        u64 imask;
        /* ctxt rcvhdrq head offset */
        u32 head;
        /* number of rcvhdrq entries */
        u16 rcvhdrq_cnt;
        u8 ireg;        /* clear interrupt register */
        /* receive packet sequence counter */
        u8 seq_cnt;
        /* size of each of the rcvhdrq entries */
        u8 rcvhdrqentsize;
        /* offset of RHF within receive header entry */
        u8 rhf_offset;
        /* dynamic receive available interrupt timeout */
        u8 rcvavail_timeout;
        /* Indicates that this is a vnic context */
        bool is_vnic;
        /* vnic queue index this context is mapped to */
        u8 vnic_q_idx;
        /* Is ASPM interrupt supported for this context */
        bool aspm_intr_supported;
        /* ASPM state (enabled/disabled) for this context */
        bool aspm_enabled;
        /* Is ASPM processing enabled for this context (in intr context) */
        bool aspm_intr_enable;
        struct ctxt_eager_bufs egrbufs;
        /* QPs waiting for context processing */
        struct list_head qp_wait_list;
        /* tid allocation lists */
        struct exp_tid_set tid_group_list;
        struct exp_tid_set tid_used_list;
        struct exp_tid_set tid_full_list;

        /* Timer for re-enabling ASPM if interrupt activity quiets down */
        struct timer_list aspm_timer;
        /* per-context configuration flags */
        unsigned long flags;
        /* array of tid_groups */
        struct tid_group  *groups;
        /* mmap of hdrq, must fit in 44 bits */
        dma_addr_t rcvhdrq_dma;
        dma_addr_t rcvhdrqtailaddr_dma;
        /* Last interrupt timestamp */
        ktime_t aspm_ts_last_intr;
        /* Last timestamp at which we scheduled a timer for this context */
        ktime_t aspm_ts_timer_sched;
        /* Lock to serialize between intr, timer intr and user threads */
        spinlock_t aspm_lock;
        /* Reference count the base context usage */
        struct kref kref;
        /* numa node of this context */
        int numa_id;
        /* associated msix interrupt. */
        s16 msix_intr;
        /* job key */
        u16 jkey;
        /* number of RcvArray groups for this context. */
        u16 rcv_array_groups;
        /* index of first eager TID entry. */
        u16 eager_base;
        /* number of expected TID entries */
        u16 expected_count;
        /* index of first expected TID entry. */
        u16 expected_base;
        /* Device context index */
        u8 ctxt;

        /* PSM Specific fields */
        /* lock protecting all Expected TID data */
        struct mutex exp_mutex;
        /* lock protecting all Expected TID data of kernel contexts */
        spinlock_t exp_lock;
        /* Queue for QP's waiting for HW TID flows */
        struct tid_queue flow_queue;
        /* Queue for QP's waiting for HW receive array entries */
        struct tid_queue rarr_queue;
        /* when waiting for rcv or pioavail */
        wait_queue_head_t wait;
        /* uuid from PSM */
        u8 uuid[16];
        /* same size as task_struct .comm[], command that opened context */
        char comm[TASK_COMM_LEN];
        /* Bitmask of in use context(s) */
        DECLARE_BITMAP(in_use_ctxts, HFI1_MAX_SHARED_CTXTS);
        /* per-context event flags for fileops/intr communication */
        unsigned long event_flags;
        /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
        void *subctxt_uregbase;
        /* An array of pages for the eager receive buffers * N */
        void *subctxt_rcvegrbuf;
        /* An array of pages for the eager header queue entries * N */
        void *subctxt_rcvhdr_base;
        /* total number of polled urgent packets */
        u32 urgent;
        /* saved total number of polled urgent packets for poll edge trigger */
        u32 urgent_poll;
        /* Type of packets or conditions we want to poll for */
        u16 poll_type;
        /* non-zero if ctxt is being shared. */
        u16 subctxt_id;
        /* The version of the library which opened this ctxt */
        u32 userversion;
        /*
         * non-zero if ctxt can be shared, and defines the maximum number of
         * sub-contexts for this device context.
         */
        u8 subctxt_cnt;

        /* Bit mask to track free TID RDMA HW flows */
        unsigned long flow_mask;
        struct tid_flow_state flows[RXE_NUM_TID_FLOWS];
};

/**
 * rcvhdrq_size - return total size in bytes for header queue
 * @rcd: the receive context
 *
 * rcvhdrqentsize is in DWs, so we have to convert to bytes
 *
 */
static inline u32 rcvhdrq_size(struct hfi1_ctxtdata *rcd)
{
        return PAGE_ALIGN(rcd->rcvhdrq_cnt *
                          rcd->rcvhdrqentsize * sizeof(u32));
}
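
/*
 * Worked example (illustrative values): with rcvhdrq_cnt = 2048 and
 * rcvhdrqentsize = 32 DWs, the queue needs 2048 * 32 * 4 = 256 KiB,
 * which PAGE_ALIGN() leaves unchanged with 4 KiB pages.
 */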

/*
 * Represents a single packet at a high level. Put commonly computed things in
 * here so we do not have to keep doing them over and over. The rule of thumb is
 * if something is used one time to derive some value, store that something in
 * here. If it is used multiple times, then store the result of that derivation
 * in here.
 */
struct hfi1_packet {
        void *ebuf;
        void *hdr;
        void *payload;
        struct hfi1_ctxtdata *rcd;
        __le32 *rhf_addr;
        struct rvt_qp *qp;
        struct ib_other_headers *ohdr;
        struct ib_grh *grh;
        struct opa_16b_mgmt *mgmt;
        u64 rhf;
        u32 maxcnt;
        u32 rhqoff;
        u32 dlid;
        u32 slid;
        int numpkt;
        u16 tlen;
        s16 etail;
        u16 pkey;
        u8 hlen;
        u8 rsize;
        u8 updegr;
        u8 etype;
        u8 extra_byte;
        u8 pad;
        u8 sc;
        u8 sl;
        u8 opcode;
        bool migrated;
};

/* Packet types */
#define HFI1_PKT_TYPE_9B  0
#define HFI1_PKT_TYPE_16B 1

/*
 * OPA 16B Header
 */
#define OPA_16B_L4_MASK         0xFFull
#define OPA_16B_SC_MASK         0x1F00000ull
#define OPA_16B_SC_SHIFT        20
#define OPA_16B_LID_MASK        0xFFFFFull
#define OPA_16B_DLID_MASK       0xF000ull
#define OPA_16B_DLID_SHIFT      20
#define OPA_16B_DLID_HIGH_SHIFT 12
#define OPA_16B_SLID_MASK       0xF00ull
#define OPA_16B_SLID_SHIFT      20
#define OPA_16B_SLID_HIGH_SHIFT 8
#define OPA_16B_BECN_MASK       0x80000000ull
#define OPA_16B_BECN_SHIFT      31
#define OPA_16B_FECN_MASK       0x10000000ull
#define OPA_16B_FECN_SHIFT      28
#define OPA_16B_L2_MASK         0x60000000ull
#define OPA_16B_L2_SHIFT        29
#define OPA_16B_PKEY_MASK       0xFFFF0000ull
#define OPA_16B_PKEY_SHIFT      16
#define OPA_16B_LEN_MASK        0x7FF00000ull
#define OPA_16B_LEN_SHIFT       20
#define OPA_16B_RC_MASK         0xE000000ull
#define OPA_16B_RC_SHIFT        25
#define OPA_16B_AGE_MASK        0xFF0000ull
#define OPA_16B_AGE_SHIFT       16
#define OPA_16B_ENTROPY_MASK    0xFFFFull

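/*
 * Worked example (illustrative): a 16B DLID is split across the header;
 * bits 19:0 sit in lrh[1] (OPA_16B_LID_MASK) and bits 23:20 in lrh[2]
 * bits 15:12 (OPA_16B_DLID_MASK).  hfi1_16B_get_dlid() below stitches
 * them back together, effectively:
 *
 *      dlid = (lrh[1] & 0xFFFFF) |
 *             (((lrh[2] & 0xF000) >> 12) << 20);
 */
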
/*
 * OPA 16B L2/L4 Encodings
 */
#define OPA_16B_L4_9B           0x00
#define OPA_16B_L2_TYPE         0x02
#define OPA_16B_L4_FM           0x08
#define OPA_16B_L4_IB_LOCAL     0x09
#define OPA_16B_L4_IB_GLOBAL    0x0A
#define OPA_16B_L4_ETHR         OPA_VNIC_L4_ETHR

/*
 * OPA 16B Management
 */
#define OPA_16B_L4_FM_PAD       3  /* fixed 3B pad */
#define OPA_16B_L4_FM_HLEN      24 /* 16B(16) + L4_FM(8) */

static inline u8 hfi1_16B_get_l4(struct hfi1_16b_header *hdr)
{
        return (u8)(hdr->lrh[2] & OPA_16B_L4_MASK);
}

static inline u8 hfi1_16B_get_sc(struct hfi1_16b_header *hdr)
{
        return (u8)((hdr->lrh[1] & OPA_16B_SC_MASK) >> OPA_16B_SC_SHIFT);
}

static inline u32 hfi1_16B_get_dlid(struct hfi1_16b_header *hdr)
{
        return (u32)((hdr->lrh[1] & OPA_16B_LID_MASK) |
                     (((hdr->lrh[2] & OPA_16B_DLID_MASK) >>
                     OPA_16B_DLID_HIGH_SHIFT) << OPA_16B_DLID_SHIFT));
}

static inline u32 hfi1_16B_get_slid(struct hfi1_16b_header *hdr)
{
        return (u32)((hdr->lrh[0] & OPA_16B_LID_MASK) |
                     (((hdr->lrh[2] & OPA_16B_SLID_MASK) >>
                     OPA_16B_SLID_HIGH_SHIFT) << OPA_16B_SLID_SHIFT));
}

static inline u8 hfi1_16B_get_becn(struct hfi1_16b_header *hdr)
{
        return (u8)((hdr->lrh[0] & OPA_16B_BECN_MASK) >> OPA_16B_BECN_SHIFT);
}

static inline u8 hfi1_16B_get_fecn(struct hfi1_16b_header *hdr)
{
        return (u8)((hdr->lrh[1] & OPA_16B_FECN_MASK) >> OPA_16B_FECN_SHIFT);
}

static inline u8 hfi1_16B_get_l2(struct hfi1_16b_header *hdr)
{
        return (u8)((hdr->lrh[1] & OPA_16B_L2_MASK) >> OPA_16B_L2_SHIFT);
}

static inline u16 hfi1_16B_get_pkey(struct hfi1_16b_header *hdr)
{
        return (u16)((hdr->lrh[2] & OPA_16B_PKEY_MASK) >> OPA_16B_PKEY_SHIFT);
}

static inline u8 hfi1_16B_get_rc(struct hfi1_16b_header *hdr)
{
        return (u8)((hdr->lrh[1] & OPA_16B_RC_MASK) >> OPA_16B_RC_SHIFT);
}

static inline u8 hfi1_16B_get_age(struct hfi1_16b_header *hdr)
{
        return (u8)((hdr->lrh[3] & OPA_16B_AGE_MASK) >> OPA_16B_AGE_SHIFT);
}

static inline u16 hfi1_16B_get_len(struct hfi1_16b_header *hdr)
{
        return (u16)((hdr->lrh[0] & OPA_16B_LEN_MASK) >> OPA_16B_LEN_SHIFT);
}

static inline u16 hfi1_16B_get_entropy(struct hfi1_16b_header *hdr)
{
        return (u16)(hdr->lrh[3] & OPA_16B_ENTROPY_MASK);
}

#define OPA_16B_MAKE_QW(low_dw, high_dw) (((u64)(high_dw) << 32) | (low_dw))
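
/*
 * e.g. (illustrative):
 * OPA_16B_MAKE_QW(0x11111111, 0x22222222) == 0x2222222211111111ull
 */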

/*
 * BTH
 */
#define OPA_16B_BTH_PAD_MASK    7
static inline u8 hfi1_16B_bth_get_pad(struct ib_other_headers *ohdr)
{
        return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
                   OPA_16B_BTH_PAD_MASK);
}

/*
 * 16B Management
 */
#define OPA_16B_MGMT_QPN_MASK   0xFFFFFF
static inline u32 hfi1_16B_get_dest_qpn(struct opa_16b_mgmt *mgmt)
{
        return be32_to_cpu(mgmt->dest_qpn) & OPA_16B_MGMT_QPN_MASK;
}

static inline u32 hfi1_16B_get_src_qpn(struct opa_16b_mgmt *mgmt)
{
        return be32_to_cpu(mgmt->src_qpn) & OPA_16B_MGMT_QPN_MASK;
}

static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
                                    u32 dest_qp, u32 src_qp)
{
        mgmt->dest_qpn = cpu_to_be32(dest_qp & OPA_16B_MGMT_QPN_MASK);
        mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
}

/**
 * hfi1_get_rc_ohdr - get extended header
 * @opah: the opa header
 */
static inline struct ib_other_headers *
hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
{
        struct ib_other_headers *ohdr;
        struct ib_header *hdr = NULL;
        struct hfi1_16b_header *hdr_16b = NULL;

        /* Find out where the BTH is */
        if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
                hdr = &opah->ibh;
                if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
                        ohdr = &hdr->u.oth;
                else
                        ohdr = &hdr->u.l.oth;
        } else {
                u8 l4;

                hdr_16b = &opah->opah;
                l4  = hfi1_16B_get_l4(hdr_16b);
                if (l4 == OPA_16B_L4_IB_LOCAL)
                        ohdr = &hdr_16b->u.oth;
                else
                        ohdr = &hdr_16b->u.l.oth;
        }
        return ohdr;
}

struct rvt_sge_state;

/*
 * Get/Set IB link-level config parameters for f_get/set_ib_cfg()
 * Mostly for MADs that set or query link parameters, also ipath
 * config interfaces
 */
#define HFI1_IB_CFG_LIDLMC 0 /* LID (LS16b) and Mask (MS16b) */
#define HFI1_IB_CFG_LWID_DG_ENB 1 /* allowed Link-width downgrade */
#define HFI1_IB_CFG_LWID_ENB 2 /* allowed Link-width */
#define HFI1_IB_CFG_LWID 3 /* currently active Link-width */
#define HFI1_IB_CFG_SPD_ENB 4 /* allowed Link speeds */
#define HFI1_IB_CFG_SPD 5 /* current Link spd */
#define HFI1_IB_CFG_RXPOL_ENB 6 /* Auto-RX-polarity enable */
#define HFI1_IB_CFG_LREV_ENB 7 /* Auto-Lane-reversal enable */
#define HFI1_IB_CFG_LINKLATENCY 8 /* Link Latency (IB1.2 only) */
#define HFI1_IB_CFG_HRTBT 9 /* IB heartbeat off/enable/auto; DDR/QDR only */
#define HFI1_IB_CFG_OP_VLS 10 /* operational VLs */
#define HFI1_IB_CFG_VL_HIGH_CAP 11 /* num of VL high priority weights */
#define HFI1_IB_CFG_VL_LOW_CAP 12 /* num of VL low priority weights */
#define HFI1_IB_CFG_OVERRUN_THRESH 13 /* IB overrun threshold */
#define HFI1_IB_CFG_PHYERR_THRESH 14 /* IB PHY error threshold */
#define HFI1_IB_CFG_LINKDEFAULT 15 /* IB link default (sleep/poll) */
#define HFI1_IB_CFG_PKEYS 16 /* update partition keys */
#define HFI1_IB_CFG_MTU 17 /* update MTU in IBC */
#define HFI1_IB_CFG_VL_HIGH_LIMIT 19
#define HFI1_IB_CFG_PMA_TICKS 20 /* PMA sample tick resolution */
#define HFI1_IB_CFG_PORT 21 /* switch port we are connected to */

/*
 * HFI or Host Link States
 *
 * These describe the states the driver thinks the logical and physical
 * states are in.  Used as an argument to set_link_state().  Implemented
 * as bits for easy multi-state checking.  The actual state can only be
 * one.
 */
#define __HLS_UP_INIT_BP        0
#define __HLS_UP_ARMED_BP       1
#define __HLS_UP_ACTIVE_BP      2
#define __HLS_DN_DOWNDEF_BP     3       /* link down default */
#define __HLS_DN_POLL_BP        4
#define __HLS_DN_DISABLE_BP     5
#define __HLS_DN_OFFLINE_BP     6
#define __HLS_VERIFY_CAP_BP     7
#define __HLS_GOING_UP_BP       8
#define __HLS_GOING_OFFLINE_BP  9
#define __HLS_LINK_COOLDOWN_BP 10

#define HLS_UP_INIT       BIT(__HLS_UP_INIT_BP)
#define HLS_UP_ARMED      BIT(__HLS_UP_ARMED_BP)
#define HLS_UP_ACTIVE     BIT(__HLS_UP_ACTIVE_BP)
#define HLS_DN_DOWNDEF    BIT(__HLS_DN_DOWNDEF_BP) /* link down default */
#define HLS_DN_POLL       BIT(__HLS_DN_POLL_BP)
#define HLS_DN_DISABLE    BIT(__HLS_DN_DISABLE_BP)
#define HLS_DN_OFFLINE    BIT(__HLS_DN_OFFLINE_BP)
#define HLS_VERIFY_CAP    BIT(__HLS_VERIFY_CAP_BP)
#define HLS_GOING_UP      BIT(__HLS_GOING_UP_BP)
#define HLS_GOING_OFFLINE BIT(__HLS_GOING_OFFLINE_BP)
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)

#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
#define HLS_DOWN ~(HLS_UP)

#define HLS_DEFAULT HLS_DN_POLL
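
/*
 * Because each state is a distinct bit, multi-state checks collapse to
 * a single mask test (a sketch of typical usage, not new API):
 *
 *      if (ppd->host_link_state & HLS_UP)
 *              ...     link is INIT, ARMED, or ACTIVE
 */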

/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
/* use this MTU size as the default maximum */
#define HFI1_DEFAULT_MAX_MTU 10240
/* default partition key */
#define DEFAULT_PKEY 0xffff

/*
 * Possible fabric manager config parameters for fm_{get,set}_table()
 */
#define FM_TBL_VL_HIGH_ARB              1 /* Get/set VL high prio weights */
#define FM_TBL_VL_LOW_ARB               2 /* Get/set VL low prio weights */
#define FM_TBL_BUFFER_CONTROL           3 /* Get/set Buffer Control */
#define FM_TBL_SC2VLNT                  4 /* Get/set SC->VLnt */
#define FM_TBL_VL_PREEMPT_ELEMS         5 /* Get (no set) VL preempt elems */
#define FM_TBL_VL_PREEMPT_MATRIX        6 /* Get (no set) VL preempt matrix */

/*
 * Possible "operations" for f_rcvctrl(ppd, op, ctxt)
 * these are bits so they can be combined, e.g.
 * HFI1_RCVCTRL_INTRAVAIL_ENB | HFI1_RCVCTRL_CTXT_ENB
 */
#define HFI1_RCVCTRL_TAILUPD_ENB 0x01
#define HFI1_RCVCTRL_TAILUPD_DIS 0x02
#define HFI1_RCVCTRL_CTXT_ENB 0x04
#define HFI1_RCVCTRL_CTXT_DIS 0x08
#define HFI1_RCVCTRL_INTRAVAIL_ENB 0x10
#define HFI1_RCVCTRL_INTRAVAIL_DIS 0x20
#define HFI1_RCVCTRL_PKEY_ENB 0x40  /* Note, default is enabled */
#define HFI1_RCVCTRL_PKEY_DIS 0x80
#define HFI1_RCVCTRL_TIDFLOW_ENB 0x0400
#define HFI1_RCVCTRL_TIDFLOW_DIS 0x0800
#define HFI1_RCVCTRL_ONE_PKT_EGR_ENB 0x1000
#define HFI1_RCVCTRL_ONE_PKT_EGR_DIS 0x2000
#define HFI1_RCVCTRL_NO_RHQ_DROP_ENB 0x4000
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
#define HFI1_RCVCTRL_URGENT_ENB 0x40000
#define HFI1_RCVCTRL_URGENT_DIS 0x80000

/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN    0x1
#define HFI1_PART_ENFORCE_OUT   0x2

/* how often we check for synthetic counter wrap around */
#define SYNTH_CNT_TIME 3

/* Counter flags */
#define CNTR_NORMAL             0x0 /* Normal counters, just read register */
#define CNTR_SYNTH              0x1 /* Synthetic counters, saturate at all 1s */
#define CNTR_DISABLED           0x2 /* Disable this counter */
#define CNTR_32BIT              0x4 /* Simulate 64 bits for this counter */
#define CNTR_VL                 0x8 /* Per VL counter */
#define CNTR_SDMA              0x10
#define CNTR_INVALID_VL         -1  /* Specifies invalid VL */
#define CNTR_MODE_W             0x0
#define CNTR_MODE_R             0x1

/* VLs Supported/Operational */
#define HFI1_MIN_VLS_SUPPORTED 1
#define HFI1_MAX_VLS_SUPPORTED 8

#define HFI1_GUIDS_PER_PORT  5
#define HFI1_PORT_GUID_INDEX 0

static inline void incr_cntr64(u64 *cntr)
{
        if (*cntr < (u64)-1LL)
                (*cntr)++;
}

#define MAX_NAME_SIZE 64
struct hfi1_msix_entry {
        enum irq_type type;
        int irq;
        void *arg;
        cpumask_t mask;
        struct irq_affinity_notify notify;
};

struct hfi1_msix_info {
        /* lock to synchronize in_use_msix access */
        spinlock_t msix_lock;
        DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
        struct hfi1_msix_entry *msix_entries;
        u16 max_requested;
};

/* per-SL CCA information */
struct cca_timer {
        struct hrtimer hrtimer;
        struct hfi1_pportdata *ppd; /* read-only */
        int sl; /* read-only */
        u16 ccti; /* read/write - current value of CCTI */
};

struct link_down_reason {
        /*
         * SMA-facing value.  Should be set from .latest when
         * HLS_UP_* -> HLS_DN_* transition actually occurs.
         */
        u8 sma;
        u8 latest;
};

enum {
        LO_PRIO_TABLE,
        HI_PRIO_TABLE,
        MAX_PRIO_TABLE
};

struct vl_arb_cache {
        /* protect vl arb cache */
        spinlock_t lock;
        struct ib_vl_weight_elem table[VL_ARB_TABLE_SIZE];
};

/*
 * The structure below encapsulates data relevant to a physical IB Port.
 * Current chips support only one such port, but the separation
 * clarifies things a bit. Note that to conform to IB conventions,
 * port-numbers are one-based. The first or only port is port1.
 */
struct hfi1_pportdata {
        struct hfi1_ibport ibport_data;

        struct hfi1_devdata *dd;

        /* PHY support */
        struct qsfp_data qsfp_info;
        /* Values for SI tuning of SerDes */
        u32 port_type;
        u32 tx_preset_eq;
        u32 tx_preset_noeq;
        u32 rx_preset;
        u8  local_atten;
        u8  remote_atten;
        u8  default_atten;
        u8  max_power_class;

        /* did we read platform config from scratch registers? */
        bool config_from_scratch;

        /* GUIDs for this interface, in host order, guids[0] is a port guid */
        u64 guids[HFI1_GUIDS_PER_PORT];

        /* GUID for peer interface, in host order */
        u64 neighbor_guid;

        /* up or down physical link state */
        u32 linkup;

        /*
         * this address is mapped read-only into user processes so they can
         * get status cheaply, whenever they want.  One qword of status per port
         */
        u64 *statusp;

        /* SendDMA related entries */

        struct workqueue_struct *hfi1_wq;
        struct workqueue_struct *link_wq;

        /* move out of interrupt context */
        struct work_struct link_vc_work;
        struct work_struct link_up_work;
        struct work_struct link_down_work;
        struct work_struct sma_message_work;
        struct work_struct freeze_work;
        struct work_struct link_downgrade_work;
        struct work_struct link_bounce_work;
        struct delayed_work start_link_work;
        /* host link state variables */
        struct mutex hls_lock;
        u32 host_link_state;

        /* these are the "32 bit" regs */

        u32 ibmtu; /* The MTU programmed for this unit */
        /*
         * Current max size IB packet (in bytes) including IB headers, that
         * we can send. Changes when ibmtu changes.
         */
        u32 ibmaxlen;
        u32 current_egress_rate; /* units [10^6 bits/sec] */
        /* LID programmed for this instance */
        u32 lid;
        /* list of pkeys programmed; 0 if not set */
        u16 pkeys[MAX_PKEY_VALUES];
        u16 link_width_supported;
        u16 link_width_downgrade_supported;
        u16 link_speed_supported;
        u16 link_width_enabled;
        u16 link_width_downgrade_enabled;
        u16 link_speed_enabled;
        u16 link_width_active;
        u16 link_width_downgrade_tx_active;
        u16 link_width_downgrade_rx_active;
        u16 link_speed_active;
        u8 vls_supported;
        u8 vls_operational;
        u8 actual_vls_operational;
        /* LID mask control */
        u8 lmc;
        /* Rx Polarity inversion (compensate for ~tx on partner) */
        u8 rx_pol_inv;

        u8 hw_pidx;     /* physical port index */
        u32 port;        /* IB port number; index into dd->pports is port - 1 */
        /* type of neighbor node */
        u8 neighbor_type;
        u8 neighbor_normal;
        u8 neighbor_fm_security; /* 1 if firmware checking is disabled */
        u8 neighbor_port_number;
        u8 is_sm_config_started;
        u8 offline_disabled_reason;
        u8 is_active_optimize_enabled;
        u8 driver_link_ready;   /* driver ready for active link */
        u8 link_enabled;        /* link enabled? */
        u8 linkinit_reason;
        u8 local_tx_rate;       /* rate given to 8051 firmware */
        u8 qsfp_retry_count;

        /* placeholders for IB MAD packet settings */
        u8 overrun_threshold;
        u8 phy_error_threshold;
        unsigned int is_link_down_queued;

        /* Used to override LED behavior for things like maintenance beaconing */
        /*
         * Alternates per phase of blink
         * [0] holds LED off duration, [1] holds LED on duration
         */
        unsigned long led_override_vals[2];
        u8 led_override_phase; /* LSB picks from vals[] */
        atomic_t led_override_timer_active;
        /* Used to flash LEDs in override mode */
        struct timer_list led_override_timer;

        u32 sm_trap_qp;
        u32 sa_qp;

        /*
         * cca_timer_lock protects access to the per-SL cca_timer
         * structures (specifically the ccti member).
         */
        spinlock_t cca_timer_lock ____cacheline_aligned_in_smp;
        struct cca_timer cca_timer[OPA_MAX_SLS];

        /* List of congestion control table entries */
        struct ib_cc_table_entry_shadow ccti_entries[CC_TABLE_SHADOW_MAX];

        /* congestion entries, each entry corresponding to a SL */
        struct opa_congestion_setting_entry_shadow
                congestion_entries[OPA_MAX_SLS];

        /*
         * cc_state_lock protects (write) access to the per-port
         * struct cc_state.
         */
        spinlock_t cc_state_lock ____cacheline_aligned_in_smp;

        struct cc_state __rcu *cc_state;

        /* Total number of congestion control table entries */
        u16 total_cct_entry;

        /* Bit map identifying service level */
        u32 cc_sl_control_map;

        /* CA's max number of 64 entry units in the congestion control table */
        u8 cc_max_table_entries;

        /*
         * begin congestion log related entries
         * cc_log_lock protects all congestion log related data
         */
        spinlock_t cc_log_lock ____cacheline_aligned_in_smp;
        u8 threshold_cong_event_map[OPA_MAX_SLS / 8];
        u16 threshold_event_counter;
        struct opa_hfi1_cong_log_event_internal cc_events[OPA_CONG_LOG_ELEMS];
        int cc_log_idx; /* index for logging events */
        int cc_mad_idx; /* index for reporting events */
        /* end congestion log related entries */

        struct vl_arb_cache vl_arb_cache[MAX_PRIO_TABLE];

        /* port relative counter buffer */
        u64 *cntrs;
        /* port relative synthetic counter buffer */
        u64 *scntrs;
        /* port_xmit_discards are synthesized from different egress errors */
        u64 port_xmit_discards;
        u64 port_xmit_discards_vl[C_VL_COUNT];
        u64 port_xmit_constraint_errors;
        u64 port_rcv_constraint_errors;
        /* count of 'link_err' interrupts from DC */
        u64 link_downed;
        /* number of times link retrained successfully */
        u64 link_up;
        /* number of times a link unknown frame was reported */
        u64 unknown_frame_count;
        /* port_ltp_crc_mode is returned in 'portinfo' MADs */
        u16 port_ltp_crc_mode;
        /* port_crc_mode_enabled is the crc we support */
        u8 port_crc_mode_enabled;
        /* mgmt_allowed is also returned in 'portinfo' MADs */
        u8 mgmt_allowed;
        u8 part_enforce; /* partition enforcement flags */
        struct link_down_reason local_link_down_reason;
        struct link_down_reason neigh_link_down_reason;
        /* Value to be sent to link peer on LinkDown .*/
        u8 remote_link_down_reason;
        /* Error events that will cause a port bounce. */
        u32 port_error_action;
        struct work_struct linkstate_active_work;
        /* Does this port need to prescan for FECNs */
        bool cc_prescan;
        /*
         * Sample sendWaitCnt & sendWaitVlCnt during link transition
         * and counter request.
         */
        u64 port_vl_xmit_wait_last[C_VL_COUNT + 1];
        u16 prev_link_width;
        u64 vl_xmit_flit_cnt[C_VL_COUNT + 1];
};

typedef void (*opcode_handler)(struct hfi1_packet *packet);
typedef void (*hfi1_make_req)(struct rvt_qp *qp,
                              struct hfi1_pkt_state *ps,
                              struct rvt_swqe *wqe);
extern const rhf_rcv_function_ptr normal_rhf_rcv_functions[];
extern const rhf_rcv_function_ptr netdev_rhf_rcv_functions[];

/* return values for the RHF receive functions */
#define RHF_RCV_CONTINUE  0     /* keep going */
#define RHF_RCV_DONE      1     /* stop, this packet processed */
#define RHF_RCV_REPROCESS 2     /* stop. retain this packet */

struct rcv_array_data {
        u16 ngroups;
        u16 nctxt_extra;
        u8 group_size;
};

struct per_vl_data {
        u16 mtu;
        struct send_context *sc;
};

/* 16 to directly index */
#define PER_VL_SEND_CONTEXTS 16

struct err_info_rcvport {
        u8 status_and_code;
        u64 packet_flit1;
        u64 packet_flit2;
};

struct err_info_constraint {
        u8 status;
        u16 pkey;
        u32 slid;
};

struct hfi1_temp {
        unsigned int curr;       /* current temperature */
        unsigned int lo_lim;     /* low temperature limit */
        unsigned int hi_lim;     /* high temperature limit */
        unsigned int crit_lim;   /* critical temperature limit */
        u8 triggers;      /* temperature triggers */
};

struct hfi1_i2c_bus {
        struct hfi1_devdata *controlling_dd; /* current controlling device */
        struct i2c_adapter adapter;     /* bus details */
        struct i2c_algo_bit_data algo;  /* bus algorithm details */
        int num;                        /* bus number, 0 or 1 */
};

/* common data between shared ASIC HFIs */
struct hfi1_asic_data {
        struct hfi1_devdata *dds[2];    /* back pointers */
        struct mutex asic_resource_mutex;
        struct hfi1_i2c_bus *i2c_bus0;
        struct hfi1_i2c_bus *i2c_bus1;
};

/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES  256
#define NUM_MAP_REGS      32

/* Virtual NIC information */
struct hfi1_vnic_data {
        struct kmem_cache *txreq_cache;
        u8 num_vports;
};

struct hfi1_vnic_vport_info;

/* device data struct now contains only "general per-device" info.
 * fields related to a physical IB port are in a hfi1_pportdata struct.
 */
struct sdma_engine;
struct sdma_vl_map;

#define BOARD_VERS_MAX 96 /* how long the version string can be */
#define SERIAL_MAX 16 /* length of the serial number */

typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
struct hfi1_netdev_rx;
struct hfi1_devdata {
        struct hfi1_ibdev verbs_dev;     /* must be first */
        /* pointers to related structs for this device */
        /* pci access data structure */
        struct pci_dev *pcidev;
        struct cdev user_cdev;
        struct cdev diag_cdev;
        struct cdev ui_cdev;
        struct device *user_device;
        struct device *diag_device;
        struct device *ui_device;

        /* first mapping up to RcvArray */
        u8 __iomem *kregbase1;
        resource_size_t physaddr;

        /* second uncached mapping from RcvArray to pio send buffers */
        u8 __iomem *kregbase2;
        /* for detecting offset above kregbase2 address */
        u32 base2_start;

        /* Per VL data. Enough for all VLs but not all elements are set/used. */
        struct per_vl_data vld[PER_VL_SEND_CONTEXTS];
        /* send context data */
        struct send_context_info *send_contexts;
        /* map hardware send contexts to software index */
        u8 *hw_to_sw;
        /* spinlock for allocating and releasing send context resources */
        spinlock_t sc_lock;
        /* lock for pio_map */
        spinlock_t pio_map_lock;
        /* Send Context initialization lock. */
        spinlock_t sc_init_lock;
        /* lock for sdma_map */
        spinlock_t                          sde_map_lock;
        /* array of kernel send contexts */
        struct send_context **kernel_send_context;
        /* array of vl maps */
        struct pio_vl_map __rcu *pio_map;
        /* default flags to last descriptor */
        u64 default_desc1;

        /* fields common to all SDMA engines */

        volatile __le64                    *sdma_heads_dma; /* DMA'ed by chip */
        dma_addr_t                          sdma_heads_phys;
        void                               *sdma_pad_dma; /* DMA'ed by chip */
        dma_addr_t                          sdma_pad_phys;
        /* for deallocation */
        size_t                              sdma_heads_size;
        /* num used */
        u32                                 num_sdma;
        /* array of engines sized by num_sdma */
        struct sdma_engine                 *per_sdma;
        /* array of vl maps */
        struct sdma_vl_map __rcu           *sdma_map;
        /* SPC freeze waitqueue and variable */
        wait_queue_head_t                 sdma_unfreeze_wq;
        atomic_t                          sdma_unfreeze_count;

        u32 lcb_access_count;           /* count of LCB users */

        /* common data between shared ASIC HFIs in this OS */
        struct hfi1_asic_data *asic_data;

        /* mem-mapped pointer to base of PIO buffers */
        void __iomem *piobase;
        /*
         * write-combining mem-mapped pointer to base of RcvArray
         * memory.
         */
        void __iomem *rcvarray_wc;
        /*
         * credit return base - a per-NUMA range of DMA address that
         * the chip will use to update the per-context free counter
         */
        struct credit_return_base *cr_base;

        /* send context numbers and sizes for each type */
        struct sc_config_sizes sc_sizes[SC_MAX];

        char *boardname; /* human readable board info */

        u64 ctx0_seq_drop;

        /* reset value */
        u64 z_int_counter;
        u64 z_rcv_limit;
        u64 z_send_schedule;

        u64 __percpu *send_schedule;
        /* number of reserved contexts for netdev usage */
        u16 num_netdev_contexts;
        /* number of receive contexts in use by the driver */
        u32 num_rcv_contexts;
        /* number of pio send contexts in use by the driver */
        u32 num_send_contexts;
        /*
         * number of ctxts available for PSM open
         */
        u32 freectxts;
        /* total number of available user/PSM contexts */
        u32 num_user_contexts;
        /* base receive interrupt timeout, in CSR units */
        u32 rcv_intr_timeout_csr;

        spinlock_t sendctrl_lock; /* protect changes to SendCtrl */
        spinlock_t rcvctrl_lock; /* protect changes to RcvCtrl */
        spinlock_t uctxt_lock; /* protect rcd changes */
        struct mutex dc8051_lock; /* exclusive access to 8051 */
        struct workqueue_struct *update_cntr_wq;
        struct work_struct update_cntr_work;
        /* exclusive access to 8051 memory */
        spinlock_t dc8051_memlock;
        int dc8051_timed_out;   /* remember if the 8051 timed out */
        /*
         * A page that will hold event notification bitmaps for all
         * contexts. This page will be mapped into all processes.
         */
        unsigned long *events;
        /*
         * per unit status, see also portdata statusp
         * mapped read-only into user processes so they can get unit and
         * IB link status cheaply
         */
        struct hfi1_status *status;

        /* revision register shadow */
        u64 revision;
        /* Base GUID for device (network order) */
        u64 base_guid;

        /* both sides of the PCIe link are gen3 capable */
        u8 link_gen3_capable;
        u8 dc_shutdown;
        /* localbus width (1, 2, 4, 8, 16, 32) from config space */
        u32 lbus_width;
        /* localbus speed in MHz */
        u32 lbus_speed;
        int unit; /* unit # of this chip */
        int node; /* home node of this chip */

        /* save these PCI fields to restore after a reset */
        u32 pcibar0;
        u32 pcibar1;
        u32 pci_rom;
        u16 pci_command;
        u16 pcie_devctl;
        u16 pcie_lnkctl;
        u16 pcie_devctl2;
        u32 pci_msix0;
        u32 pci_tph2;

        /*
         * ASCII serial number, from flash, large enough for original
         * all digit strings, and longer serial number format
         */
        u8 serial[SERIAL_MAX];
        /* human readable board version */
        u8 boardversion[BOARD_VERS_MAX];
        u8 lbus_info[32]; /* human readable localbus info */
        /* chip major rev, from CceRevision */
        u8 majrev;
        /* chip minor rev, from CceRevision */
        u8 minrev;
        /* hardware ID */
        u8 hfi1_id;
        /* implementation code */
        u8 icode;
        /* vAU of this device */
        u8 vau;
        /* vCU of this device */
        u8 vcu;
        /* link credits of this device */
        u16 link_credits;
        /* initial vl15 credits to use */
        u16 vl15_init;

        /*
         * Cached value for vl15buf, read during verify cap interrupt. VL15
         * credits are to be kept at 0 and set when handling the link-up
         * interrupt. This removes the possibility of receiving VL15 MAD
         * packets before this HFI is ready.
         */
        u16 vl15buf_cached;

        /* Misc small ints */
        u8 n_krcv_queues;
        u8 qos_shift;

        u16 irev;       /* implementation revision */
        u32 dc8051_ver; /* 8051 firmware version */

        spinlock_t hfi1_diag_trans_lock; /* protect diag observer ops */
        struct platform_config platform_config;
        struct platform_config_cache pcfg_cache;

        struct diag_client *diag_client;

        /* general interrupt: mask of handled interrupts */
        u64 gi_mask[CCE_NUM_INT_CSRS];

        struct rcv_array_data rcv_entries;

        /* cycle length of PS* counters in HW (in picoseconds) */
        u16 psxmitwait_check_rate;

        /*
         * 64 bit synthetic counters
         */
        struct timer_list synth_stats_timer;

        /* MSI-X information */
        struct hfi1_msix_info msix_info;

        /*
         * device counters
         */
        char *cntrnames;
        size_t cntrnameslen;
        size_t ndevcntrs;
        u64 *cntrs;
        u64 *scntrs;

        /*
         * remembered values for synthetic counters
         */
        u64 last_tx;
        u64 last_rx;

        /*
         * per-port counters
         */
        size_t nportcntrs;
        char *portcntrnames;
        size_t portcntrnameslen;

        struct err_info_rcvport err_info_rcvport;
        struct err_info_constraint err_info_rcv_constraint;
        struct err_info_constraint err_info_xmit_constraint;

        atomic_t drop_packet;
        bool do_drop;
        u8 err_info_uncorrectable;
        u8 err_info_fmconfig;

        /*
         * Software counters for the status bits defined by the
         * associated error status registers
         */
        u64 cce_err_status_cnt[NUM_CCE_ERR_STATUS_COUNTERS];
        u64 rcv_err_status_cnt[NUM_RCV_ERR_STATUS_COUNTERS];
        u64 misc_err_status_cnt[NUM_MISC_ERR_STATUS_COUNTERS];
        u64 send_pio_err_status_cnt[NUM_SEND_PIO_ERR_STATUS_COUNTERS];
        u64 send_dma_err_status_cnt[NUM_SEND_DMA_ERR_STATUS_COUNTERS];
        u64 send_egress_err_status_cnt[NUM_SEND_EGRESS_ERR_STATUS_COUNTERS];
        u64 send_err_status_cnt[NUM_SEND_ERR_STATUS_COUNTERS];

        /* Software counter that spans all contexts */
        u64 sw_ctxt_err_status_cnt[NUM_SEND_CTXT_ERR_STATUS_COUNTERS];
        /* Software counter that spans all DMA engines */
        u64 sw_send_dma_eng_err_status_cnt[
                NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS];
        /* Software counter that aggregates all cce_err_status errors */
        u64 sw_cce_err_status_aggregate;
        /* Software counter that aggregates all bypass packet rcv errors */
        u64 sw_rcv_bypass_packet_errors;

        /* Save the enabled LCB error bits */
        u64 lcb_err_en;
        struct cpu_mask_set *comp_vect;
        int *comp_vect_mappings;
        u32 comp_vect_possible_cpus;

        /*
         * Capability to have different send engines simply by changing a
         * pointer value.
         */
        send_routine process_pio_send ____cacheline_aligned_in_smp;
        send_routine process_dma_send;
        void (*pio_inline_send)(struct hfi1_devdata *dd, struct pio_buf *pbuf,
                                u64 pbc, const void *from, size_t count);
        int (*process_vnic_dma_send)(struct hfi1_devdata *dd, u8 q_idx,
                                     struct hfi1_vnic_vport_info *vinfo,
                                     struct sk_buff *skb, u64 pbc, u8 plen);
        /* hfi1_pportdata, points to array of (physical) port-specific
         * data structs, indexed by pidx (0..n-1)
         */
        struct hfi1_pportdata *pport;
        /* receive context data */
        struct hfi1_ctxtdata **rcd;
        u64 __percpu *int_counter;
        /* verbs tx opcode stats */
        struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
        /* device (not port) flags, basically device capabilities */
        u16 flags;
        /* Number of physical ports available */
        u8 num_pports;
        /* Lowest context number which can be used by user processes or VNIC */
        u8 first_dyn_alloc_ctxt;
        /* adding a new field here would make it part of this cacheline */

        /* seqlock for sc2vl */
        seqlock_t sc2vl_lock ____cacheline_aligned_in_smp;
        u64 sc2vl[4];
        u64 __percpu *rcv_limit;
        /* adding a new field here would make it part of this cacheline */

        /* OUI comes from the HW. Used everywhere as 3 separate bytes. */
        u8 oui1;
        u8 oui2;
        u8 oui3;

        /* Timer and counter used to detect RcvBufOvflCnt changes */
        struct timer_list rcverr_timer;

        wait_queue_head_t event_queue;

        /* receive context tail dummy address */
        __le64 *rcvhdrtail_dummy_kvaddr;
        dma_addr_t rcvhdrtail_dummy_dma;

        u32 rcv_ovfl_cnt;
        /* Serialize ASPM enable/disable between multiple verbs contexts */
        spinlock_t aspm_lock;
        /* Number of verbs contexts which have disabled ASPM */
        atomic_t aspm_disabled_cnt;
        /* Keeps track of user space clients */
        atomic_t user_refcount;
        /* Used to wait for outstanding user space clients before dev removal */
        struct completion user_comp;

        bool eprom_available;   /* true if EPROM is available for this device */
        bool aspm_supported;    /* Does HW support ASPM */
        bool aspm_enabled;      /* ASPM state: enabled/disabled */
        struct rhashtable *sdma_rht;

        /* vnic data */
        struct hfi1_vnic_data vnic;
        /* Lock to protect IRQ SRC register access */
        spinlock_t irq_src_lock;
        int vnic_num_vports;
        struct hfi1_netdev_rx *netdev_rx;
        struct hfi1_affinity_node *affinity_entry;

        /* Keeps track of IPoIB RSM rule users */
        atomic_t ipoib_rsm_usr_num;
};

/* 8051 firmware version helper */
#define dc8051_ver(a, b, c) ((a) << 16 | (b) << 8 | (c))
#define dc8051_ver_maj(a) (((a) & 0xff0000) >> 16)
#define dc8051_ver_min(a) (((a) & 0x00ff00) >> 8)
#define dc8051_ver_patch(a) ((a) & 0x0000ff)
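
/*
 * Worked example (illustrative): dc8051_ver(1, 27, 0) packs to 0x011b00;
 * dc8051_ver_maj(0x011b00) == 1 and dc8051_ver_min(0x011b00) == 27.
 */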

/* f_put_tid types */
#define PT_EXPECTED       0
#define PT_EAGER          1
#define PT_INVALID_FLUSH  2
#define PT_INVALID        3

struct tid_rb_node;
struct mmu_rb_node;
struct mmu_rb_handler;

/* Private data for file operations */
struct hfi1_filedata {
        struct srcu_struct pq_srcu;
        struct hfi1_devdata *dd;
        struct hfi1_ctxtdata *uctxt;
        struct hfi1_user_sdma_comp_q *cq;
        /* update side lock for SRCU */
        spinlock_t pq_rcu_lock;
        struct hfi1_user_sdma_pkt_q __rcu *pq;
        u16 subctxt;
        /* for cpu affinity; -1 if none */
        int rec_cpu_num;
        u32 tid_n_pinned;
        bool use_mn;
        struct tid_rb_node **entry_to_rb;
        spinlock_t tid_lock; /* protect tid_[limit,used] counters */
        u32 tid_limit;
        u32 tid_used;
        u32 *invalid_tids;
        u32 invalid_tid_idx;
        /* protect invalid_tids array and invalid_tid_idx */
        spinlock_t invalid_lock;
};

extern struct xarray hfi1_dev_table;
struct hfi1_devdata *hfi1_lookup(int unit);

static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
{
        return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
                HFI1_MAX_SHARED_CTXTS;
}
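
/*
 * A sketch of how the offset is consumed (illustrative): each
 * dynamically allocated context owns HFI1_MAX_SHARED_CTXTS event slots
 * in the dd->events page, one per subcontext, so a caller indexes
 * roughly as:
 *
 *      evs = dd->events + uctxt_offset(uctxt) + subctxt;
 */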
1456
1457int hfi1_init(struct hfi1_devdata *dd, int reinit);
1458int hfi1_count_active_units(void);
1459
1460int hfi1_diag_add(struct hfi1_devdata *dd);
1461void hfi1_diag_remove(struct hfi1_devdata *dd);
1462void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup);
1463
1464void handle_user_interrupt(struct hfi1_ctxtdata *rcd);
1465
1466int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
1467int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd);
1468int hfi1_create_kctxts(struct hfi1_devdata *dd);
1469int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
1470                         struct hfi1_ctxtdata **rcd);
1471void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
1472void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
1473                         struct hfi1_devdata *dd, u8 hw_pidx, u32 port);
1474void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
1475int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
1476int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
1477struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
1478                                                 u16 ctxt);
1479struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
1480int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
1481int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
1482int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
1483int handle_receive_interrupt_napi_fp(struct hfi1_ctxtdata *rcd, int budget);
1484int handle_receive_interrupt_napi_sp(struct hfi1_ctxtdata *rcd, int budget);
1485void set_all_slowpath(struct hfi1_devdata *dd);
1486
1487extern const struct pci_device_id hfi1_pci_tbl[];
1488void hfi1_make_ud_req_9B(struct rvt_qp *qp,
1489                         struct hfi1_pkt_state *ps,
1490                         struct rvt_swqe *wqe);
1491
1492void hfi1_make_ud_req_16B(struct rvt_qp *qp,
1493                          struct hfi1_pkt_state *ps,
1494                          struct rvt_swqe *wqe);
1495
1496/* receive packet handler dispositions */
1497#define RCV_PKT_OK      0x0 /* keep going */
1498#define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
1499#define RCV_PKT_DONE    0x2 /* stop, no more packets detected */
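
/*
 * Hypothetical sketch of how a receive loop consumes these
 * dispositions (the real handlers live elsewhere in the driver;
 * process_one_packet() is a made-up helper for illustration):
 *
 *	while (hfi1_packet_present(rcd)) {
 *		int disp = process_one_packet(rcd);
 *
 *		if (disp == RCV_PKT_LIMIT || disp == RCV_PKT_DONE)
 *			break;
 *	}
 */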
1500
1501/**
1502 * hfi1_rcd_head - read accessor for rcd head
1503 * @rcd: the context
1504 */
1505static inline u32 hfi1_rcd_head(struct hfi1_ctxtdata *rcd)
1506{
1507        return rcd->head;
1508}
1509
1510/**
1511 * hfi1_set_rcd_head - write accessor for rcd head
1512 * @rcd: the context
1513 * @head: the new head
1514 */
1515static inline void hfi1_set_rcd_head(struct hfi1_ctxtdata *rcd, u32 head)
1516{
1517        rcd->head = head;
1518}
1519
1520/* calculate the current RHF address */
1521static inline __le32 *get_rhf_addr(struct hfi1_ctxtdata *rcd)
1522{
1523        return (__le32 *)rcd->rcvhdrq + rcd->head + rcd->rhf_offset;
1524}
1525
1526/* return DMA_RTAIL configuration */
1527static inline bool get_dma_rtail_setting(struct hfi1_ctxtdata *rcd)
1528{
1529        return !!HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL);
1530}
1531
1532/**
1533 * hfi1_seq_incr_wrap - wrapping increment for sequence
1534 * @seq: the current sequence number
1535 *
1536 * Returns: the incremented seq
1537 */
1538static inline u8 hfi1_seq_incr_wrap(u8 seq)
1539{
1540        if (++seq > RHF_MAX_SEQ)
1541                seq = 1;
1542        return seq;
1543}
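
/*
 * E.g. if RHF_MAX_SEQ is 13, repeated calls yield 1, 2, ..., 13,
 * 1, 2, ...; note the wrap target is 1, not 0.
 */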
1544
1545/**
1546 * hfi1_seq_cnt - return seq_cnt member
1547 * @rcd: the receive context
1548 *
1549 * Return seq_cnt member
1550 */
1551static inline u8 hfi1_seq_cnt(struct hfi1_ctxtdata *rcd)
1552{
1553        return rcd->seq_cnt;
1554}
1555
1556/**
1557 * hfi1_set_seq_cnt - set seq_cnt member
1558 * @rcd: the receive context
1559 * @cnt: the new seq_cnt value
1560 * Set the seq_cnt member to @cnt
1561 */
1562static inline void hfi1_set_seq_cnt(struct hfi1_ctxtdata *rcd, u8 cnt)
1563{
1564        rcd->seq_cnt = cnt;
1565}
1566
1567/**
1568 * last_rcv_seq - detect the end of a packet sequence
1569 * @rcd: the receive context
1570 * @seq: the sequence number read from the RHF
1571 *
1572 * Return true if @seq does not match the expected rcd->seq_cnt
1573 */
1574static inline bool last_rcv_seq(struct hfi1_ctxtdata *rcd, u32 seq)
1575{
1576        return seq != rcd->seq_cnt;
1577}
1578
1579/**
1580 * hfi1_seq_incr - increment context sequence number
1581 * @rcd: the receive context
1582 * @seq: the current sequence number
1583 *
1584 * Returns: true if this was the last packet
1585 */
1586static inline bool hfi1_seq_incr(struct hfi1_ctxtdata *rcd, u32 seq)
1587{
1588        rcd->seq_cnt = hfi1_seq_incr_wrap(rcd->seq_cnt);
1589        return last_rcv_seq(rcd, seq);
1590}
1591
1592/**
1593 * get_hdrqentsize - return hdrq entry size
1594 * @rcd: the receive context
1595 */
1596static inline u8 get_hdrqentsize(struct hfi1_ctxtdata *rcd)
1597{
1598        return rcd->rcvhdrqentsize;
1599}
1600
1601/**
1602 * get_hdrq_cnt - return hdrq count
1603 * @rcd: the receive context
1604 */
1605static inline u16 get_hdrq_cnt(struct hfi1_ctxtdata *rcd)
1606{
1607        return rcd->rcvhdrq_cnt;
1608}
1609
1610/**
1611 * hfi1_is_slowpath - check if this context is slow path
1612 * @rcd: the receive context
1613 */
1614static inline bool hfi1_is_slowpath(struct hfi1_ctxtdata *rcd)
1615{
1616        return rcd->do_interrupt == rcd->slow_handler;
1617}
1618
1619/**
1620 * hfi1_is_fastpath - check if this context is fast path
1621 * @rcd: the receive context
1622 */
1623static inline bool hfi1_is_fastpath(struct hfi1_ctxtdata *rcd)
1624{
1625        if (rcd->ctxt == HFI1_CTRL_CTXT)
1626                return false;
1627
1628        return rcd->do_interrupt == rcd->fast_handler;
1629}
1630
1631/**
1632 * hfi1_set_fast - change to the fast handler
1633 * @rcd: the receive context
1634 */
1635static inline void hfi1_set_fast(struct hfi1_ctxtdata *rcd)
1636{
1637        if (unlikely(!rcd))
1638                return;
1639        if (unlikely(!hfi1_is_fastpath(rcd)))
1640                rcd->do_interrupt = rcd->fast_handler;
1641}
1642
1643int hfi1_reset_device(int unit);
1644
1645void receive_interrupt_work(struct work_struct *work);
1646
1647/* extract service channel from header and rhf */
1648static inline int hfi1_9B_get_sc5(struct ib_header *hdr, u64 rhf)
1649{
1650        return ib_get_sc(hdr) | ((!!(rhf_dc_info(rhf))) << 4);
1651}
1652
1653#define HFI1_JKEY_WIDTH       16
1654#define HFI1_JKEY_MASK        (BIT(HFI1_JKEY_WIDTH) - 1)
1655#define HFI1_ADMIN_JKEY_RANGE 32
1656
1657/*
1658 * J_KEYs are split and allocated in the following groups:
1659 *   0 - 31    - users with administrator privileges
1660 *  32 - 63    - kernel protocols using KDETH packets
1661 *  64 - 65535 - all other users using KDETH packets
1662 */
1663static inline u16 generate_jkey(kuid_t uid)
1664{
1665        u16 jkey = from_kuid(current_user_ns(), uid) & HFI1_JKEY_MASK;
1666
1667        if (capable(CAP_SYS_ADMIN))
1668                jkey &= HFI1_ADMIN_JKEY_RANGE - 1;
1669        else if (jkey < 64)
1670                jkey |= BIT(HFI1_JKEY_WIDTH - 1);
1671
1672        return jkey;
1673}
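
/*
 * Worked examples (illustrative only):
 *
 *	uid 100 with CAP_SYS_ADMIN: jkey = 100 & 31 = 4 (admin range 0-31)
 *	uid 10, not admin:   jkey = 10 | BIT(15)    = 0x800a (moved above 63)
 *	uid 70000, not admin: jkey = 70000 & 0xffff = 0x1170 (unchanged)
 */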
1674
1675/*
1676 * active_egress_rate
1677 *
1678 * returns the active egress rate in units of [10^6 bits/sec]
1679 */
1680static inline u32 active_egress_rate(struct hfi1_pportdata *ppd)
1681{
1682        u16 link_speed = ppd->link_speed_active;
1683        u16 link_width = ppd->link_width_active;
1684        u32 egress_rate;
1685
1686        if (link_speed == OPA_LINK_SPEED_25G)
1687                egress_rate = 25000;
1688        else /* assume OPA_LINK_SPEED_12_5G */
1689                egress_rate = 12500;
1690
1691        switch (link_width) {
1692        case OPA_LINK_WIDTH_4X:
1693                egress_rate *= 4;
1694                break;
1695        case OPA_LINK_WIDTH_3X:
1696                egress_rate *= 3;
1697                break;
1698        case OPA_LINK_WIDTH_2X:
1699                egress_rate *= 2;
1700                break;
1701        default:
1702                /* assume IB_WIDTH_1X */
1703                break;
1704        }
1705
1706        return egress_rate;
1707}
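
/*
 * E.g. a 25G link at 4X yields 25000 * 4 = 100000 Mbit/s; a 12.5G
 * link at 1X yields 12500 Mbit/s.
 */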
1708
1709/*
1710 * egress_cycles
1711 *
1712 * Returns the number of 'fabric clock cycles' to egress a packet
1713 * of length 'len' bytes, at 'rate' Mbit/s. Since the fabric clock
1714 * rate is (approximately) 805 MHz, the units of the returned value
1715 * are (1/805 MHz).
1716 */
1717static inline u32 egress_cycles(u32 len, u32 rate)
1718{
1719        u32 cycles;
1720
1721        /*
1722         * cycles is:
1723         *
1724         *          (length) [bits] / (rate) [bits/sec]
1725         *  ---------------------------------------------------
1726         *  fabric_clock_period == 1 /(805 * 10^6) [cycles/sec]
1727         */
1728
1729        cycles = len * 8; /* bits */
1730        cycles *= 805;
1731        cycles /= rate;
1732
1733        return cycles;
1734}
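
/*
 * Worked example: a 4096 byte packet on a 100000 Mbit/s (25G x 4X)
 * link:
 *
 *	cycles = 4096 * 8 * 805 / 100000 = 263
 *
 * which is roughly 263 / 805MHz ~= 327 ns of egress time.  Note that
 * the intermediate product len * 8 * 805 must fit in a u32, which
 * holds for packet lengths up to about 666KB.
 */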
1735
1736void set_link_ipg(struct hfi1_pportdata *ppd);
1737void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
1738                  u32 rqpn, u8 svc_type);
1739void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
1740                u16 pkey, u32 slid, u32 dlid, u8 sc5,
1741                const struct ib_grh *old_grh);
1742void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
1743                    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
1744                    u8 sc5, const struct ib_grh *old_grh);
1745typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
1746                                u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
1747                                u8 sc5, const struct ib_grh *old_grh);
1748
1749#define PKEY_CHECK_INVALID -1
1750int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
1751                      u8 sc5, int8_t s_pkey_index);
1752
1753#define PACKET_EGRESS_TIMEOUT 350
1754static inline void pause_for_credit_return(struct hfi1_devdata *dd)
1755{
1756        /* Pause at least 1us, to ensure chip returns all credits */
1757        u32 usec = cclock_to_ns(dd, PACKET_EGRESS_TIMEOUT) / 1000;
1758
1759        udelay(usec ? usec : 1);
1760}
1761
1762/**
1763 * sc_to_vlt() - reverse lookup sc to vl
1764 * @dd: devdata
1765 * @sc5: 5 bit sc
1766 */
1767static inline u8 sc_to_vlt(struct hfi1_devdata *dd, u8 sc5)
1768{
1769        unsigned seq;
1770        u8 rval;
1771
1772        if (sc5 >= OPA_MAX_SCS)
1773                return (u8)(0xff);
1774
1775        do {
1776                seq = read_seqbegin(&dd->sc2vl_lock);
1777                rval = *(((u8 *)dd->sc2vl) + sc5);
1778        } while (read_seqretry(&dd->sc2vl_lock, seq));
1779
1780        return rval;
1781}
1782
1783#define PKEY_MEMBER_MASK 0x8000
1784#define PKEY_LOW_15_MASK 0x7fff
1785
1786/*
1787 * ingress_pkey_matches_entry - return 1 if the pkey matches ent (ent
1788 * being an entry from the ingress partition key table), return 0
1789 * otherwise. Use the matching criteria for ingress partition keys
1790 * specified in the OPAv1 spec., section 9.10.14.
1791 */
1792static inline int ingress_pkey_matches_entry(u16 pkey, u16 ent)
1793{
1794        u16 mkey = pkey & PKEY_LOW_15_MASK;
1795        u16 ment = ent & PKEY_LOW_15_MASK;
1796
1797        if (mkey == ment) {
1798                /*
1799                 * If pkey[15] is clear (limited partition member),
1800                 * match only if bit 15 of the corresponding table
1801                 * element is set (full member).
1802                 */
1803                if (!(pkey & PKEY_MEMBER_MASK))
1804                        return !!(ent & PKEY_MEMBER_MASK);
1805                return 1;
1806        }
1807        return 0;
1808}
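
/*
 * Examples of the matching rules above (illustrative):
 *
 *	pkey 0x8001 vs ent 0x0001: match (pkey is a full member)
 *	pkey 0x0001 vs ent 0x8001: match (entry is a full member)
 *	pkey 0x0001 vs ent 0x0001: no match (two limited members)
 *	pkey 0x8001 vs ent 0x8002: no match (low 15 bits differ)
 */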
1809
1810/*
1811 * ingress_pkey_table_search - search the entire pkey table for
1812 * an entry which matches 'pkey'. return 0 if a match is found,
1813 * and 1 otherwise.
1814 */
1815static inline int ingress_pkey_table_search(struct hfi1_pportdata *ppd, u16 pkey)
1816{
1817        int i;
1818
1819        for (i = 0; i < MAX_PKEY_VALUES; i++) {
1820                if (ingress_pkey_matches_entry(pkey, ppd->pkeys[i]))
1821                        return 0;
1822        }
1823        return 1;
1824}
1825
1826/*
1827 * ingress_pkey_table_fail - record a failure of ingress pkey validation,
1828 * i.e., increment port_rcv_constraint_errors for the port, and record
1829 * the 'error info' for this failure.
1830 */
1831static inline void ingress_pkey_table_fail(struct hfi1_pportdata *ppd,
1832                                           u16 pkey, u32 slid)
1833{
1834        struct hfi1_devdata *dd = ppd->dd;
1835
1836        incr_cntr64(&ppd->port_rcv_constraint_errors);
1837        if (!(dd->err_info_rcv_constraint.status & OPA_EI_STATUS_SMASK)) {
1838                dd->err_info_rcv_constraint.status |= OPA_EI_STATUS_SMASK;
1839                dd->err_info_rcv_constraint.slid = slid;
1840                dd->err_info_rcv_constraint.pkey = pkey;
1841        }
1842}
1843
1844/*
1845 * ingress_pkey_check - Return 0 if the ingress pkey is valid, return 1
1846 * otherwise. Use the criteria in the OPAv1 spec, section 9.10.14. idx
1847 * is a hint as to the best place in the partition key table to begin
1848 * searching. For performance reasons this function should not be called
1849 * on the data path, where the pkey check is expected to be done by HW;
1850 * rcv_pkey_check() should be called there instead.
1851 */
1852static inline int ingress_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
1853                                     u8 sc5, u8 idx, u32 slid, bool force)
1854{
1855        if (!(force) && !(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
1856                return 0;
1857
1858        /* If SC15, pkey[0:14] must be 0x7fff */
1859        if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1860                goto bad;
1861
1862        /* Is the pkey = 0x0, or 0x8000? */
1863        if ((pkey & PKEY_LOW_15_MASK) == 0)
1864                goto bad;
1865
1866        /* The most likely matching pkey has index 'idx' */
1867        if (ingress_pkey_matches_entry(pkey, ppd->pkeys[idx]))
1868                return 0;
1869
1870        /* no match - try the whole table */
1871        if (!ingress_pkey_table_search(ppd, pkey))
1872                return 0;
1873
1874bad:
1875        ingress_pkey_table_fail(ppd, pkey, slid);
1876        return 1;
1877}
1878
1879/*
1880 * rcv_pkey_check - Return 0 if the ingress pkey is valid, return 1
1881 * otherwise. It only ensures the pkey is valid for QP0. This function
1882 * should be called on the data path instead of ingress_pkey_check
1883 * as on data path, pkey check is done by HW (except for QP0).
1884 */
1885static inline int rcv_pkey_check(struct hfi1_pportdata *ppd, u16 pkey,
1886                                 u8 sc5, u16 slid)
1887{
1888        if (!(ppd->part_enforce & HFI1_PART_ENFORCE_IN))
1889                return 0;
1890
1891        /* If SC15, pkey[0:14] must be 0x7fff */
1892        if ((sc5 == 0xf) && ((pkey & PKEY_LOW_15_MASK) != PKEY_LOW_15_MASK))
1893                goto bad;
1894
1895        return 0;
1896bad:
1897        ingress_pkey_table_fail(ppd, pkey, slid);
1898        return 1;
1899}
1900
1901/* MTU handling */
1902
1903/* MTU enumeration, 256-4k match IB */
1904#define OPA_MTU_0     0
1905#define OPA_MTU_256   1
1906#define OPA_MTU_512   2
1907#define OPA_MTU_1024  3
1908#define OPA_MTU_2048  4
1909#define OPA_MTU_4096  5
1910
1911u32 lrh_max_header_bytes(struct hfi1_devdata *dd);
1912int mtu_to_enum(u32 mtu, int default_if_bad);
1913u16 enum_to_mtu(int mtu);
1914static inline int valid_ib_mtu(unsigned int mtu)
1915{
1916        return mtu == 256 || mtu == 512 ||
1917                mtu == 1024 || mtu == 2048 ||
1918                mtu == 4096;
1919}
1920
1921static inline int valid_opa_max_mtu(unsigned int mtu)
1922{
1923        return mtu >= 2048 &&
1924                (valid_ib_mtu(mtu) || mtu == 8192 || mtu == 10240);
1925}
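
/*
 * E.g. valid_opa_max_mtu(4096) and valid_opa_max_mtu(10240) are true,
 * while valid_opa_max_mtu(1024) is false: 1024 is a valid IB MTU but
 * is below the 2048 byte minimum for an OPA max MTU.
 */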
1926
1927int set_mtu(struct hfi1_pportdata *ppd);
1928
1929int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc);
1930void hfi1_disable_after_error(struct hfi1_devdata *dd);
1931int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit);
1932int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
1933
1934int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
1935int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
1936
1937void set_up_vau(struct hfi1_devdata *dd, u8 vau);
1938void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
1939void reset_link_credits(struct hfi1_devdata *dd);
1940void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);
1941
1942int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc);
1943
1944static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd)
1945{
1946        return ppd->dd;
1947}
1948
1949static inline struct hfi1_devdata *dd_from_dev(struct hfi1_ibdev *dev)
1950{
1951        return container_of(dev, struct hfi1_devdata, verbs_dev);
1952}
1953
1954static inline struct hfi1_devdata *dd_from_ibdev(struct ib_device *ibdev)
1955{
1956        return dd_from_dev(to_idev(ibdev));
1957}
1958
1959static inline struct hfi1_pportdata *ppd_from_ibp(struct hfi1_ibport *ibp)
1960{
1961        return container_of(ibp, struct hfi1_pportdata, ibport_data);
1962}
1963
1964static inline struct hfi1_ibdev *dev_from_rdi(struct rvt_dev_info *rdi)
1965{
1966        return container_of(rdi, struct hfi1_ibdev, rdi);
1967}
1968
1969static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
1970{
1971        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1972        u32 pidx = port - 1; /* IB numbers ports from 1, hw from 0 */
1973
1974        WARN_ON(pidx >= dd->num_pports);
1975        return &dd->pport[pidx].ibport_data;
1976}
1977
1978static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
1979{
1980        return &rcd->ppd->ibport_data;
1981}
1982
1983/**
1984 * hfi1_may_ecn - Check whether FECN or BECN processing should be done
1985 * @pkt: the packet to be evaluated
1986 *
1987 * Check whether the FECN or BECN bits in the packet's header are
1988 * enabled, depending on packet type.
1989 *
1990 * This function only checks for FECN and BECN bits. Additional checks
1991 * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
1992 * ensure correct handling.
1993 */
1994static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
1995{
1996        bool fecn, becn;
1997
1998        if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
1999                fecn = hfi1_16B_get_fecn(pkt->hdr);
2000                becn = hfi1_16B_get_becn(pkt->hdr);
2001        } else {
2002                fecn = ib_bth_get_fecn(pkt->ohdr);
2003                becn = ib_bth_get_becn(pkt->ohdr);
2004        }
2005        return fecn || becn;
2006}
2007
2008bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
2009                               bool prescan);
2010static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
2011{
2012        bool do_work;
2013
2014        do_work = hfi1_may_ecn(pkt);
2015        if (unlikely(do_work))
2016                return hfi1_process_ecn_slowpath(qp, pkt, false);
2017        return false;
2018}
2019
2020/*
2021 * Return the indexed PKEY from the port PKEY table.
2022 */
2023static inline u16 hfi1_get_pkey(struct hfi1_ibport *ibp, unsigned index)
2024{
2025        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2026        u16 ret;
2027
2028        if (index >= ARRAY_SIZE(ppd->pkeys))
2029                ret = 0;
2030        else
2031                ret = ppd->pkeys[index];
2032
2033        return ret;
2034}
2035
2036/*
2037 * Return the indexed GUID from the port GUIDs table.
2038 */
2039static inline __be64 get_sguid(struct hfi1_ibport *ibp, unsigned int index)
2040{
2041        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2042
2043        WARN_ON(index >= HFI1_GUIDS_PER_PORT);
2044        return cpu_to_be64(ppd->guids[index]);
2045}
2046
2047/*
2048 * Called by readers of cc_state only, must call under rcu_read_lock().
2049 */
2050static inline struct cc_state *get_cc_state(struct hfi1_pportdata *ppd)
2051{
2052        return rcu_dereference(ppd->cc_state);
2053}
2054
2055/*
2056 * Called by writers of cc_state only,  must call under cc_state_lock.
2057 */
2058static inline
2059struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
2060{
2061        return rcu_dereference_protected(ppd->cc_state,
2062                                         lockdep_is_held(&ppd->cc_state_lock));
2063}
2064
2065/*
2066 * values for dd->flags (_device_ related flags)
2067 */
2068#define HFI1_INITTED           0x1    /* chip and driver up and initted */
2069#define HFI1_PRESENT           0x2    /* chip accesses can be done */
2070#define HFI1_FROZEN            0x4    /* chip in SPC freeze */
2071#define HFI1_HAS_SDMA_TIMEOUT  0x8
2072#define HFI1_HAS_SEND_DMA      0x10   /* Supports Send DMA */
2073#define HFI1_FORCED_FREEZE     0x80   /* driver forced freeze mode */
2074#define HFI1_SHUTDOWN          0x100  /* device is shutting down */
2075
2076/* IB dword length mask in PBC (lower 11 bits); same for all chips */
2077#define HFI1_PBC_LENGTH_MASK                     ((1 << 11) - 1)
2078
2079/* ctxt_flag bit offsets */
2080                /* base context has not finished initializing */
2081#define HFI1_CTXT_BASE_UNINIT 1
2082                /* base context initialization failed */
2083#define HFI1_CTXT_BASE_FAILED 2
2084                /* waiting for a packet to arrive */
2085#define HFI1_CTXT_WAITING_RCV 3
2086                /* waiting for an urgent packet to arrive */
2087#define HFI1_CTXT_WAITING_URG 4
2088
2089/* free up any allocated data at close */
2090int hfi1_init_dd(struct hfi1_devdata *dd);
2091void hfi1_free_devdata(struct hfi1_devdata *dd);
2092
2093/* LED beaconing functions */
2094void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
2095                             unsigned int timeoff);
2096void shutdown_led_override(struct hfi1_pportdata *ppd);
2097
2098#define HFI1_CREDIT_RETURN_RATE (100)
2099
2100/*
2101 * The number of words for the KDETH protocol field.  If this is
2102 * larger than the actual field used, then part of the payload
2103 * will be in the header.
2104 *
2105 * Optimally, we want this sized so that a typical case will
2106 * use full cache lines.  The typical local KDETH header would
2107 * be:
2108 *
2109 *      Bytes   Field
2110 *        8     LRH
2111 *       12     BTH
2112 *       ??     KDETH
2113 *        8     RHF
2114 *      ---
2115 *       28 + KDETH
2116 *
2117 * For a 64-byte cache line, KDETH would need to be 36 bytes or 9 DWORDS
2118 */
2119#define DEFAULT_RCVHDRSIZE 9
2120
2121/*
2122 * Maximal header byte count:
2123 *
2124 *      Bytes   Field
2125 *        8     LRH
2126 *       40     GRH (optional)
2127 *       12     BTH
2128 *       ??     KDETH
2129 *        8     RHF
2130 *      ---
2131 *       68 + KDETH
2132 *
2133 * We also want to maintain a cache line alignment to assist DMA'ing
2134 * of the header bytes.  Round up to a good size.
2135 */
2136#define DEFAULT_RCVHDR_ENTSIZE 32
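
/*
 * Sanity check of the above (illustrative): with the default 9 dword
 * (36 byte) KDETH field, the maximal header is 68 + 36 = 104 bytes,
 * and a 32 dword (128 byte) entry is the smallest cache-line multiple
 * that holds it.
 */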
2137
2138bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
2139                        u32 nlocked, u32 npages);
2140int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr,
2141                            size_t npages, bool writable, struct page **pages);
2142void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
2143                             size_t npages, bool dirty);
2144
2145/**
2146 * hfi1_rcvhdrtail_kvaddr - return tail kvaddr
2147 * @rcd: the receive context
2148 */
2149static inline __le64 *hfi1_rcvhdrtail_kvaddr(const struct hfi1_ctxtdata *rcd)
2150{
2151        return (__le64 *)rcd->rcvhdrtail_kvaddr;
2152}
2153
2154static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
2155{
2156        u64 *kv = (u64 *)hfi1_rcvhdrtail_kvaddr(rcd);
2157
2158        if (kv)
2159                *kv = 0ULL;
2160}
2161
2162static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
2163{
2164        /*
2165         * volatile because it's a DMA target from the chip, routine is
2166         * inlined, and don't want register caching or reordering.
2167         */
2168        return (u32)le64_to_cpu(*hfi1_rcvhdrtail_kvaddr(rcd));
2169}
2170
2171static inline bool hfi1_packet_present(struct hfi1_ctxtdata *rcd)
2172{
2173        if (likely(!rcd->rcvhdrtail_kvaddr)) {
2174                u32 seq = rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)));
2175
2176                return !last_rcv_seq(rcd, seq);
2177        }
2178        return hfi1_rcd_head(rcd) != get_rcvhdrtail(rcd);
2179}
2180
2181/*
2182 * sysfs interface.
2183 */
2184
2185extern const char ib_hfi1_version[];
2186extern const struct attribute_group ib_hfi1_attr_group;
2187extern const struct attribute_group *hfi1_attr_port_groups[];
2188
2189int hfi1_device_create(struct hfi1_devdata *dd);
2190void hfi1_device_remove(struct hfi1_devdata *dd);
2191
2192int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
2193void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
2194/* Hook for sysfs read of QSFP */
2195int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
2196
2197int hfi1_pcie_init(struct hfi1_devdata *dd);
2198void hfi1_pcie_cleanup(struct pci_dev *pdev);
2199int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
2200void hfi1_pcie_ddcleanup(struct hfi1_devdata *dd);
2201int pcie_speeds(struct hfi1_devdata *dd);
2202int restore_pci_variables(struct hfi1_devdata *dd);
2203int save_pci_variables(struct hfi1_devdata *dd);
2204int do_pcie_gen3_transition(struct hfi1_devdata *dd);
2205void tune_pcie_caps(struct hfi1_devdata *dd);
2206int parse_platform_config(struct hfi1_devdata *dd);
2207int get_platform_config_field(struct hfi1_devdata *dd,
2208                              enum platform_config_table_type_encoding
2209                              table_type, int table_index, int field_index,
2210                              u32 *data, u32 len);
2211
2212struct pci_dev *get_pci_dev(struct rvt_dev_info *rdi);
2213
2214/*
2215 * Flush write combining store buffers (if present) and perform a write
2216 * barrier.
2217 */
2218static inline void flush_wc(void)
2219{
2220        asm volatile("sfence" : : : "memory");
2221}
2222
2223void handle_eflags(struct hfi1_packet *packet);
2224void seqfile_dump_rcd(struct seq_file *s, struct hfi1_ctxtdata *rcd);
2225
2226/* global module parameter variables */
2227extern unsigned int hfi1_max_mtu;
2228extern unsigned int hfi1_cu;
2229extern unsigned int user_credit_return_threshold;
2230extern int num_user_contexts;
2231extern unsigned long n_krcvqs;
2232extern uint krcvqs[];
2233extern int krcvqsset;
2234extern uint loopback;
2235extern uint quick_linkup;
2236extern uint rcv_intr_timeout;
2237extern uint rcv_intr_count;
2238extern uint rcv_intr_dynamic;
2239extern ushort link_crc_mask;
2240
2241extern struct mutex hfi1_mutex;
2242
2243/* Number of seconds before our card status check...  */
2244#define STATUS_TIMEOUT 60
2245
2246#define DRIVER_NAME             "hfi1"
2247#define HFI1_USER_MINOR_BASE     0
2248#define HFI1_TRACE_MINOR         127
2249#define HFI1_NMINORS             255
2250
2251#define PCI_VENDOR_ID_INTEL 0x8086
2252#define PCI_DEVICE_ID_INTEL0 0x24f0
2253#define PCI_DEVICE_ID_INTEL1 0x24f1
2254
2255#define HFI1_PKT_USER_SC_INTEGRITY                                          \
2256        (SEND_CTXT_CHECK_ENABLE_DISALLOW_NON_KDETH_PACKETS_SMASK            \
2257        | SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK           \
2258        | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_SMASK              \
2259        | SEND_CTXT_CHECK_ENABLE_DISALLOW_GRH_SMASK)
2260
2261#define HFI1_PKT_KERNEL_SC_INTEGRITY                                        \
2262        (SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK)
2263
2264static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd,
2265                                                  u16 ctxt_type)
2266{
2267        u64 base_sc_integrity;
2268
2269        /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
2270        if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
2271                return 0;
2272
2273        base_sc_integrity =
2274        SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
2275        | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK
2276        | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
2277        | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
2278        | SEND_CTXT_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
2279#ifndef CONFIG_FAULT_INJECTION
2280        | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK
2281#endif
2282        | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
2283        | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
2284        | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
2285        | SEND_CTXT_CHECK_ENABLE_DISALLOW_RAW_SMASK
2286        | SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
2287        | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
2288        | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK
2289        | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK
2290        | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK
2291        | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK;
2292
2293        if (ctxt_type == SC_USER)
2294                base_sc_integrity |=
2295#ifndef CONFIG_FAULT_INJECTION
2296                        SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_TEST_SMASK |
2297#endif
2298                        HFI1_PKT_USER_SC_INTEGRITY;
2299        else
2300                base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY;
2301
2302        /* turn on send-side job key checks if !A0 */
2303        if (!is_ax(dd))
2304                base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
2305
2306        return base_sc_integrity;
2307}
2308
2309static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
2310{
2311        u64 base_sdma_integrity;
2312
2313        /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */
2314        if (HFI1_CAP_IS_KSET(NO_INTEGRITY))
2315                return 0;
2316
2317        base_sdma_integrity =
2318        SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK
2319        | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK
2320        | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK
2321        | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK
2322        | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_BYPASS_PACKETS_SMASK
2323        | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_SMALL_IB_PACKETS_SMASK
2324        | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_IPV6_SMASK
2325        | SEND_DMA_CHECK_ENABLE_DISALLOW_RAW_SMASK
2326        | SEND_DMA_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK
2327        | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK
2328        | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK
2329        | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK
2330        | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK
2331        | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK;
2332
2333        if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
2334                base_sdma_integrity |=
2335                SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK;
2336
2337        /* turn on send-side job key checks if !A0 */
2338        if (!is_ax(dd))
2339                base_sdma_integrity |=
2340                        SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
2341
2342        return base_sdma_integrity;
2343}
2344
2345#define dd_dev_emerg(dd, fmt, ...) \
2346        dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
2347                  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2348
2349#define dd_dev_err(dd, fmt, ...) \
2350        dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
2351                rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2352
2353#define dd_dev_err_ratelimited(dd, fmt, ...) \
2354        dev_err_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2355                            rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2356                            ##__VA_ARGS__)
2357
2358#define dd_dev_warn(dd, fmt, ...) \
2359        dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
2360                 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2361
2362#define dd_dev_warn_ratelimited(dd, fmt, ...) \
2363        dev_warn_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2364                             rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2365                             ##__VA_ARGS__)
2366
2367#define dd_dev_info(dd, fmt, ...) \
2368        dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
2369                 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2370
2371#define dd_dev_info_ratelimited(dd, fmt, ...) \
2372        dev_info_ratelimited(&(dd)->pcidev->dev, "%s: " fmt, \
2373                             rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), \
2374                             ##__VA_ARGS__)
2375
2376#define dd_dev_dbg(dd, fmt, ...) \
2377        dev_dbg(&(dd)->pcidev->dev, "%s: " fmt, \
2378                rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
2379
2380#define hfi1_dev_porterr(dd, port, fmt, ...) \
2381        dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
2382                rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), (port), ##__VA_ARGS__)
2383
2384/*
2385 * this is used for formatting hw error messages...
2386 */
2387struct hfi1_hwerror_msgs {
2388        u64 mask;
2389        const char *msg;
2390        size_t sz;
2391};
2392
2393/* in intr.c... */
2394void hfi1_format_hwerrors(u64 hwerrs,
2395                          const struct hfi1_hwerror_msgs *hwerrmsgs,
2396                          size_t nhwerrmsgs, char *msg, size_t lmsg);
2397
2398#define USER_OPCODE_CHECK_VAL 0xC0
2399#define USER_OPCODE_CHECK_MASK 0xC0
2400#define OPCODE_CHECK_VAL_DISABLED 0x0
2401#define OPCODE_CHECK_MASK_DISABLED 0x0
2402
2403static inline void hfi1_reset_cpu_counters(struct hfi1_devdata *dd)
2404{
2405        struct hfi1_pportdata *ppd;
2406        int i;
2407
2408        dd->z_int_counter = get_all_cpu_total(dd->int_counter);
2409        dd->z_rcv_limit = get_all_cpu_total(dd->rcv_limit);
2410        dd->z_send_schedule = get_all_cpu_total(dd->send_schedule);
2411
2412        ppd = (struct hfi1_pportdata *)(dd + 1);
2413        for (i = 0; i < dd->num_pports; i++, ppd++) {
2414                ppd->ibport_data.rvp.z_rc_acks =
2415                        get_all_cpu_total(ppd->ibport_data.rvp.rc_acks);
2416                ppd->ibport_data.rvp.z_rc_qacks =
2417                        get_all_cpu_total(ppd->ibport_data.rvp.rc_qacks);
2418        }
2419}
2420
2421/* Control LED state */
2422static inline void setextled(struct hfi1_devdata *dd, u32 on)
2423{
2424        if (on)
2425                write_csr(dd, DCC_CFG_LED_CNTRL, 0x1F);
2426        else
2427                write_csr(dd, DCC_CFG_LED_CNTRL, 0x10);
2428}
2429
2430/* return the i2c resource given the target */
2431static inline u32 i2c_target(u32 target)
2432{
2433        return target ? CR_I2C2 : CR_I2C1;
2434}
2435
2436/* return the i2c chain chip resource that this HFI uses for QSFP */
2437static inline u32 qsfp_resource(struct hfi1_devdata *dd)
2438{
2439        return i2c_target(dd->hfi1_id);
2440}
2441
2442/* Is this device integrated or discrete? */
2443static inline bool is_integrated(struct hfi1_devdata *dd)
2444{
2445        return dd->pcidev->device == PCI_DEVICE_ID_INTEL1;
2446}
2447
2448/**
2449 * hfi1_need_drop - detect need for drop
2450 * @dd: the device
2451 *
2452 * In some cases, the first packet needs to be dropped.
2453 *
2454 * Return true if the current packet needs to be dropped and false otherwise.
2455 */
2456static inline bool hfi1_need_drop(struct hfi1_devdata *dd)
2457{
2458        if (unlikely(dd->do_drop &&
2459                     atomic_xchg(&dd->drop_packet, DROP_PACKET_OFF) ==
2460                     DROP_PACKET_ON)) {
2461                dd->do_drop = false;
2462                return true;
2463        }
2464        return false;
2465}
2466
2467int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp);
2468
2469#define DD_DEV_ENTRY(dd)       __string(dev, dev_name(&(dd)->pcidev->dev))
2470#define DD_DEV_ASSIGN(dd)      __assign_str(dev, dev_name(&(dd)->pcidev->dev))
2471
2472static inline void hfi1_update_ah_attr(struct ib_device *ibdev,
2473                                       struct rdma_ah_attr *attr)
2474{
2475        struct hfi1_pportdata *ppd;
2476        struct hfi1_ibport *ibp;
2477        u32 dlid = rdma_ah_get_dlid(attr);
2478
2479        /*
2480         * Kernel clients may not have set up GRH information;
2481         * set that here.
2482         */
2483        ibp = to_iport(ibdev, rdma_ah_get_port_num(attr));
2484        ppd = ppd_from_ibp(ibp);
2485        if ((((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
2486              (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE))) &&
2487            (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)) &&
2488            (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
2489            (!(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))) ||
2490            (rdma_ah_get_make_grd(attr))) {
2491                rdma_ah_set_ah_flags(attr, IB_AH_GRH);
2492                rdma_ah_set_interface_id(attr, OPA_MAKE_ID(dlid));
2493                rdma_ah_set_subnet_prefix(attr, ibp->rvp.gid_prefix);
2494        }
2495}
2496
2497/*
2498 * hfi1_check_mcast - Check if the given lid is
2499 * in the OPA multicast range.
2500 *
2501 * The LID might either reside in ah.dlid or might be
2502 * in the GRH of the address handle as DGID if extended
2503 * addresses are in use.
2504 */
2505static inline bool hfi1_check_mcast(u32 lid)
2506{
2507        return ((lid >= opa_get_mcast_base(OPA_MCAST_NR)) &&
2508                (lid != be32_to_cpu(OPA_LID_PERMISSIVE)));
2509}
2510
2511#define opa_get_lid(lid, format)        \
2512        __opa_get_lid(lid, OPA_PORT_PACKET_FORMAT_##format)
2513
2514/* Convert a lid to a specific lid space */
2515static inline u32 __opa_get_lid(u32 lid, u8 format)
2516{
2517        bool is_mcast = hfi1_check_mcast(lid);
2518
2519        switch (format) {
2520        case OPA_PORT_PACKET_FORMAT_8B:
2521        case OPA_PORT_PACKET_FORMAT_10B:
2522                if (is_mcast)
2523                        return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
2524                                0xF0000);
2525                return lid & 0xFFFFF;
2526        case OPA_PORT_PACKET_FORMAT_16B:
2527                if (is_mcast)
2528                        return (lid - opa_get_mcast_base(OPA_MCAST_NR) +
2529                                0xF00000);
2530                return lid & 0xFFFFFF;
2531        case OPA_PORT_PACKET_FORMAT_9B:
2532                if (is_mcast)
2533                        return (lid -
2534                                opa_get_mcast_base(OPA_MCAST_NR) +
2535                                be16_to_cpu(IB_MULTICAST_LID_BASE));
2536                else
2537                        return lid & 0xFFFF;
2538        default:
2539                return lid;
2540        }
2541}
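
/*
 * Worked examples, assuming OPA_MCAST_NR is 4 (32-bit multicast base
 * 0xF0000000):
 *
 *	opa_get_lid(0x1234, 9B)      -> 0x1234 (unicast, unchanged)
 *	opa_get_lid(0xF0000003, 9B)  -> 0xC003 (IB mcast base 0xC000)
 *	opa_get_lid(0xF0000003, 16B) -> 0xF00003
 *	opa_get_lid(0xF0000003, 10B) -> 0xF0003
 */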
2542
2543/* Return true if the given lid is in the OPA 16B multicast range */
2544static inline bool hfi1_is_16B_mcast(u32 lid)
2545{
2546        return ((lid >=
2547                opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 16B)) &&
2548                (lid != opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B)));
2549}
2550
2551static inline void hfi1_make_opa_lid(struct rdma_ah_attr *attr)
2552{
2553        const struct ib_global_route *grh = rdma_ah_read_grh(attr);
2554        u32 dlid = rdma_ah_get_dlid(attr);
2555
2556        /* Modify ah_attr.dlid to be in the 32 bit LID space.
2557         * This is how the address will be laid out:
2558         * Assuming OPA_MCAST_NR is 4,
2559         * 32 bit permissive LID = 0xFFFFFFFF
2560         * Multicast LID range = 0xFFFFFFFE to 0xF0000000
2561         * Unicast LID range = 0xEFFFFFFF to 1
2562         * Invalid LID = 0
2563         */
2564        if (ib_is_opa_gid(&grh->dgid))
2565                dlid = opa_get_lid_from_gid(&grh->dgid);
2566        else if ((dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
2567                 (dlid != be16_to_cpu(IB_LID_PERMISSIVE)) &&
2568                 (dlid != be32_to_cpu(OPA_LID_PERMISSIVE)))
2569                dlid = dlid - be16_to_cpu(IB_MULTICAST_LID_BASE) +
2570                        opa_get_mcast_base(OPA_MCAST_NR);
2571        else if (dlid == be16_to_cpu(IB_LID_PERMISSIVE))
2572                dlid = be32_to_cpu(OPA_LID_PERMISSIVE);
2573
2574        rdma_ah_set_dlid(attr, dlid);
2575}
2576
2577static inline u8 hfi1_get_packet_type(u32 lid)
2578{
2579        /* 9B if lid >= 0xF0000000 */
2580        if (lid >= opa_get_mcast_base(OPA_MCAST_NR))
2581                return HFI1_PKT_TYPE_9B;
2582
2583        /* 16B if lid >= 0xC000 */
2584        if (lid >= opa_get_lid(opa_get_mcast_base(OPA_MCAST_NR), 9B))
2585                return HFI1_PKT_TYPE_16B;
2586
2587        return HFI1_PKT_TYPE_9B;
2588}
2589
2590static inline bool hfi1_get_hdr_type(u32 lid, struct rdma_ah_attr *attr)
2591{
2592        /*
2593         * If there was an incoming 16B packet with permissive
2594         * LIDs, OPA GIDs would have been programmed when those
2595         * packets were received. A 16B packet will have to
2596         * be sent in response to that packet. Return a 16B
2597         * header type if that's the case.
2598         */
2599        if (rdma_ah_get_dlid(attr) == be32_to_cpu(OPA_LID_PERMISSIVE))
2600                return (ib_is_opa_gid(&rdma_ah_read_grh(attr)->dgid)) ?
2601                        HFI1_PKT_TYPE_16B : HFI1_PKT_TYPE_9B;
2602
2603        /*
2604 * Return a 16B header type if either the destination
2605         * or source lid is extended.
2606         */
2607        if (hfi1_get_packet_type(rdma_ah_get_dlid(attr)) == HFI1_PKT_TYPE_16B)
2608                return HFI1_PKT_TYPE_16B;
2609
2610        return hfi1_get_packet_type(lid);
2611}
2612
2613static inline void hfi1_make_ext_grh(struct hfi1_packet *packet,
2614                                     struct ib_grh *grh, u32 slid,
2615                                     u32 dlid)
2616{
2617        struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
2618        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2619
2620        if (!ibp)
2621                return;
2622
2623        grh->hop_limit = 1;
2624        grh->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
2625        if (slid == opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))
2626                grh->sgid.global.interface_id =
2627                        OPA_MAKE_ID(be32_to_cpu(OPA_LID_PERMISSIVE));
2628        else
2629                grh->sgid.global.interface_id = OPA_MAKE_ID(slid);
2630
2631        /*
2632         * Upper layers (like mad) may compare the dgid in the
2633         * wc that is obtained here with the sgid_index in
2634         * the wr. Since sgid_index in wr is always 0 for
2635         * extended lids, set the dgid here to the default
2636         * IB gid.
2637         */
2638        grh->dgid.global.subnet_prefix = ibp->rvp.gid_prefix;
2639        grh->dgid.global.interface_id =
2640                cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
2641}
2642
2643static inline int hfi1_get_16b_padding(u32 hdr_size, u32 payload)
2644{
2645        return -(hdr_size + payload + (SIZE_OF_CRC << 2) +
2646                     SIZE_OF_LT) & 0x7;
2647}
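
/*
 * E.g. assuming SIZE_OF_CRC is one dword and SIZE_OF_LT one byte, a
 * 36 byte header with a 100 byte payload totals 36 + 100 + 4 + 1 =
 * 141 bytes, so 3 pad bytes round the 16B packet up to a multiple of
 * 8 (144 bytes).
 */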
2648
2649static inline void hfi1_make_ib_hdr(struct ib_header *hdr,
2650                                    u16 lrh0, u16 len,
2651                                    u16 dlid, u16 slid)
2652{
2653        hdr->lrh[0] = cpu_to_be16(lrh0);
2654        hdr->lrh[1] = cpu_to_be16(dlid);
2655        hdr->lrh[2] = cpu_to_be16(len);
2656        hdr->lrh[3] = cpu_to_be16(slid);
2657}
2658
2659static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
2660                                     u32 slid, u32 dlid,
2661                                     u16 len, u16 pkey,
2662                                     bool becn, bool fecn, u8 l4,
2663                                     u8 sc)
2664{
2665        u32 lrh0 = 0;
2666        u32 lrh1 = 0x40000000;
2667        u32 lrh2 = 0;
2668        u32 lrh3 = 0;
2669
2670        lrh0 = (lrh0 & ~OPA_16B_BECN_MASK) | (becn << OPA_16B_BECN_SHIFT);
2671        lrh0 = (lrh0 & ~OPA_16B_LEN_MASK) | (len << OPA_16B_LEN_SHIFT);
2672        lrh0 = (lrh0 & ~OPA_16B_LID_MASK)  | (slid & OPA_16B_LID_MASK);
2673        lrh1 = (lrh1 & ~OPA_16B_FECN_MASK) | (fecn << OPA_16B_FECN_SHIFT);
2674        lrh1 = (lrh1 & ~OPA_16B_SC_MASK) | (sc << OPA_16B_SC_SHIFT);
2675        lrh1 = (lrh1 & ~OPA_16B_LID_MASK) | (dlid & OPA_16B_LID_MASK);
2676        lrh2 = (lrh2 & ~OPA_16B_SLID_MASK) |
2677                ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
2678        lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
2679                ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
2680        lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
2681        lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
2682
2683        hdr->lrh[0] = lrh0;
2684        hdr->lrh[1] = lrh1;
2685        hdr->lrh[2] = lrh2;
2686        hdr->lrh[3] = lrh3;
2687}
2688#endif                          /* _HFI1_KERNEL_H */
2689