/* linux/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Huawei HiNIC PCI Express Linux driver
   4 * Copyright(c) 2017 Huawei Technologies Co., Ltd
   5 */
   6
   7#include <linux/kernel.h>
   8#include <linux/types.h>
   9#include <linux/errno.h>
  10#include <linux/pci.h>
  11#include <linux/device.h>
  12#include <linux/workqueue.h>
  13#include <linux/interrupt.h>
  14#include <linux/slab.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/log2.h>
  17#include <asm/byteorder.h>
  18#include <asm/barrier.h>
  19
  20#include "hinic_hw_dev.h"
  21#include "hinic_hw_csr.h"
  22#include "hinic_hw_if.h"
  23#include "hinic_hw_eqs.h"
  24
#define HINIC_EQS_WQ_NAME                       "hinic_eqs"

/* number of pages needed to hold q_len elements of elem_size bytes each */
#define GET_EQ_NUM_PAGES(eq, pg_size)           \
                (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))

#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size)     ((pg_size) / (eq)->elem_size)

/* CSR addresses differ between async (AEQ) and completion (CEQ) queues */
#define EQ_CONS_IDX_REG_ADDR(eq)        (((eq)->type == HINIC_AEQ) ? \
                        HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
                        HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))

#define EQ_PROD_IDX_REG_ADDR(eq)        (((eq)->type == HINIC_AEQ) ? \
                        HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
                        HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))

#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
                        HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
                        HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))

#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
                        HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
                        HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))

/*
 * locate element idx: pick the page (idx / num_elem_in_pg), then offset
 * within it (num_elem_in_pg is validated to be a power of 2 in init_eq)
 */
#define GET_EQ_ELEMENT(eq, idx)         \
                ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
                 (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))

#define GET_AEQ_ELEM(eq, idx)           ((struct hinic_aeq_elem *) \
                                        GET_EQ_ELEMENT(eq, idx))

#define GET_CEQ_ELEM(eq, idx)           ((u32 *) \
                                         GET_EQ_ELEMENT(eq, idx))

#define GET_CURR_AEQ_ELEM(eq)           GET_AEQ_ELEM(eq, (eq)->cons_idx)

#define GET_CURR_CEQ_ELEM(eq)           GET_CEQ_ELEM(eq, (eq)->cons_idx)

#define PAGE_IN_4K(page_size)           ((page_size) >> 12)
#define EQ_SET_HW_PAGE_SIZE_VAL(eq)     (ilog2(PAGE_IN_4K((eq)->page_size)))

#define ELEMENT_SIZE_IN_32B(eq)         (((eq)->elem_size) >> 5)
#define EQ_SET_HW_ELEM_SIZE_VAL(eq)     (ilog2(ELEMENT_SIZE_IN_32B(eq)))

#define EQ_MAX_PAGES                    8

/* CEQ element layout: type bits [25:23], data bits [25:0] of the 32-bit word */
#define CEQE_TYPE_SHIFT                 23
#define CEQE_TYPE_MASK                  0x7

#define CEQE_TYPE(ceqe)                 (((ceqe) >> CEQE_TYPE_SHIFT) &  \
                                         CEQE_TYPE_MASK)

#define CEQE_DATA_MASK                  0x3FFFFFF
#define CEQE_DATA(ceqe)                 ((ceqe) & CEQE_DATA_MASK)

/*
 * recover the container from an eq: the eq's live in an array, so stepping
 * back q_id entries lands on element 0, which container_of maps to the parent
 */
#define aeq_to_aeqs(eq)                 \
                container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])

#define ceq_to_ceqs(eq)                 \
                container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])

#define work_to_aeq_work(work)          \
                container_of(work, struct hinic_eq_work, work)

#define DMA_ATTR_AEQ_DEFAULT            0
#define DMA_ATTR_CEQ_DEFAULT            0

/* No coalescence */
#define THRESH_CEQ_DEFAULT              0

/* interrupt mode programmed into ctrl0: armed (one-shot) or always firing */
enum eq_int_mode {
        EQ_INT_MODE_ARMED,
        EQ_INT_MODE_ALWAYS
};

/* arm-bit value written together with the consumer index in eq_update_ci() */
enum eq_arm_state {
        EQ_NOT_ARMED,
        EQ_ARMED
};
 103
 104/**
 105 * hinic_aeq_register_hw_cb - register AEQ callback for specific event
 106 * @aeqs: pointer to Async eqs of the chip
 107 * @event: aeq event to register callback for it
 108 * @handle: private data will be used by the callback
 109 * @hwe_handler: callback function
 110 **/
 111void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
 112                              enum hinic_aeq_type event, void *handle,
 113                              void (*hwe_handler)(void *handle, void *data,
 114                                                  u8 size))
 115{
 116        struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
 117
 118        hwe_cb->hwe_handler = hwe_handler;
 119        hwe_cb->handle = handle;
 120        hwe_cb->hwe_state = HINIC_EQE_ENABLED;
 121}
 122
 123/**
 124 * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event
 125 * @aeqs: pointer to Async eqs of the chip
 126 * @event: aeq event to unregister callback for it
 127 **/
 128void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
 129                                enum hinic_aeq_type event)
 130{
 131        struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
 132
 133        hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;
 134
 135        while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
 136                schedule();
 137
 138        hwe_cb->hwe_handler = NULL;
 139}
 140
 141/**
 142 * hinic_ceq_register_cb - register CEQ callback for specific event
 143 * @ceqs: pointer to Completion eqs part of the chip
 144 * @event: ceq event to register callback for it
 145 * @handle: private data will be used by the callback
 146 * @handler: callback function
 147 **/
 148void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
 149                           enum hinic_ceq_type event, void *handle,
 150                           void (*handler)(void *handle, u32 ceqe_data))
 151{
 152        struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
 153
 154        ceq_cb->handler = handler;
 155        ceq_cb->handle = handle;
 156        ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
 157}
 158
 159/**
 160 * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event
 161 * @ceqs: pointer to Completion eqs part of the chip
 162 * @event: ceq event to unregister callback for it
 163 **/
 164void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
 165                             enum hinic_ceq_type event)
 166{
 167        struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
 168
 169        ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;
 170
 171        while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
 172                schedule();
 173
 174        ceq_cb->handler = NULL;
 175}
 176
 177static u8 eq_cons_idx_checksum_set(u32 val)
 178{
 179        u8 checksum = 0;
 180        int idx;
 181
 182        for (idx = 0; idx < 32; idx += 4)
 183                checksum ^= ((val >> idx) & 0xF);
 184
 185        return (checksum & 0xF);
 186}
 187
/**
 * eq_update_ci - update the HW cons idx of event queue
 * @eq: the event queue to update the cons idx for
 * @arm_state: the arm bit value of eq's interrupt
 **/
static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
{
        u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);

        /* Read Modify Write */
        val = hinic_hwif_read_reg(eq->hwif, addr);

        /* drop the fields we are about to rewrite, keep everything else */
        val = HINIC_EQ_CI_CLEAR(val, IDX)       &
              HINIC_EQ_CI_CLEAR(val, WRAPPED)   &
              HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
              HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);

        val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX)    |
               HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
               HINIC_EQ_CI_SET(arm_state, INT_ARMED);

        /* checksum is computed over the fully assembled value, so set it last */
        val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);

        hinic_hwif_write_reg(eq->hwif, addr, val);
}
 213
/**
 * aeq_irq_handler - handler for the AEQ event
 * @eq: the Async Event Queue that received the event
 *
 * Drains up to q_len pending elements: for each element owned by SW
 * (wrapped bit differs from eq->wrapped), dispatch to the registered
 * callback and advance the consumer index, toggling eq->wrapped on
 * queue wrap-around.
 **/
static void aeq_irq_handler(struct hinic_eq *eq)
{
        struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
        struct hinic_hwif *hwif = aeqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_aeq_elem *aeqe_curr;
        struct hinic_hw_event_cb *hwe_cb;
        enum hinic_aeq_type event;
        unsigned long eqe_state;
        u32 aeqe_desc;
        int i, size;

        /* bound the loop by q_len so one pass never spins forever */
        for (i = 0; i < eq->q_len; i++) {
                aeqe_curr = GET_CURR_AEQ_ELEM(eq);

                /* Data in HW is in Big endian Format */
                aeqe_desc = be32_to_cpu(aeqe_curr->desc);

                /* HW toggles the wrapped bit, when it adds eq element */
                if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
                        break;

                /* make sure the element body is read only after the
                 * ownership (wrapped bit) check above
                 */
                dma_rmb();

                event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
                if (event >= HINIC_MAX_AEQ_EVENTS) {
                        dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
                        return;
                }

                if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
                        hwe_cb = &aeqs->hwe_cb[event];

                        size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);

                        /* mark the callback RUNNING only if it is still
                         * ENABLED; unregister waits for RUNNING to clear
                         */
                        eqe_state = cmpxchg(&hwe_cb->hwe_state,
                                            HINIC_EQE_ENABLED,
                                            HINIC_EQE_ENABLED |
                                            HINIC_EQE_RUNNING);
                        if (eqe_state == HINIC_EQE_ENABLED &&
                            hwe_cb->hwe_handler)
                                hwe_cb->hwe_handler(hwe_cb->handle,
                                                    aeqe_curr->data, size);
                        else
                                dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
                                        event);

                        hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
                }

                eq->cons_idx++;

                /* wrap-around: HW flips the wrapped bit each pass */
                if (eq->cons_idx == eq->q_len) {
                        eq->cons_idx = 0;
                        eq->wrapped = !eq->wrapped;
                }
        }
}
 276
/**
 * ceq_event_handler - handler for the ceq events
 * @ceqs: ceqs part of the chip
 * @ceqe: ceq element that describes the event
 *
 * Decodes the event type from the 32-bit element and dispatches the data
 * bits to the registered callback for that type.
 **/
static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
{
        struct hinic_hwif *hwif = ceqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_ceq_cb *ceq_cb;
        enum hinic_ceq_type event;
        unsigned long eqe_state;

        event = CEQE_TYPE(ceqe);
        if (event >= HINIC_MAX_CEQ_EVENTS) {
                dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
                return;
        }

        ceq_cb = &ceqs->ceq_cb[event];

        /* mark the callback RUNNING only if it is still ENABLED;
         * hinic_ceq_unregister_cb waits for RUNNING to clear
         */
        eqe_state = cmpxchg(&ceq_cb->ceqe_state,
                            HINIC_EQE_ENABLED,
                            HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);

        if (eqe_state == HINIC_EQE_ENABLED && ceq_cb->handler)
                ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
        else
                dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);

        ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
}
 309
/**
 * ceq_irq_handler - handler for the CEQ event
 * @eq: the Completion Event Queue that received the event
 *
 * Drains up to q_len pending elements; each CEQ element is a single
 * 32-bit word carrying both the ownership (wrapped) bit and the data.
 **/
static void ceq_irq_handler(struct hinic_eq *eq)
{
        struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
        u32 ceqe;
        int i;

        for (i = 0; i < eq->q_len; i++) {
                ceqe = *(GET_CURR_CEQ_ELEM(eq));

                /* Data in HW is in Big endian Format */
                ceqe = be32_to_cpu(ceqe);

                /* HW toggles the wrapped bit, when it adds eq element event */
                if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
                        break;

                ceq_event_handler(ceqs, ceqe);

                eq->cons_idx++;

                /* wrap-around: HW flips the wrapped bit each pass */
                if (eq->cons_idx == eq->q_len) {
                        eq->cons_idx = 0;
                        eq->wrapped = !eq->wrapped;
                }
        }
}
 340
 341/**
 342 * eq_irq_handler - handler for the EQ event
 343 * @data: the Event Queue that received the event
 344 **/
 345static void eq_irq_handler(void *data)
 346{
 347        struct hinic_eq *eq = data;
 348
 349        if (eq->type == HINIC_AEQ)
 350                aeq_irq_handler(eq);
 351        else if (eq->type == HINIC_CEQ)
 352                ceq_irq_handler(eq);
 353
 354        eq_update_ci(eq, EQ_ARMED);
 355}
 356
 357/**
 358 * eq_irq_work - the work of the EQ that received the event
 359 * @work: the work struct that is associated with the EQ
 360 **/
 361static void eq_irq_work(struct work_struct *work)
 362{
 363        struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
 364        struct hinic_eq *aeq;
 365
 366        aeq = aeq_work->data;
 367        eq_irq_handler(aeq);
 368}
 369
 370/**
 371 * ceq_tasklet - the tasklet of the EQ that received the event
 372 * @t: the tasklet struct pointer
 373 **/
 374static void ceq_tasklet(struct tasklet_struct *t)
 375{
 376        struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);
 377
 378        eq_irq_handler(ceq);
 379}
 380
 381/**
 382 * aeq_interrupt - aeq interrupt handler
 383 * @irq: irq number
 384 * @data: the Async Event Queue that collected the event
 385 **/
 386static irqreturn_t aeq_interrupt(int irq, void *data)
 387{
 388        struct hinic_eq_work *aeq_work;
 389        struct hinic_eq *aeq = data;
 390        struct hinic_aeqs *aeqs;
 391
 392        /* clear resend timer cnt register */
 393        hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);
 394
 395        aeq_work = &aeq->aeq_work;
 396        aeq_work->data = aeq;
 397
 398        aeqs = aeq_to_aeqs(aeq);
 399        queue_work(aeqs->workq, &aeq_work->work);
 400
 401        return IRQ_HANDLED;
 402}
 403
 404/**
 405 * ceq_interrupt - ceq interrupt handler
 406 * @irq: irq number
 407 * @data: the Completion Event Queue that collected the event
 408 **/
 409static irqreturn_t ceq_interrupt(int irq, void *data)
 410{
 411        struct hinic_eq *ceq = data;
 412
 413        /* clear resend timer cnt register */
 414        hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
 415
 416        tasklet_schedule(&ceq->ceq_tasklet);
 417
 418        return IRQ_HANDLED;
 419}
 420
/*
 * get_ctrl0_val - build the ctrl0 register value for an eq via read-modify-
 * write: clear the interrupt index/DMA attr/PCI intf/interrupt mode fields
 * and refill them with the eq's msix entry and the driver defaults.
 * Note: @addr is recomputed internally from eq->q_id; the parameter value
 * passed in is not used.
 */
static u32 get_ctrl0_val(struct hinic_eq *eq, u32 addr)
{
        struct msix_entry *msix_entry = &eq->msix_entry;
        enum hinic_eq_type type = eq->type;
        u32 val, ctrl0;

        if (type == HINIC_AEQ) {
                /* RMW Ctrl0 */
                addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);

                val = hinic_hwif_read_reg(eq->hwif, addr);

                val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX)      &
                      HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR)     &
                      HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
                      HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);

                ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX)     |
                        HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
                        HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
                                             PCI_INTF_IDX)                   |
                        HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);

                val |= ctrl0;
        } else {
                /* RMW Ctrl0 - CEQ additionally carries a kick threshold */
                addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);

                val = hinic_hwif_read_reg(eq->hwif, addr);

                val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX)     &
                      HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR)     &
                      HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH)  &
                      HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
                      HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);

                ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX)     |
                        HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR)  |
                        HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
                        HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
                                             PCI_INTF_IDX)                    |
                        HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);

                val |= ctrl0;
        }
        return val;
}
 468
 469static void set_ctrl0(struct hinic_eq *eq)
 470{
 471        u32 val, addr;
 472
 473        if (eq->type == HINIC_AEQ)
 474                addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
 475        else
 476                addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
 477
 478        val = get_ctrl0_val(eq, addr);
 479
 480        hinic_hwif_write_reg(eq->hwif, addr, val);
 481}
 482
/*
 * get_ctrl1_val - build the ctrl1 register value for an eq via read-modify-
 * write: queue length, element size (AEQ only) and HW page size encoding.
 * Note: @addr is recomputed internally from eq->q_id; the parameter value
 * passed in is not used.
 */
static u32 get_ctrl1_val(struct hinic_eq *eq, u32 addr)
{
        u32 page_size_val, elem_size, val, ctrl1;
        enum hinic_eq_type type = eq->type;

        if (type == HINIC_AEQ) {
                /* RMW Ctrl1 */
                addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);

                /* HW encodings: page size as log2(pages of 4K),
                 * element size as log2(size in 32B units)
                 */
                page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
                elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);

                val = hinic_hwif_read_reg(eq->hwif, addr);

                val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN)          &
                      HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE)    &
                      HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

                ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN)            |
                        HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE)      |
                        HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

                val |= ctrl1;
        } else {
                /* RMW Ctrl1 - CEQ has no element-size field */
                addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);

                page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);

                val = hinic_hwif_read_reg(eq->hwif, addr);

                val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
                      HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);

                ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
                        HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);

                val |= ctrl1;
        }
        return val;
}
 524
 525static void set_ctrl1(struct hinic_eq *eq)
 526{
 527        u32 addr, val;
 528
 529        if (eq->type == HINIC_AEQ)
 530                addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
 531        else
 532                addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
 533
 534        val = get_ctrl1_val(eq, addr);
 535
 536        hinic_hwif_write_reg(eq->hwif, addr, val);
 537}
 538
/*
 * set_ceq_ctrl_reg - ask the management FW to program a CEQ's ctrl0/ctrl1
 * registers on behalf of the function (used when direct CSR writes are not
 * done, see set_eq_ctrls for the VF path).
 *
 * Return: 0 on success, -EFAULT when the mgmt channel fails or the FW
 * reports a non-zero status / empty reply.
 */
static int set_ceq_ctrl_reg(struct hinic_eq *eq)
{
        struct hinic_ceq_ctrl_reg ceq_ctrl = {0};
        struct hinic_hwdev *hwdev = eq->hwdev;
        u16 out_size = sizeof(ceq_ctrl);
        u16 in_size = sizeof(ceq_ctrl);
        struct hinic_pfhwdev *pfhwdev;
        u32 addr;
        int err;

        pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);

        /* build the same RMW'd values a direct CSR write would use */
        addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
        ceq_ctrl.ctrl0 = get_ctrl0_val(eq, addr);
        addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
        ceq_ctrl.ctrl1 = get_ctrl1_val(eq, addr);

        ceq_ctrl.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
        ceq_ctrl.q_id = eq->q_id;

        /* synchronous request; reply is written back into ceq_ctrl */
        err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
                                HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP,
                                &ceq_ctrl, in_size,
                                &ceq_ctrl, &out_size, HINIC_MGMT_MSG_SYNC);
        if (err || !out_size || ceq_ctrl.status) {
                dev_err(&hwdev->hwif->pdev->dev,
                        "Failed to set ceq %d ctrl reg, err: %d status: 0x%x, out_size: 0x%x\n",
                        eq->q_id, err, ceq_ctrl.status, out_size);
                return -EFAULT;
        }

        return 0;
}
 572
 573/**
 574 * set_eq_ctrls - setting eq's ctrl registers
 575 * @eq: the Event Queue for setting
 576 **/
 577static int set_eq_ctrls(struct hinic_eq *eq)
 578{
 579        if (HINIC_IS_VF(eq->hwif) && eq->type == HINIC_CEQ)
 580                return set_ceq_ctrl_reg(eq);
 581
 582        set_ctrl0(eq);
 583        set_ctrl1(eq);
 584        return 0;
 585}
 586
 587/**
 588 * aeq_elements_init - initialize all the elements in the aeq
 589 * @eq: the Async Event Queue
 590 * @init_val: value to initialize the elements with it
 591 **/
 592static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
 593{
 594        struct hinic_aeq_elem *aeqe;
 595        int i;
 596
 597        for (i = 0; i < eq->q_len; i++) {
 598                aeqe = GET_AEQ_ELEM(eq, i);
 599                aeqe->desc = cpu_to_be32(init_val);
 600        }
 601
 602        wmb();  /* Write the initilzation values */
 603}
 604
 605/**
 606 * ceq_elements_init - Initialize all the elements in the ceq
 607 * @eq: the event queue
 608 * @init_val: value to init with it the elements
 609 **/
 610static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
 611{
 612        u32 *ceqe;
 613        int i;
 614
 615        for (i = 0; i < eq->q_len; i++) {
 616                ceqe = GET_CEQ_ELEM(eq, i);
 617                *(ceqe) = cpu_to_be32(init_val);
 618        }
 619
 620        wmb();  /* Write the initilzation values */
 621}
 622
/**
 * alloc_eq_pages - allocate the pages for the queue
 * @eq: the event queue
 *
 * Allocates the DMA-coherent pages backing the queue, programs each page's
 * physical address into the HW, then initializes every element with the
 * current wrapped bit so HW ownership starts clean.
 *
 * Return 0 - Success, Negative - Failure
 **/
static int alloc_eq_pages(struct hinic_eq *eq)
{
        struct hinic_hwif *hwif = eq->hwif;
        struct pci_dev *pdev = hwif->pdev;
        u32 init_val, addr, val;
        size_t addr_size;
        int err, pg;

        /* per-page bookkeeping arrays (device-managed allocations) */
        addr_size = eq->num_pages * sizeof(*eq->dma_addr);
        eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
        if (!eq->dma_addr)
                return -ENOMEM;

        addr_size = eq->num_pages * sizeof(*eq->virt_addr);
        eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
        if (!eq->virt_addr) {
                err = -ENOMEM;
                goto err_virt_addr_alloc;
        }

        for (pg = 0; pg < eq->num_pages; pg++) {
                eq->virt_addr[pg] = dma_alloc_coherent(&pdev->dev,
                                                       eq->page_size,
                                                       &eq->dma_addr[pg],
                                                       GFP_KERNEL);
                if (!eq->virt_addr[pg]) {
                        err = -ENOMEM;
                        goto err_dma_alloc;
                }

                /* tell HW where this page lives (64-bit addr split hi/lo) */
                addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
                val = upper_32_bits(eq->dma_addr[pg]);

                hinic_hwif_write_reg(hwif, addr, val);

                addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
                val = lower_32_bits(eq->dma_addr[pg]);

                hinic_hwif_write_reg(hwif, addr, val);
        }

        /* elements start owned by SW: wrapped bit equals eq->wrapped */
        init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);

        if (eq->type == HINIC_AEQ)
                aeq_elements_init(eq, init_val);
        else if (eq->type == HINIC_CEQ)
                ceq_elements_init(eq, init_val);

        return 0;

err_dma_alloc:
        /* unwind only the pages allocated so far */
        while (--pg >= 0)
                dma_free_coherent(&pdev->dev, eq->page_size,
                                  eq->virt_addr[pg],
                                  eq->dma_addr[pg]);

        devm_kfree(&pdev->dev, eq->virt_addr);

err_virt_addr_alloc:
        devm_kfree(&pdev->dev, eq->dma_addr);
        return err;
}
 691
 692/**
 693 * free_eq_pages - free the pages of the queue
 694 * @eq: the Event Queue
 695 **/
 696static void free_eq_pages(struct hinic_eq *eq)
 697{
 698        struct hinic_hwif *hwif = eq->hwif;
 699        struct pci_dev *pdev = hwif->pdev;
 700        int pg;
 701
 702        for (pg = 0; pg < eq->num_pages; pg++)
 703                dma_free_coherent(&pdev->dev, eq->page_size,
 704                                  eq->virt_addr[pg],
 705                                  eq->dma_addr[pg]);
 706
 707        devm_kfree(&pdev->dev, eq->virt_addr);
 708        devm_kfree(&pdev->dev, eq->dma_addr);
 709}
 710
/**
 * init_eq - initialize Event Queue
 * @eq: the event queue
 * @hwif: the HW interface of a PCI function device
 * @type: the type of the event queue, aeq or ceq
 * @q_id: Queue id number
 * @q_len: the number of EQ elements
 * @page_size: the page size of the pages in the event queue
 * @entry: msix entry associated with the event queue
 *
 * Return 0 - Success, Negative - Failure
 **/
static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
                   enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
                   struct msix_entry entry)
{
        struct pci_dev *pdev = hwif->pdev;
        int err;

        eq->hwif = hwif;
        eq->type = type;
        eq->q_id = q_id;
        eq->q_len = q_len;
        eq->page_size = page_size;

        /* Clear PI and CI, also clear the ARM bit */
        hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
        hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);

        eq->cons_idx = 0;
        eq->wrapped = 0;

        /* type is validated here, so later type-dependent branches only
         * ever see HINIC_AEQ or HINIC_CEQ
         */
        if (type == HINIC_AEQ) {
                eq->elem_size = HINIC_AEQE_SIZE;
        } else if (type == HINIC_CEQ) {
                eq->elem_size = HINIC_CEQE_SIZE;
        } else {
                dev_err(&pdev->dev, "Invalid EQ type\n");
                return -EINVAL;
        }

        eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
        eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);

        eq->msix_entry = entry;

        /* GET_EQ_ELEMENT's index masking requires a power-of-2 count */
        if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
                dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
                return -EINVAL;
        }

        if (eq->num_pages > EQ_MAX_PAGES) {
                dev_err(&pdev->dev, "too many pages for eq\n");
                return -EINVAL;
        }

        err = set_eq_ctrls(eq);
        if (err) {
                dev_err(&pdev->dev, "Failed to set eq ctrls\n");
                return err;
        }

        /* publish CI=0 with the interrupt armed */
        eq_update_ci(eq, EQ_ARMED);

        err = alloc_eq_pages(eq);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
                return err;
        }

        /* AEQs are drained from a workqueue, CEQs from a tasklet */
        if (type == HINIC_AEQ) {
                struct hinic_eq_work *aeq_work = &eq->aeq_work;

                INIT_WORK(&aeq_work->work, eq_irq_work);
        } else if (type == HINIC_CEQ) {
                tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
        }

        /* set the attributes of the msix entry */
        hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
                            HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
                            HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
                            HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
                            HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
                            HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);

        if (type == HINIC_AEQ) {
                snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id,
                         pci_name(pdev));
                err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
        } else if (type == HINIC_CEQ) {
                snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id,
                         pci_name(pdev));
                err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
        }

        /* err is always assigned above: type was validated to be AEQ/CEQ */
        if (err) {
                dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
                goto err_req_irq;
        }

        return 0;

err_req_irq:
        free_eq_pages(eq);
        return err;
}
 818
/**
 * remove_eq - remove Event Queue
 * @eq: the event queue
 *
 * Teardown order matters: mask the msix vector and free the irq first,
 * then stop the deferred work/tasklet, clear the queue length in HW so it
 * stops touching host memory, and only then free the pages.
 **/
static void remove_eq(struct hinic_eq *eq)
{
        hinic_set_msix_state(eq->hwif, eq->msix_entry.entry,
                             HINIC_MSIX_DISABLE);
        free_irq(eq->msix_entry.vector, eq);

        if (eq->type == HINIC_AEQ) {
                struct hinic_eq_work *aeq_work = &eq->aeq_work;

                cancel_work_sync(&aeq_work->work);
                /* clear aeq_len to avoid hw access host memory */
                hinic_hwif_write_reg(eq->hwif,
                                     HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id), 0);
        } else if (eq->type == HINIC_CEQ) {
                tasklet_kill(&eq->ceq_tasklet);
                /* clear ceq_len to avoid hw access host memory */
                hinic_hwif_write_reg(eq->hwif,
                                     HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id), 0);
        }

        /* update cons_idx to avoid invalid interrupt */
        eq->cons_idx = hinic_hwif_read_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq));
        eq_update_ci(eq, EQ_NOT_ARMED);

        free_eq_pages(eq);
}
 849
 850/**
 851 * hinic_aeqs_init - initialize all the aeqs
 852 * @aeqs: pointer to Async eqs of the chip
 853 * @hwif: the HW interface of a PCI function device
 854 * @num_aeqs: number of AEQs
 855 * @q_len: number of EQ elements
 856 * @page_size: the page size of the pages in the event queue
 857 * @msix_entries: msix entries associated with the event queues
 858 *
 859 * Return 0 - Success, negative - Failure
 860 **/
 861int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
 862                    int num_aeqs, u32 q_len, u32 page_size,
 863                    struct msix_entry *msix_entries)
 864{
 865        struct pci_dev *pdev = hwif->pdev;
 866        int err, i, q_id;
 867
 868        aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
 869        if (!aeqs->workq)
 870                return -ENOMEM;
 871
 872        aeqs->hwif = hwif;
 873        aeqs->num_aeqs = num_aeqs;
 874
 875        for (q_id = 0; q_id < num_aeqs; q_id++) {
 876                err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
 877                              page_size, msix_entries[q_id]);
 878                if (err) {
 879                        dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
 880                        goto err_init_aeq;
 881                }
 882        }
 883
 884        return 0;
 885
 886err_init_aeq:
 887        for (i = 0; i < q_id; i++)
 888                remove_eq(&aeqs->aeq[i]);
 889
 890        destroy_workqueue(aeqs->workq);
 891        return err;
 892}
 893
 894/**
 895 * hinic_aeqs_free - free all the aeqs
 896 * @aeqs: pointer to Async eqs of the chip
 897 **/
 898void hinic_aeqs_free(struct hinic_aeqs *aeqs)
 899{
 900        int q_id;
 901
 902        for (q_id = 0; q_id < aeqs->num_aeqs ; q_id++)
 903                remove_eq(&aeqs->aeq[q_id]);
 904
 905        destroy_workqueue(aeqs->workq);
 906}
 907
 908/**
 909 * hinic_ceqs_init - init all the ceqs
 910 * @ceqs: ceqs part of the chip
 911 * @hwif: the hardware interface of a pci function device
 912 * @num_ceqs: number of CEQs
 913 * @q_len: number of EQ elements
 914 * @page_size: the page size of the event queue
 915 * @msix_entries: msix entries associated with the event queues
 916 *
 917 * Return 0 - Success, Negative - Failure
 918 **/
 919int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
 920                    int num_ceqs, u32 q_len, u32 page_size,
 921                    struct msix_entry *msix_entries)
 922{
 923        struct pci_dev *pdev = hwif->pdev;
 924        int i, q_id, err;
 925
 926        ceqs->hwif = hwif;
 927        ceqs->num_ceqs = num_ceqs;
 928
 929        for (q_id = 0; q_id < num_ceqs; q_id++) {
 930                ceqs->ceq[q_id].hwdev = ceqs->hwdev;
 931                err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
 932                              page_size, msix_entries[q_id]);
 933                if (err) {
 934                        dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
 935                        goto err_init_ceq;
 936                }
 937        }
 938
 939        return 0;
 940
 941err_init_ceq:
 942        for (i = 0; i < q_id; i++)
 943                remove_eq(&ceqs->ceq[i]);
 944
 945        return err;
 946}
 947
 948/**
 949 * hinic_ceqs_free - free all the ceqs
 950 * @ceqs: ceqs part of the chip
 951 **/
 952void hinic_ceqs_free(struct hinic_ceqs *ceqs)
 953{
 954        int q_id;
 955
 956        for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
 957                remove_eq(&ceqs->ceq[q_id]);
 958}
 959
 960void hinic_dump_ceq_info(struct hinic_hwdev *hwdev)
 961{
 962        struct hinic_eq *eq = NULL;
 963        u32 addr, ci, pi;
 964        int q_id;
 965
 966        for (q_id = 0; q_id < hwdev->func_to_io.ceqs.num_ceqs; q_id++) {
 967                eq = &hwdev->func_to_io.ceqs.ceq[q_id];
 968                addr = EQ_CONS_IDX_REG_ADDR(eq);
 969                ci = hinic_hwif_read_reg(hwdev->hwif, addr);
 970                addr = EQ_PROD_IDX_REG_ADDR(eq);
 971                pi = hinic_hwif_read_reg(hwdev->hwif, addr);
 972                dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n",
 973                        q_id, ci, eq->cons_idx, pi,
 974                        eq->ceq_tasklet.state,
 975                        eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq))));
 976        }
 977}
 978
 979void hinic_dump_aeq_info(struct hinic_hwdev *hwdev)
 980{
 981        struct hinic_aeq_elem *aeqe_pos = NULL;
 982        struct hinic_eq *eq = NULL;
 983        u32 addr, ci, pi;
 984        int q_id;
 985
 986        for (q_id = 0; q_id < hwdev->aeqs.num_aeqs; q_id++) {
 987                eq = &hwdev->aeqs.aeq[q_id];
 988                addr = EQ_CONS_IDX_REG_ADDR(eq);
 989                ci = hinic_hwif_read_reg(hwdev->hwif, addr);
 990                addr = EQ_PROD_IDX_REG_ADDR(eq);
 991                pi = hinic_hwif_read_reg(hwdev->hwif, addr);
 992                aeqe_pos = GET_CURR_AEQ_ELEM(eq);
 993                dev_err(&hwdev->hwif->pdev->dev, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n",
 994                        q_id, ci, pi, work_busy(&eq->aeq_work.work),
 995                        eq->wrapped, be32_to_cpu(aeqe_pos->desc));
 996        }
 997}
 998