linux/drivers/net/wireless/realtek/rtw88/pci.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019  Realtek Corporation
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"

static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");

static u32 rtw_pci_tx_queue_idx_addr[] = {
        [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
        [RTW_TX_QUEUE_BE]       = RTK_PCI_TXBD_IDX_BEQ,
        [RTW_TX_QUEUE_VI]       = RTK_PCI_TXBD_IDX_VIQ,
        [RTW_TX_QUEUE_VO]       = RTK_PCI_TXBD_IDX_VOQ,
        [RTW_TX_QUEUE_MGMT]     = RTK_PCI_TXBD_IDX_MGMTQ,
        [RTW_TX_QUEUE_HI0]      = RTK_PCI_TXBD_IDX_HI0Q,
        [RTW_TX_QUEUE_H2C]      = RTK_PCI_TXBD_IDX_H2CQ,
};

static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
        switch (queue) {
        case RTW_TX_QUEUE_BCN:
                return TX_DESC_QSEL_BEACON;
        case RTW_TX_QUEUE_H2C:
                return TX_DESC_QSEL_H2C;
        case RTW_TX_QUEUE_MGMT:
                return TX_DESC_QSEL_MGMT;
        case RTW_TX_QUEUE_HI0:
                return TX_DESC_QSEL_HIGH;
        default:
                return skb->priority;
        }
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readb(rtwpci->mmap + addr);
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readw(rtwpci->mmap + addr);
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        return readl(rtwpci->mmap + addr);
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writeb(val, rtwpci->mmap + addr);
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writew(val, rtwpci->mmap + addr);
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        writel(val, rtwpci->mmap + addr);
}

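/* Return a pointer to the TX buffer descriptor at index @idx; descriptors
 * are laid out back-to-back in the ring's coherent memory at r.head.
 */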
static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
        int offset = tx_ring->r.desc_size * idx;

        return tx_ring->r.head + offset;
}

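/* Unmap and free every skb still queued on a TX ring; used when the ring
 * is torn down or the TX DMA engine is released.
 */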
static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb, *tmp;
        dma_addr_t dma;

        /* free every skb remaining in the tx list */
        skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
                __skb_unlink(skb, &tx_ring->queue);
                tx_data = rtw_pci_get_tx_data(skb);
                dma = tx_data->dma;

                dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_tx_ring *tx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = tx_ring->r.head;
        u32 len = tx_ring->r.len;
        int ring_sz = len * tx_ring->r.desc_size;

        rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

        /* free the ring itself */
        dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
        tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
                                      struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;
        int i;

        for (i = 0; i < rx_ring->r.len; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;

                dma = *((dma_addr_t *)skb->cb);
                dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb(skb);
                rx_ring->buf[i] = NULL;
        }
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
                                 struct rtw_pci_rx_ring *rx_ring)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        u8 *head = rx_ring->r.head;
        int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

        rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

        dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        int i;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
                rx_ring = &rtwpci->rx_rings[i];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }
}

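/* Allocate the coherent descriptor memory for one TX ring and initialize
 * its bookkeeping (head, dma handle, length, descriptor size, read/write
 * pointers); @len must fit in the hardware index field (TRX_BD_IDX_MASK).
 */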
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_tx_ring *tx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        int ring_sz = desc_size * len;
        dma_addr_t dma;
        u8 *head;

        if (len > TRX_BD_IDX_MASK) {
                rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
                return -EINVAL;
        }

        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate tx ring\n");
                return -ENOMEM;
        }

        skb_queue_head_init(&tx_ring->queue);
        tx_ring->r.head = head;
        tx_ring->r.dma = dma;
        tx_ring->r.len = len;
        tx_ring->r.desc_size = desc_size;
        tx_ring->r.wp = 0;
        tx_ring->r.rp = 0;

        return 0;
}

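/* Map an RX skb for the device and (re)write the RX buffer descriptor at
 * @idx so the hardware can DMA into it; the DMA address is stashed in
 * skb->cb so it can be unmapped later.
 */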
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
                                 struct rtw_pci_rx_ring *rx_ring,
                                 u32 idx, u32 desc_sz)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        dma_addr_t dma;

        if (!skb)
                return -EINVAL;

        dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, dma))
                return -EBUSY;

        *((dma_addr_t *)skb->cb) = dma;
        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);

        return 0;
}

static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
                                        struct rtw_pci_rx_ring *rx_ring,
                                        u32 idx, u32 desc_sz)
{
        struct device *dev = rtwdev->dev;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;

        dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        memset(buf_desc, 0, sizeof(*buf_desc));
        buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
        buf_desc->dma = cpu_to_le32(dma);
}

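/* Allocate the RX descriptor ring and pre-allocate one skb per descriptor;
 * on failure, unmap and free everything allocated so far.
 */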
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
                                struct rtw_pci_rx_ring *rx_ring,
                                u8 desc_size, u32 len)
{
        struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
        struct sk_buff *skb = NULL;
        dma_addr_t dma;
        u8 *head;
        int ring_sz = desc_size * len;
        int buf_sz = RTK_PCI_RX_BUF_SIZE;
        int i, allocated;
        int ret = 0;

        if (len > TRX_BD_IDX_MASK) {
                rtw_err(rtwdev, "len %d exceeds maximum RX entries\n", len);
                return -EINVAL;
        }

        head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
        if (!head) {
                rtw_err(rtwdev, "failed to allocate rx ring\n");
                return -ENOMEM;
        }
        rx_ring->r.head = head;

        for (i = 0; i < len; i++) {
                skb = dev_alloc_skb(buf_sz);
                if (!skb) {
                        allocated = i;
                        ret = -ENOMEM;
                        goto err_out;
                }

                memset(skb->data, 0, buf_sz);
                rx_ring->buf[i] = skb;
                ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
                if (ret) {
                        allocated = i;
                        dev_kfree_skb_any(skb);
                        goto err_out;
                }
        }

        rx_ring->r.dma = dma;
        rx_ring->r.len = len;
        rx_ring->r.desc_size = desc_size;
        rx_ring->r.wp = 0;
        rx_ring->r.rp = 0;

        return 0;

err_out:
        for (i = 0; i < allocated; i++) {
                skb = rx_ring->buf[i];
                if (!skb)
                        continue;
                /* unmap each skb with its own address from skb->cb; don't
                 * clobber 'dma', which must still hold the ring's handle
                 * for dma_free_coherent() below
                 */
                dma_unmap_single(&pdev->dev, *((dma_addr_t *)skb->cb),
                                 buf_sz, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                rx_ring->buf[i] = NULL;
        }
        dma_free_coherent(&pdev->dev, ring_sz, head, dma);

        rtw_err(rtwdev, "failed to init rx buffer\n");

        return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        struct rtw_pci_rx_ring *rx_ring;
        struct rtw_chip_info *chip = rtwdev->chip;
        int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
        int tx_desc_size, rx_desc_size;
        u32 len;
        int ret;

        tx_desc_size = chip->tx_buf_desc_sz;

        for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                len = max_num_of_tx_queue(i);
                ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
                if (ret)
                        goto out;
        }

        rx_desc_size = chip->rx_buf_desc_sz;

        for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
                                           RTK_MAX_RX_DESC_NUM);
                if (ret)
                        goto out;
        }

        return 0;

out:
        tx_alloced = i;
        for (i = 0; i < tx_alloced; i++) {
                tx_ring = &rtwpci->tx_rings[i];
                rtw_pci_free_tx_ring(rtwdev, tx_ring);
        }

        rx_alloced = j;
        for (j = 0; j < rx_alloced; j++) {
                rx_ring = &rtwpci->rx_rings[j];
                rtw_pci_free_rx_ring(rtwdev, rx_ring);
        }

        return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
        rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        int ret = 0;

        rtwpci->irq_mask[0] = IMR_HIGHDOK |
                              IMR_MGNTDOK |
                              IMR_BKDOK |
                              IMR_BEDOK |
                              IMR_VIDOK |
                              IMR_VODOK |
                              IMR_ROK |
                              IMR_BCNDMAINT_E |
                              IMR_C2HCMD |
                              0;
        rtwpci->irq_mask[1] = IMR_TXFOVW |
                              0;
        rtwpci->irq_mask[3] = IMR_H2CDOK |
                              0;
        spin_lock_init(&rtwpci->irq_lock);
        spin_lock_init(&rtwpci->hwirq_lock);
        ret = rtw_pci_init_trx_ring(rtwdev);

        return ret;
}

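/* Program the hardware with the host DMA addresses and lengths of all
 * TX/RX buffer descriptor rings, and reset the hardware read/write
 * indexes to match the freshly reset host-side pointers.
 */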
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 len;
        u8 tmp;
        dma_addr_t dma;

        tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
        rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

        if (!rtw_chip_wcpu_11n(rtwdev)) {
                len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
                dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
                rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
                rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
                rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
                rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
        }

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

        len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
        dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
        rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

        len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
        dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
        rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
        rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
        rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

        /* reset read/write point */
        rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

        /* reset H2C Queue index in a single write */
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
                                BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
        rtw_pci_reset_buf_desc(rtwdev);
}

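/* Write the cached IMR masks to the hardware under hwirq_lock; when
 * @exclude_rx is true, leave IMR_ROK masked off so RX completion stays
 * with the NAPI poller instead of raising further interrupts.
 */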
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
                                     struct rtw_pci *rtwpci, bool exclude_rx)
{
        unsigned long flags;
        u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;

        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

        rtwpci->irq_enabled = true;

        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
                                      struct rtw_pci *rtwpci)
{
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        if (!rtwpci->irq_enabled)
                goto out;

        rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
        rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

        rtwpci->irq_enabled = false;

out:
        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        /* reset dma and rx tag */
        rtw_write32_set(rtwdev, RTK_PCI_CTRL,
                        BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
        rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_pci_reset_trx_ring(rtwdev);
        rtw_pci_dma_reset(rtwdev, rtwpci);

        return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
        struct rtw_pci_tx_ring *tx_ring;
        u8 queue;

        rtw_pci_reset_trx_ring(rtwdev);
        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                tx_ring = &rtwpci->tx_rings[queue];
                rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
        }
}

static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
                return;

        napi_enable(&rtwpci->napi);
}

static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
                return;

        napi_synchronize(&rtwpci->napi);
        napi_disable(&rtwpci->napi);
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        rtw_pci_napi_start(rtwdev);

        spin_lock_bh(&rtwpci->irq_lock);
        rtwpci->running = true;
        rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
        spin_unlock_bh(&rtwpci->irq_lock);

        return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;

        spin_lock_bh(&rtwpci->irq_lock);
        rtwpci->running = false;
        rtw_pci_disable_interrupt(rtwdev, rtwpci);
        spin_unlock_bh(&rtwpci->irq_lock);

        synchronize_irq(pdev->irq);
        rtw_pci_napi_stop(rtwdev);

        spin_lock_bh(&rtwpci->irq_lock);
        rtw_pci_dma_release(rtwdev, rtwpci);
        spin_unlock_bh(&rtwpci->irq_lock);
}

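/* Enter deep power save only when every data TX ring has drained, since
 * TX DMA must not run while the chip is in deep PS.
 */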
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *tx_ring;
        bool tx_empty = true;
        u8 queue;

        lockdep_assert_held(&rtwpci->irq_lock);

        /* TX DMA is not allowed in the deep PS state */
        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
                /* BCN queue holds rsvd pages and has no DMA interrupt;
                 * H2C queue is managed by firmware
                 */
                if (queue == RTW_TX_QUEUE_BCN ||
                    queue == RTW_TX_QUEUE_H2C)
                        continue;

                tx_ring = &rtwpci->tx_rings[queue];

                /* check if there is any skb DMAing */
                if (skb_queue_len(&tx_ring->queue)) {
                        tx_empty = false;
                        break;
                }
        }

        if (!tx_empty) {
                rtw_dbg(rtwdev, RTW_DBG_PS,
                        "TX path not empty, cannot enter deep power save state\n");
                return;
        }

        set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
        rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        lockdep_assert_held(&rtwpci->irq_lock);

        if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        spin_lock_bh(&rtwpci->irq_lock);

        if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_enter(rtwdev);

        if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
                rtw_pci_deep_ps_leave(rtwdev);

        spin_unlock_bh(&rtwpci->irq_lock);
}

static u8 ac_to_hwq[] = {
        [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
        [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
        [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
        [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);

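/* Map an skb to a hardware TX queue: beacons and management/control frames
 * get dedicated queues; everything else follows its mac80211 AC mapping,
 * falling back to BE for an out-of-range queue mapping.
 */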
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        __le16 fc = hdr->frame_control;
        u8 q_mapping = skb_get_queue_mapping(skb);
        u8 queue;

        if (unlikely(ieee80211_is_beacon(fc)))
                queue = RTW_TX_QUEUE_BCN;
        else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
                queue = RTW_TX_QUEUE_MGMT;
        else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
                queue = ac_to_hwq[IEEE80211_AC_BE];
        else
                queue = ac_to_hwq[q_mapping];

        return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
                                      struct rtw_pci_tx_ring *ring)
{
        struct sk_buff *prev = skb_dequeue(&ring->queue);
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;

        if (!prev)
                return;

        tx_data = rtw_pci_get_tx_data(prev);
        dma = tx_data->dma;
        dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
        dev_kfree_skb_any(prev);
}

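/* Check the RX tag that the hardware reports back through the descriptor's
 * total_pkt_size field against the expected running counter (enabled by
 * BIT_RX_TAG_EN in rtw_pci_dma_reset()); a mismatch suggests the DMA write
 * has not landed yet, i.e. a pci bus timeout.
 */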
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
                              struct rtw_pci_rx_ring *rx_ring,
                              u32 idx)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_rx_buffer_desc *buf_desc;
        u32 desc_sz = chip->rx_buf_desc_sz;
        u16 total_pkt_size;

        buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
                                                     idx * desc_sz);
        total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

        /* rx tag mismatch, throw a warning */
        if (total_pkt_size != rtwpci->rx_tag)
                rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

        rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
{
        u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
        u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);

        return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
}

static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
        u32 cur_rp;
        u8 i;

        /* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
         * bit dynamic, it's hard to define a reasonable fixed total timeout
         * for the read_poll_timeout* helpers. Instead, bound the number of
         * polls, so we just use a for loop with udelay here.
         */
        for (i = 0; i < 30; i++) {
                cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
                if (cur_rp == ring->r.wp)
                        return;

                udelay(1);
        }

        if (!drop)
                rtw_warn(rtwdev, "timed out flushing pci tx ring[%d]\n", pci_q);
}

static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
                                   bool drop)
{
        u8 q;

        for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
                /* It may not be necessary to flush the BCN and H2C tx queues. */
                if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C)
                        continue;

                if (pci_queues & BIT(q))
                        __pci_flush_queue(rtwdev, q, drop);
        }
}

static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
        u32 pci_queues = 0;
        u8 i;

        /* If all of the hardware queues are requested to flush,
         * flush all of the pci queues.
         */
        if (queues == BIT(rtwdev->hw->queues) - 1) {
                pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
        } else {
                for (i = 0; i < rtwdev->hw->queues; i++)
                        if (queues & BIT(i))
                                pci_queues |= BIT(ac_to_hwq[i]);
        }

        __rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}

static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring;
        u32 bd_idx;

        ring = &rtwpci->tx_rings[queue];
        bd_idx = rtw_pci_tx_queue_idx_addr[queue];

        spin_lock_bh(&rtwpci->irq_lock);
        rtw_pci_deep_ps_leave(rtwdev);
        rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
        spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u8 queue;

        for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
                if (test_and_clear_bit(queue, rtwpci->tx_queued))
                        rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

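/* Fill the TX packet descriptor, map the skb for DMA, and program a pair
 * of buffer descriptors (one covering the packet descriptor, one covering
 * the frame body); the write pointer is advanced here, but the doorbell is
 * rung later by rtw_pci_tx_kick_off_queue().
 */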
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
                                 struct rtw_tx_pkt_info *pkt_info,
                                 struct sk_buff *skb, u8 queue)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        dma_addr_t dma;
        u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
        u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
        u32 size;
        u32 psb_len;
        u8 *pkt_desc;
        struct rtw_pci_tx_buffer_desc *buf_desc;

        ring = &rtwpci->tx_rings[queue];

        size = skb->len;

        if (queue == RTW_TX_QUEUE_BCN)
                rtw_pci_release_rsvd_page(rtwpci, ring);
        else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
                return -ENOSPC;

        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, tx_pkt_desc_sz);
        pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
        rtw_tx_fill_tx_desc(pkt_info, skb);
        dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
                             DMA_TO_DEVICE);
        if (dma_mapping_error(&rtwpci->pdev->dev, dma))
                return -EBUSY;

        /* after this the DMA mapping is live, there is no way back */
        buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
        memset(buf_desc, 0, tx_buf_desc_sz);
        psb_len = (skb->len - 1) / 128 + 1;
        if (queue == RTW_TX_QUEUE_BCN)
                psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

        buf_desc[0].psb_len = cpu_to_le16(psb_len);
        buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
        buf_desc[0].dma = cpu_to_le32(dma);
        buf_desc[1].buf_size = cpu_to_le16(size);
        buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

        tx_data = rtw_pci_get_tx_data(skb);
        tx_data->dma = dma;
        tx_data->sn = pkt_info->sn;

        spin_lock_bh(&rtwpci->irq_lock);

        skb_queue_tail(&ring->queue, skb);

        if (queue == RTW_TX_QUEUE_BCN)
                goto out_unlock;

        /* update write-index, and kick it off later */
        set_bit(queue, rtwpci->tx_queued);
        if (++ring->r.wp >= ring->r.len)
                ring->r.wp = 0;

out_unlock:
        spin_unlock_bh(&rtwpci->irq_lock);

        return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
                                        u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info = {0};
        u8 reg_bcn_work;
        int ret;

        skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
        if (!skb)
                return -ENOMEM;

        ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
        if (ret) {
                rtw_err(rtwdev, "failed to write rsvd page data\n");
                return ret;
        }

        /* reserved pages go through beacon queue */
        reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
        reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
        rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

        return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
        struct sk_buff *skb;
        struct rtw_tx_pkt_info pkt_info = {0};
        int ret;

        skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
        if (!skb)
                return -ENOMEM;

        ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
        if (ret) {
                rtw_err(rtwdev, "failed to write h2c data\n");
                return ret;
        }

        rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

        return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
                            struct rtw_tx_pkt_info *pkt_info,
                            struct sk_buff *skb)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct rtw_pci_tx_ring *ring;
        u8 queue = rtw_hw_queue_mapping(skb);
        int ret;

        ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
        if (ret)
                return ret;

        ring = &rtwpci->tx_rings[queue];
        spin_lock_bh(&rtwpci->irq_lock);
        if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
                ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
                ring->queue_stopped = true;
        }
        spin_unlock_bh(&rtwpci->irq_lock);

        return 0;
}

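/* TX completion handler: reclaim descriptors up to the hardware read
 * pointer, unmap and complete each skb, wake a stopped mac80211 queue once
 * enough descriptors are free again, and hand TX status back to mac80211
 * (or to the TX report path when status was explicitly requested).
 */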
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue)
{
        struct ieee80211_hw *hw = rtwdev->hw;
        struct ieee80211_tx_info *info;
        struct rtw_pci_tx_ring *ring;
        struct rtw_pci_tx_data *tx_data;
        struct sk_buff *skb;
        u32 count;
        u32 bd_idx_addr;
        u32 bd_idx, cur_rp, rp_idx;
        u16 q_map;

        ring = &rtwpci->tx_rings[hw_queue];

        bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
        bd_idx = rtw_read32(rtwdev, bd_idx_addr);
        cur_rp = bd_idx >> 16;
        cur_rp &= TRX_BD_IDX_MASK;
        rp_idx = ring->r.rp;
        if (cur_rp >= ring->r.rp)
                count = cur_rp - ring->r.rp;
        else
                count = ring->r.len - (ring->r.rp - cur_rp);

        while (count--) {
                skb = skb_dequeue(&ring->queue);
                if (!skb) {
                        rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
                                count, hw_queue, bd_idx, ring->r.rp, cur_rp);
                        break;
                }
                tx_data = rtw_pci_get_tx_data(skb);
                dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
                                 DMA_TO_DEVICE);

                /* just free command packets from host to card */
                if (hw_queue == RTW_TX_QUEUE_H2C) {
                        dev_kfree_skb_irq(skb);
                        continue;
                }

                if (ring->queue_stopped &&
                    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
                        q_map = skb_get_queue_mapping(skb);
                        ieee80211_wake_queue(hw, q_map);
                        ring->queue_stopped = false;
                }

                if (++rp_idx >= ring->r.len)
                        rp_idx = 0;

                skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

                info = IEEE80211_SKB_CB(skb);

                /* enqueue to wait for tx report */
                if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
                        rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
                        continue;
                }

                /* always ACK for others, then they won't be marked as drop */
                if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                        info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
                else
                        info->flags |= IEEE80211_TX_STAT_ACK;

                ieee80211_tx_info_clear_status(info);
                ieee80211_tx_status_irqsafe(hw, skb);
        }

        ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct napi_struct *napi = &rtwpci->napi;

        napi_schedule(napi);
}

static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
                                     struct rtw_pci *rtwpci)
{
        struct rtw_pci_rx_ring *ring;
        int count = 0;
        u32 tmp, cur_wp;

        ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
        tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
        cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
        if (cur_wp >= ring->r.wp)
                count = cur_wp - ring->r.wp;
        else
                count = ring->r.len - (ring->r.wp - cur_wp);

        return count;
}

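/* NAPI RX handler: for each completed descriptor, copy the frame into a
 * freshly allocated skb, deliver it (C2H firmware events via the irqsafe
 * path, everything else to mac80211), then re-arm the original ring buffer
 * for the device so it can be reused.
 */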
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
                           u8 hw_queue, u32 limit)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct napi_struct *napi = &rtwpci->napi;
        struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
        struct rtw_rx_pkt_stat pkt_stat;
        struct ieee80211_rx_status rx_status;
        struct sk_buff *skb, *new;
        u32 cur_rp = ring->r.rp;
        u32 count, rx_done = 0;
        u32 pkt_offset;
        u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
        u32 buf_desc_sz = chip->rx_buf_desc_sz;
        u32 new_len;
        u8 *rx_desc;
        dma_addr_t dma;

        count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
        count = min(count, limit);

        while (count--) {
                rtw_pci_dma_check(rtwdev, ring, cur_rp);
                skb = ring->buf[cur_rp];
                dma = *((dma_addr_t *)skb->cb);
                dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
                                        DMA_FROM_DEVICE);
                rx_desc = skb->data;
                chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

                /* offset from rx_desc to payload */
                pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
                             pkt_stat.shift;

                /* allocate a new skb for this frame,
                 * discard the frame if none available
                 */
                new_len = pkt_stat.pkt_len + pkt_offset;
                new = dev_alloc_skb(new_len);
                if (WARN_ONCE(!new, "rx routine starvation\n"))
                        goto next_rp;

                /* put the DMA data including rx_desc from phy to new skb */
                skb_put_data(new, skb->data, new_len);

                if (pkt_stat.is_c2h) {
                        rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
                } else {
                        /* remove rx_desc */
                        skb_pull(new, pkt_offset);

                        rtw_rx_stats(rtwdev, pkt_stat.vif, new);
                        memcpy(new->cb, &rx_status, sizeof(rx_status));
                        ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
                        rx_done++;
                }

next_rp:
                /* new skb delivered to mac80211, re-enable original skb DMA */
                rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
                                            buf_desc_sz);

                /* host read next element in ring */
                if (++cur_rp >= ring->r.len)
                        cur_rp = 0;
        }

        ring->r.rp = cur_rp;
        /* 'rp', the last position we have read, is seen as the previous
         * position of 'wp' that is used to calculate 'count' next time.
         */
        ring->r.wp = cur_rp;
        rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);

        return rx_done;
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
                                   struct rtw_pci *rtwpci, u32 *irq_status)
{
        unsigned long flags;

        spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

        irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
        irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
        if (rtw_chip_wcpu_11ac(rtwdev))
                irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
        else
                irq_status[3] = 0;
        irq_status[0] &= rtwpci->irq_mask[0];
        irq_status[1] &= rtwpci->irq_mask[1];
        irq_status[3] &= rtwpci->irq_mask[3];
        rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
        rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
        if (rtw_chip_wcpu_11ac(rtwdev))
                rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

        spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        /* disable RTW PCI interrupts to avoid more interrupts before the
         * end of the thread function
         *
         * disable HIMR here to also avoid a new HISR flag being raised
         * before the HISRs have been Write-1-cleared for MSI. If not all of
         * the HISRs are cleared, the edge-triggered interrupt will not be
         * generated when a new HISR flag is set.
         */
        rtw_pci_disable_interrupt(rtwdev, rtwpci);

        return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
        struct rtw_dev *rtwdev = dev;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        u32 irq_status[4];
        bool rx = false;

        spin_lock_bh(&rtwpci->irq_lock);
        rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

        if (irq_status[0] & IMR_MGNTDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
        if (irq_status[0] & IMR_HIGHDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
        if (irq_status[0] & IMR_BEDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
        if (irq_status[0] & IMR_BKDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
        if (irq_status[0] & IMR_VODOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
        if (irq_status[0] & IMR_VIDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
        if (irq_status[3] & IMR_H2CDOK)
                rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
        if (irq_status[0] & IMR_ROK) {
                rtw_pci_rx_isr(rtwdev);
                rx = true;
        }
        if (unlikely(irq_status[0] & IMR_C2HCMD))
                rtw_fw_c2h_cmd_isr(rtwdev);

        /* all of the jobs for this interrupt have been done */
        if (rtwpci->running)
                rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
        spin_unlock_bh(&rtwpci->irq_lock);

        return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
                              struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        unsigned long len;
        u8 bar_id = 2;
        int ret;

        ret = pci_request_regions(pdev, KBUILD_MODNAME);
        if (ret) {
                rtw_err(rtwdev, "failed to request pci regions\n");
                return ret;
        }

        len = pci_resource_len(pdev, bar_id);
        rtwpci->mmap = pci_iomap(pdev, bar_id, len);
        if (!rtwpci->mmap) {
                pci_release_regions(pdev);
                rtw_err(rtwdev, "failed to map pci memory\n");
                return -ENOMEM;
        }

        return 0;
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
                                 struct pci_dev *pdev)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        if (rtwpci->mmap) {
                pci_iounmap(pdev, rtwpci->mmap);
                pci_release_regions(pdev);
        }
}

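/* DBI accessors: indirect byte read/write of the PCIE DBI registers
 * through REG_DBI_*_V1, polling a flag bit for completion and warning if
 * the operation never finishes within the retry budget.
 */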
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
        u16 write_addr;
        u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
        u8 flag;
        u8 cnt;

        write_addr = addr & BITS_DBI_ADDR_MASK;
        write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
        rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
        rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                if (flag == 0)
                        return;

                udelay(10);
        }

        WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
        u16 read_addr = addr & BITS_DBI_ADDR_MASK;
        u8 flag;
        u8 cnt;

        rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
        rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
                if (flag == 0) {
                        read_addr = REG_DBI_RDATA_V1 + (addr & 3);
                        *value = rtw_read8(rtwdev, read_addr);
                        return 0;
                }

                udelay(10);
        }

        WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
        return -EIO;
}

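/* MDIO write: program a PCIE PHY register through REG_MDIO_V1 and
 * REG_PCIE_MIX_CFG, selecting the gen1 or gen2 register page, then poll
 * the write flag for completion.
 */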
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
        u8 page;
        u8 wflag;
        u8 cnt;

        rtw_write16(rtwdev, REG_MDIO_V1, data);

        page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
        page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
        rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
        rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

        for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
                wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
                                        BIT_MDIO_WFLAG_V1);
                if (wflag == 0)
                        return;

                udelay(10);
        }

        WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
        u8 value;
        int ret;

        if (rtw_pci_disable_aspm)
                return;

        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
                return;
        }

        if (enable)
                value |= BIT_CLKREQ_SW_EN;
        else
                value &= ~BIT_CLKREQ_SW_EN;

        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
        u8 value;
        int ret;

        if (rtw_pci_disable_aspm)
                return;

        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
                return;
        }

        if (enable)
                value |= BIT_L1_SW_EN;
        else
                value &= ~BIT_L1_SW_EN;

        rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

        /* Like CLKREQ, ASPM is also implemented by two HW modules, and can
         * only be enabled when the host supports it.
         *
         * The ASPM mechanism should be enabled only when the driver/firmware
         * enters power save mode and there is no heavy traffic, because we've
         * experienced some interoperability issues where the link tends to
         * enter L1 on the fly even while the driver is pushing high
         * throughput. This is probably because the ASPM behavior slightly
         * varies across different SoCs.
         */
        if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
                rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
        struct pci_dev *pdev = rtwpci->pdev;
        u16 link_ctrl;
        int ret;

        /* RTL8822CE has REFCLK auto calibration enabled, so it does not
         * need an added clock delay to cover the REFCLK timing gap.
         */
        if (chip->id == RTW_CHIP_TYPE_8822C)
                rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

        /* Though there is a standard PCIE configuration space in which to
         * set the link control register, by Realtek's design the driver
         * should check whether the host supports CLKREQ/ASPM before
         * enabling the HW module.
         *
         * These functions are implemented by two associated HW modules: one
         * is responsible for accessing the PCIE configuration space to
         * follow the host settings, and the other is in charge of the
         * actual CLKREQ/ASPM mechanisms and is disabled by default.
         * Sometimes the host does not support them, and due to some reasons
         * or wrong settings (ex. CLKREQ# not Bi-Direction), it could lead
         * to device loss if HW misbehaves on the link.
         *
         * Hence the driver should first check that the PCIE configuration
         * space is synced and enabled, and only then turn on the other
         * module that actually implements the mechanism.
         */
        ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
        if (ret) {
                rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
                return;
        }

        if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
                rtw_pci_clkreq_set(rtwdev, true);

        rtwpci->link_ctrl = link_ctrl;
}
1443
1444static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1445{
1446        struct rtw_chip_info *chip = rtwdev->chip;
1447
1448        switch (chip->id) {
1449        case RTW_CHIP_TYPE_8822C:
1450                if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1451                        rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1452                                         BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1453                break;
1454        default:
1455                break;
1456        }
1457}
1458
1459static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1460{
1461        struct rtw_chip_info *chip = rtwdev->chip;
1462        const struct rtw_intf_phy_para *para;
1463        u16 cut;
1464        u16 value;
1465        u16 offset;
1466        int i;
1467
1468        cut = BIT(0) << rtwdev->hal.cut_version;
1469
1470        for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1471                para = &chip->intf_table->gen1_para[i];
1472                if (!(para->cut_mask & cut))
1473                        continue;
1474                if (para->offset == 0xffff)
1475                        break;
1476                offset = para->offset;
1477                value = para->value;
1478                if (para->ip_sel == RTW_IP_SEL_PHY)
1479                        rtw_mdio_write(rtwdev, offset, value, true);
1480                else
1481                        rtw_dbi_write8(rtwdev, offset, value);
1482        }
1483
1484        for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1485                para = &chip->intf_table->gen2_para[i];
1486                if (!(para->cut_mask & cut))
1487                        continue;
1488                if (para->offset == 0xffff)
1489                        break;
1490                offset = para->offset;
1491                value = para->value;
1492                if (para->ip_sel == RTW_IP_SEL_PHY)
1493                        rtw_mdio_write(rtwdev, offset, value, false);
1494                else
1495                        rtw_dbi_write8(rtwdev, offset, value);
1496        }
1497
1498        rtw_pci_link_cfg(rtwdev);
1499}
1500
1501static int __maybe_unused rtw_pci_suspend(struct device *dev)
1502{
1503        return 0;
1504}
1505
1506static int __maybe_unused rtw_pci_resume(struct device *dev)
1507{
1508        return 0;
1509}
1510
1511SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1512EXPORT_SYMBOL(rtw_pm_ops);
1513
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

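/* Map the MMIO region and set up the DMA rings; unwind the mapping if
 * ring allocation fails so a failed probe leaves nothing behind.
 */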
static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access the hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

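/* HCI ops vtable for PCI: the rtw88 core accesses the device only
 * through struct rtw_hci_ops, keeping it independent of the underlying
 * host interface; this table binds the core to the PCI implementations
 * above.
 */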
static struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.flush_queues = rtw_pci_flush_queues,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

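/* Allocate a single interrupt vector, preferring MSI unless disabled by
 * the module parameter or a DMI quirk, and falling back to the legacy
 * INTx line. The handler is split: a hard-IRQ half plus a threaded half
 * that does the heavy lifting.
 */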
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

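/* NAPI poll callback: drain received MPDUs from the RX ring until either
 * the ring is empty or the budget is spent. Returning less than the full
 * budget tells the NAPI core we are done, at which point RX interrupts
 * are re-enabled.
 */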
static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
					      priv);
	int work_done = 0;

	while (work_done < budget) {
		u32 work_done_once;

		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
						 budget - work_done);
		if (work_done_once == 0)
			break;
		work_done += work_done_once;
	}
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock_bh(&rtwpci->irq_lock);
		if (rtwpci->running)
			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
		spin_unlock_bh(&rtwpci->irq_lock);
		/* If an interrupt fires during polling, before napi_complete,
		 * and no further data arrives, the data already on the DMA
		 * ring would not be processed promptly. So check whether the
		 * DMA ring is non-empty and reschedule NAPI if needed.
		 */
		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
			napi_schedule(napi);
	}

	return work_done;
}

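/* NAPI requires a struct net_device to attach to, but mac80211 drivers
 * have none, so register the poll function on a dummy netdev.
 */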
static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	init_dummy_netdev(&rtwpci->netdev);
	netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll,
		       RTW_NAPI_WEIGHT_NUM);
}

static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_stop(rtwdev);
	netif_napi_del(&rtwpci->napi);
}

enum rtw88_quirk_dis_pci_caps {
	QUIRK_DIS_PCI_CAP_MSI,
	QUIRK_DIS_PCI_CAP_ASPM,
};

static int disable_pci_caps(const struct dmi_system_id *dmi)
{
	uintptr_t dis_caps = (uintptr_t)dmi->driver_data;

	if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_MSI))
		rtw_disable_msi = true;
	if (dis_caps & BIT(QUIRK_DIS_PCI_CAP_ASPM))
		rtw_pci_disable_aspm = true;

	return 1;
}

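/* DMI-based quirk table: on machines whose DMI strings match an entry,
 * the callback force-disables the listed PCI capabilities before the
 * device is brought up. A hypothetical entry for a platform with broken
 * MSI routing would look like this (vendor/product names made up):
 *
 *	{
 *		.callback = disable_pci_caps,
 *		.ident = "Example Vendor EX123",
 *		.matches = {
 *			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
 *			DMI_MATCH(DMI_PRODUCT_NAME, "EX123"),
 *		},
 *		.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_MSI),
 *	},
 */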
static const struct dmi_system_id rtw88_pci_quirks[] = {
	{
		.callback = disable_pci_caps,
		.ident = "Protempo Ltd L116HTN6SPW",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Protempo Ltd"),
			DMI_MATCH(DMI_PRODUCT_NAME, "L116HTN6SPW"),
		},
		.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
	},
	{}
};

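/* Driver probe: allocate the ieee80211_hw with room for rtw_dev plus the
 * PCI private data, initialize the core, claim the device, map resources
 * and set up NAPI, read out the chip information, apply DMI quirks and
 * PHY parameters, then register with mac80211 and finally hook up the
 * interrupt. Each error path unwinds exactly the steps taken before it.
 */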
int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	rtw_pci_napi_init(rtwdev);

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	dmi_check_system(rtw88_pci_quirks);
	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);

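/* Driver remove: tear down in reverse order of probe, unregistering from
 * mac80211 first so no new work arrives while interrupts, NAPI, DMA
 * resources and the PCI device itself are being released.
 */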
void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);

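/* Shutdown hook: give the chip a chance to run its own shutdown routine,
 * then put the device into D3hot so it is quiescent across a reboot.
 */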
void rtw_pci_shutdown(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_chip_info *chip;

	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);

	pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");