linux/drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_irq_mode {
        ATH10K_PCI_IRQ_AUTO = 0,
        ATH10K_PCI_IRQ_LEGACY = 1,
        ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
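
/* Usage note: these are ordinary module parameters, so e.g. loading with
 * irq_mode=1 ("modprobe ath10k_pci irq_mode=1", assuming the usual
 * ath10k_pci module name) forces legacy interrupts instead of MSI.
 */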

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE2: target->host WMI */
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE3: host->target WMI */
        {
                .pipenum = __cpu_to_le32(3),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE4: host->target HTT */
        {
                .pipenum = __cpu_to_le32(4),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(256),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = __cpu_to_le32(5),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = __cpu_to_le32(6),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(4096),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE7 used only by Host */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
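/* Each entry below is a { service_id, pipedir, pipenum } triplet matching
 * struct service_to_pipe; the all-zero entry terminates the table.
 */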
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(4),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },

        /* (Additions here) */

        { /* must be last */
                __cpu_to_le32(0),
                __cpu_to_le32(0),
                __cpu_to_le32(0),
        },
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: the INTR_CLR register has to be written after
         * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
         * properly cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs > 1)
                return "msi-x";

        if (ar_pci->num_msi_intrs == 1)
                return "msi";

        return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret;

        lockdep_assert_held(&ar_pci->ce_lock);

        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        paddr = dma_map_single(ar->dev, skb->data,
                               skb->len + skb_tailroom(skb),
                               DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
                ath10k_warn(ar, "failed to dma map pci rx buf\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        ATH10K_SKB_CB(skb)->paddr = paddr;

        ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
        if (ret) {
                ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;

        lockdep_assert_held(&ar_pci->ce_lock);

        if (pipe->buf_sz == 0)
                return;

        if (!ce_pipe->dest_ring)
                return;

        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        while (num--) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
        }
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        spin_lock_bh(&ar_pci->ce_lock);
        __ath10k_pci_rx_post_pipe(pipe);
        spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_post(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        spin_lock_bh(&ar_pci->ce_lock);
        for (i = 0; i < CE_COUNT; i++)
                __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
        spin_unlock_bh(&ar_pci->ce_lock);
}

static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
        struct ath10k *ar = (void *)ptr;

        ath10k_pci_rx_post(ar);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
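/* Rough flow of a diag read, as implemented below: a DMA-coherent bounce
 * buffer is posted as the receive destination on the diag CE, the caller's
 * Target CPU address is translated into CE address space, and each chunk
 * (up to DIAG_TRANSFER_LIMIT bytes) is moved by polling the CE send and
 * receive completions, bounded by DIAG_ACCESS_CE_TIMEOUT_MS per step.
 */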
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);

                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
                                            0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0)
                memcpy(data, data_buf, orig_nbytes);
        else
                ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
                            address, ret);

        if (data_buf)
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
        __le32 val = 0;
        int ret;

        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
        *value = __le32_to_cpu(val);

        return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
                                     u32 src, u32 len)
{
        u32 host_addr, addr;
        int ret;

        host_addr = host_interest_item_address(src);

        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
        if (ret != 0) {
                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
                            src, ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
        if (ret != 0) {
                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
                            addr, len, ret);
                return ret;
        }

        return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
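
/* Callers pass the bare host_interest field name and the macro applies
 * HI_ITEM() to derive the offset, e.g. as in ath10k_pci_dump_registers()
 * below:
 *
 *      ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], hi_failure_state,
 *                              REG_DUMP_COUNT_QCA988X * sizeof(__le32));
 */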

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        memcpy(data_buf, data, orig_nbytes);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
                                            nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
                                                            &completed_nbytes,
                                                            &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
                            address, ret);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
        __le32 val = __cpu_to_le32(value);

        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
        u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);

        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

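/* Poll until the target reports it is awake: the delay between reads starts
 * at 5 us and grows by 5 us per iteration (capped at 50 us) until
 * PCIE_WAKE_TIMEOUT worth of delay has accumulated.
 */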
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
        int tot_delay = 0;
        int curr_delay = 5;

        while (tot_delay < PCIE_WAKE_TIMEOUT) {
                if (ath10k_pci_is_awake(ar))
                        return 0;

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }

        return -ETIMEDOUT;
}

static int ath10k_pci_wake(struct ath10k *ar)
{
        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_V_MASK);
        return ath10k_pci_wake_wait(ar);
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_RESET);
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff_head list;
        struct sk_buff *skb;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
                                             &nbytes, &transfer_id) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;

                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list)))
                cb->tx_completion(ar, skb);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes, max_nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                cb->rx_completion(ar, skb);
        }

        ath10k_pci_rx_post_pipe(pipe_info);
}

static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                struct ath10k_hif_sg_item *items, int n_items)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
        int err, i = 0;

        spin_lock_bh(&ar_pci->ce_lock);

        nentries_mask = src_ring->nentries_mask;
        sw_index = src_ring->sw_index;
        write_index = src_ring->write_index;

        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) < n_items)) {
                err = -ENOBUFS;
                goto err;
        }

        for (i = 0; i < n_items - 1; i++) {
                ath10k_dbg(ar, ATH10K_DBG_PCI,
                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                           i, items[i].paddr, items[i].len, n_items);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                                items[i].vaddr, items[i].len);

                err = ath10k_ce_send_nolock(ce_pipe,
                                            items[i].transfer_context,
                                            items[i].paddr,
                                            items[i].len,
                                            items[i].transfer_id,
                                            CE_SEND_FLAG_GATHER);
                if (err)
                        goto err;
        }
        /* `i` equals `n_items - 1` after the loop above */

        ath10k_dbg(ar, ATH10K_DBG_PCI,
                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
                   i, items[i].paddr, items[i].len, n_items);
        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
                        items[i].vaddr, items[i].len);

        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    0);
        if (err)
                goto err;

        spin_unlock_bh(&ar_pci->ce_lock);
        return 0;

err:
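        /* roll back any gather fragments already queued on the ring */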
        for (; i > 0; i--)
                __ath10k_ce_send_revert(ce_pipe);

        spin_unlock_bh(&ar_pci->ce_lock);
        return err;
}

static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
                                    size_t buf_len)
{
        return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
                                      struct ath10k_fw_crash_data *crash_data)
{
        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        int i, ret;

        lockdep_assert_held(&ar->data_lock);

        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
                                      hi_failure_state,
                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
        if (ret) {
                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err(ar, "firmware register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           __le32_to_cpu(reg_dump_values[i]),
                           __le32_to_cpu(reg_dump_values[i + 1]),
                           __le32_to_cpu(reg_dump_values[i + 2]),
                           __le32_to_cpu(reg_dump_values[i + 3]));

        if (!crash_data)
                return;

        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
                crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
        struct ath10k_fw_crash_data *crash_data;
        char uuid[50];

        spin_lock_bh(&ar->data_lock);

        ar->stats.fw_crash_counter++;

        crash_data = ath10k_debug_get_new_fw_crash_data(ar);

        if (crash_data)
                scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
        else
                scnprintf(uuid, sizeof(uuid), "n/a");

        ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
        ath10k_print_driver_info(ar);
        ath10k_pci_dump_registers(ar, crash_data);

        spin_unlock_bh(&ar->data_lock);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        del_timer_sync(&ar_pci->rx_post_retry);
}

static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        const struct service_to_pipe *entry;
        bool ul_set = false, dl_set = false;
        int i;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
                entry = &target_service_to_ce_map_wlan[i];

                if (__le32_to_cpu(entry->service_id) != service_id)
                        continue;

                switch (__le32_to_cpu(entry->pipedir)) {
                case PIPEDIR_NONE:
                        break;
                case PIPEDIR_IN:
                        WARN_ON(dl_set);
                        *dl_pipe = __le32_to_cpu(entry->pipenum);
                        dl_set = true;
                        break;
                case PIPEDIR_OUT:
                        WARN_ON(ul_set);
                        *ul_pipe = __le32_to_cpu(entry->pipenum);
                        ul_set = true;
                        break;
                case PIPEDIR_INOUT:
                        WARN_ON(dl_set);
                        WARN_ON(ul_set);
                        *dl_pipe = __le32_to_cpu(entry->pipenum);
                        *ul_pipe = __le32_to_cpu(entry->pipenum);
                        dl_set = true;
                        ul_set = true;
                        break;
                }
        }

        if (WARN_ON(!ul_set || !dl_set))
                return -ENOENT;

        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return 0;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
        u32 val;

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
        val &= ~CORE_CTRL_PCIE_REG_31_MASK;

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
        u32 val;

        val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
        val |= CORE_CTRL_PCIE_REG_31_MASK;

        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_disable_and_clear_legacy_irq(ar);
        ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                synchronize_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
        ath10k_ce_enable_interrupts(ar);
        ath10k_pci_enable_legacy_irq(ar);
        ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

        ath10k_pci_irq_enable(ar);
        ath10k_pci_rx_post(ar);

        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
        struct ath10k *ar;
        struct ath10k_ce_pipe *ce_pipe;
        struct ath10k_ce_ring *ce_ring;
        struct sk_buff *skb;
        int i;

        ar = pci_pipe->hif_ce_state;
        ce_pipe = pci_pipe->ce_hdl;
        ce_ring = ce_pipe->dest_ring;

        if (!ce_ring)
                return;

        if (!pci_pipe->buf_sz)
                return;

        for (i = 0; i < ce_ring->nentries; i++) {
                skb = ce_ring->per_transfer_context[i];
                if (!skb)
                        continue;

                ce_ring->per_transfer_context[i] = NULL;

                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_pipe;
        struct ath10k_ce_ring *ce_ring;
        struct ce_desc *ce_desc;
        struct sk_buff *skb;
        unsigned int id;
        int i;

        ar = pci_pipe->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);
        ce_pipe = pci_pipe->ce_hdl;
        ce_ring = ce_pipe->src_ring;

        if (!ce_ring)
                return;

        if (!pci_pipe->buf_sz)
                return;

        ce_desc = ce_ring->shadow_base;
        if (WARN_ON(!ce_desc))
                return;

        for (i = 0; i < ce_ring->nentries; i++) {
                skb = ce_ring->per_transfer_context[i];
                if (!skb)
                        continue;

                ce_ring->per_transfer_context[i] = NULL;
                id = MS(__le16_to_cpu(ce_desc[i].flags),
                        CE_DESC_FLAGS_META_DATA);

                ar_pci->msg_callbacks_current.tx_completion(ar, skb);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_ce_deinit_pipe(ar, i);
}

static void ath10k_pci_flush(struct ath10k *ar)
{
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

        /* Most likely the device has HTT Rx ring configured. The only way to
         * prevent the device from accessing (and possibly corrupting) host
         * memory is to reset the chip now.
         *
         * There's also no known way of masking MSI interrupts on the device.
         * For ranged MSI the CE-related interrupts can be masked. However
         * regardless how many MSI interrupts are assigned the first one
         * is always used for firmware indications (crashes) and cannot be
         * masked. To prevent the device from asserting the interrupt reset it
         * before proceeding with cleanup.
         */
        ath10k_pci_warm_reset(ar);

        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
        ath10k_pci_flush(ar);
}

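/* Exchange a BMI request/response pair with the target: the request is
 * copied into a DMA-mapped bounce buffer and sent on the BMI TX pipe; if a
 * response is expected, a receive bounce buffer is posted on the RX pipe
 * first. Completion is polled via ath10k_pci_bmi_wait().
 */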
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
        }

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* the BMI exchange completed before the timeout */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id))
                return;

        xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id, &flags))
                return;

        if (WARN_ON_ONCE(!xfer))
                return;

        if (!xfer->wait_for_resp) {
                ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        xfer->rx_done = true;
}

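/* Busy-poll both BMI pipes, yielding the CPU between polls, until the send
 * (and, if a response is expected, the receive) completes or
 * BMI_COMMUNICATION_TIMEOUT_HZ jiffies elapse.
 */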
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer)
{
        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

        while (time_before_eq(jiffies, timeout)) {
                ath10k_pci_bmi_send_done(tx_pipe);
                ath10k_pci_bmi_recv_data(rx_pipe);

                if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
                        return 0;

                schedule();
        }

        return -ETIMEDOUT;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
        u32 addr, val;

        addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
        val = ath10k_pci_read32(ar, addr);
        val |= CORE_CTRL_CPU_INTR_MASK;
        ath10k_pci_write32(ar, addr, val);

        return 0;
}

static int ath10k_pci_init_config(struct ath10k *ar)
{
        u32 interconnect_targ_addr;
        u32 pcie_state_targ_addr = 0;
        u32 pipe_cfg_targ_addr = 0;
        u32 svc_to_pipe_map = 0;
        u32 pcie_config_flags = 0;
        u32 ealloc_value;
        u32 ealloc_targ_addr;
        u32 flag2_value;
        u32 flag2_targ_addr;
        int ret = 0;

        /* Download to Target the CE Config and the service-to-CE map */
        interconnect_targ_addr =
                host_interest_item_address(HI_ITEM(hi_interconnect_state));

        /* Supply Target-side CE configuration */
        ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
                                     &pcie_state_targ_addr);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
                return ret;
        }

        if (pcie_state_targ_addr == 0) {
                ret = -EIO;
                ath10k_err(ar, "Invalid pcie state addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   pipe_cfg_addr)),
                                     &pipe_cfg_targ_addr);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
                return ret;
        }

        if (pipe_cfg_targ_addr == 0) {
                ret = -EIO;
                ath10k_err(ar, "Invalid pipe cfg addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
                                        sizeof(target_ce_config_wlan));

        if (ret != 0) {
                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   svc_to_pipe_map)),
                                     &svc_to_pipe_map);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
                return ret;
        }

        if (svc_to_pipe_map == 0) {
                ret = -EIO;
                ath10k_err(ar, "Invalid svc_to_pipe map\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
                                        target_service_to_ce_map_wlan,
                                        sizeof(target_service_to_ce_map_wlan));
        if (ret != 0) {
                ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   config_flags)),
                                     &pcie_config_flags);
        if (ret != 0) {
                ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
                return ret;
        }

        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

        ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
                                           offsetof(struct pcie_state,
                                                    config_flags)),
                                      pcie_config_flags);
        if (ret != 0) {
                ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
                return ret;
        }

        /* configure early allocation */
        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

        ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
        if (ret != 0) {
1604                ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
1605                return ret;
1606        }
1607
1608        /* first bank is switched to IRAM */
1609        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1610                         HI_EARLY_ALLOC_MAGIC_MASK);
1611        ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1612                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1613
1614        ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
1615        if (ret != 0) {
1616                ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
1617                return ret;
1618        }
1619
1620        /* Tell Target to proceed with initialization */
1621        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1622
1623        ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
1624        if (ret != 0) {
1625                ath10k_err(ar, "Failed to get option val: %d\n", ret);
1626                return ret;
1627        }
1628
1629        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1630
1631        ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
1632        if (ret != 0) {
1633                ath10k_err(ar, "Failed to set option val: %d\n", ret);
1634                return ret;
1635        }
1636
1637        return 0;
1638}
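
/*
 * ath10k_pci_init_config() repeats one pattern per host-interest item:
 * diag-read a target-side pointer, validate that it is non-zero, then
 * diag-write the host-provided table into target memory. A minimal sketch
 * of that step, assuming a hypothetical helper name:
 *
 *	static int ath10k_pci_put_table(struct ath10k *ar, u32 ptr_addr,
 *					const void *tbl, size_t len)
 *	{
 *		u32 dst = 0;
 *		int ret;
 *
 *		ret = ath10k_pci_diag_read32(ar, ptr_addr, &dst);
 *		if (ret)
 *			return ret;
 *		if (dst == 0)
 *			return -EIO;
 *		return ath10k_pci_diag_write_mem(ar, dst, tbl, len);
 *	}
 *
 * The final HI_OPTION_EARLY_CFG_DONE write into hi_option_flag2 is what
 * releases the target firmware to continue initialisation.
 */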
1639
1640static int ath10k_pci_alloc_pipes(struct ath10k *ar)
1641{
1642        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1643        struct ath10k_pci_pipe *pipe;
1644        int i, ret;
1645
1646        for (i = 0; i < CE_COUNT; i++) {
1647                pipe = &ar_pci->pipe_info[i];
1648                pipe->ce_hdl = &ar_pci->ce_states[i];
1649                pipe->pipe_num = i;
1650                pipe->hif_ce_state = ar;
1651
1652                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
1653                                           ath10k_pci_ce_send_done,
1654                                           ath10k_pci_ce_recv_data);
1655                if (ret) {
1656                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1657                                   i, ret);
1658                        return ret;
1659                }
1660
1661                /* Last CE is Diagnostic Window */
1662                if (i == CE_COUNT - 1) {
1663                        ar_pci->ce_diag = pipe->ce_hdl;
1664                        continue;
1665                }
1666
1667                pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
1668        }
1669
1670        return 0;
1671}
1672
1673static void ath10k_pci_free_pipes(struct ath10k *ar)
1674{
1675        int i;
1676
1677        for (i = 0; i < CE_COUNT; i++)
1678                ath10k_ce_free_pipe(ar, i);
1679}
1680
1681static int ath10k_pci_init_pipes(struct ath10k *ar)
1682{
1683        int i, ret;
1684
1685        for (i = 0; i < CE_COUNT; i++) {
1686                ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
1687                if (ret) {
1688                        ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
1689                                   i, ret);
1690                        return ret;
1691                }
1692        }
1693
1694        return 0;
1695}
1696
1697static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
1698{
1699        return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1700               FW_IND_EVENT_PENDING;
1701}
1702
1703static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1704{
1705        u32 val;
1706
1707        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1708        val &= ~FW_IND_EVENT_PENDING;
1709        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
1710}
1711
1712/* this function effectively clears target memory controller assert line */
1713static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1714{
1715        u32 val;
1716
1717        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1718        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1719                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
1720        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1721
1722        msleep(10);
1723
1724        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1725        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1726                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1727        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1728
1729        msleep(10);
1730}
1731
1732static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
1733{
1734        u32 val;
1735
1736        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1737
1738        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1739                                SOC_RESET_CONTROL_ADDRESS);
1740        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1741                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1742}
1743
1744static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
1745{
1746        u32 val;
1747
1748        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1749                                SOC_RESET_CONTROL_ADDRESS);
1750
1751        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1752                           val | SOC_RESET_CONTROL_CE_RST_MASK);
1753        msleep(10);
1754        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1755                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1756}
1757
1758static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
1759{
1760        u32 val;
1761
1762        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1763                                SOC_LF_TIMER_CONTROL0_ADDRESS);
1764        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1765                           SOC_LF_TIMER_CONTROL0_ADDRESS,
1766                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1767}
1768
1769static int ath10k_pci_warm_reset(struct ath10k *ar)
1770{
1771        int ret;
1772
1773        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
1774
1775        spin_lock_bh(&ar->data_lock);
1776        ar->stats.fw_warm_reset_counter++;
1777        spin_unlock_bh(&ar->data_lock);
1778
1779        ath10k_pci_irq_disable(ar);
1780
1781        /* Make sure the target CPU is not doing anything dangerous: e.g. if it
1782         * were to access the copy engine while the host performs a copy engine
1783         * reset, the device could confuse the PCI-E controller to the point of
1784         * bringing the host system to a complete stop (i.e. a hang).
1785         */
1786        ath10k_pci_warm_reset_si0(ar);
1787        ath10k_pci_warm_reset_cpu(ar);
1788        ath10k_pci_init_pipes(ar);
1789        ath10k_pci_wait_for_target_init(ar);
1790
1791        ath10k_pci_warm_reset_clear_lf(ar);
1792        ath10k_pci_warm_reset_ce(ar);
1793        ath10k_pci_warm_reset_cpu(ar);
1794        ath10k_pci_init_pipes(ar);
1795
1796        ret = ath10k_pci_wait_for_target_init(ar);
1797        if (ret) {
1798                ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
1799                return ret;
1800        }
1801
1802        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
1803
1804        return 0;
1805}
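
/*
 * The warm reset above is deliberately performed twice around a CE reset:
 * first clear the SI0 assert and warm-reset the CPU so the target cannot
 * touch the copy engine, then clear the LF timer, reset the CE block and
 * warm-reset the CPU again before waiting for FW_IND_INITIALIZED. The first
 * ath10k_pci_wait_for_target_init() return value is intentionally ignored;
 * only the final wait decides success.
 */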
1806
1807static int ath10k_pci_chip_reset(struct ath10k *ar)
1808{
1809        int i, ret;
1810        u32 val;
1811
1812        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n");
1813
1814        /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
1815         * It is thus preferred to use warm reset, which is safer but may not be
1816         * able to recover the device from all possible failure scenarios.
1817         *
1818         * Warm reset doesn't always work on the first try, so attempt it a few
1819         * times before giving up.
1820         */
1821        for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
1822                ret = ath10k_pci_warm_reset(ar);
1823                if (ret) {
1824                        ath10k_warn(ar, "warm reset attempt %d of %d failed: %d\n",
1825                                    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
1826                                    ret);
1827                        continue;
1828                }
1829
1830                /* FIXME: Sometimes the copy engine doesn't recover after warm
1831                 * reset. In most cases this needs a cold reset. In some of these
1832                 * cases the device is in such a state that a cold reset may
1833                 * lock up the host.
1834                 *
1835                 * Reading any host interest register via the copy engine is
1836                 * sufficient to verify whether the device is capable of booting
1837                 * the firmware blob.
1838                 */
1839                ret = ath10k_pci_init_pipes(ar);
1840                if (ret) {
1841                        ath10k_warn(ar, "failed to init copy engine: %d\n",
1842                                    ret);
1843                        continue;
1844                }
1845
1846                ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
1847                                             &val);
1848                if (ret) {
1849                        ath10k_warn(ar, "failed to poke copy engine: %d\n",
1850                                    ret);
1851                        continue;
1852                }
1853
1854                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
1855                return 0;
1856        }
1857
1858        if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
1859                ath10k_warn(ar, "refusing cold reset as requested\n");
1860                return -EPERM;
1861        }
1862
1863        ret = ath10k_pci_cold_reset(ar);
1864        if (ret) {
1865                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
1866                return ret;
1867        }
1868
1869        ret = ath10k_pci_wait_for_target_init(ar);
1870        if (ret) {
1871                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
1872                            ret);
1873                return ret;
1874        }
1875
1876        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n");
1877
1878        return 0;
1879}
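
/*
 * Recovery policy: up to ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS warm resets,
 * each verified by re-initialising the copy engine pipes and diag-reading a
 * host-interest register, then a single cold reset as the last resort,
 * unless reset_mode=1 (warm only) forbids it, in which case -EPERM is
 * returned.
 */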
1880
1881static int ath10k_pci_hif_power_up(struct ath10k *ar)
1882{
1883        int ret;
1884
1885        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
1886
1887        ret = ath10k_pci_wake(ar);
1888        if (ret) {
1889                ath10k_err(ar, "failed to wake up target: %d\n", ret);
1890                return ret;
1891        }
1892
1893        /*
1894         * Bring the target up cleanly.
1895         *
1896         * The target may be in an undefined state with an AUX-powered Target
1897         * and a Host in WoW mode. If the Host crashes, loses power, or is
1898         * restarted (without unloading the driver) then the Target is left
1899         * (aux) powered and running. On a subsequent driver load, the Target
1900         * is in an unexpected state. We try to catch that here in order to
1901         * reset the Target and retry the probe.
1902         */
1903        ret = ath10k_pci_chip_reset(ar);
1904        if (ret) {
1905                ath10k_err(ar, "failed to reset chip: %d\n", ret);
1906                goto err_sleep;
1907        }
1908
1909        ret = ath10k_pci_init_pipes(ar);
1910        if (ret) {
1911                ath10k_err(ar, "failed to initialize CE: %d\n", ret);
1912                goto err_sleep;
1913        }
1914
1915        ret = ath10k_pci_init_config(ar);
1916        if (ret) {
1917                ath10k_err(ar, "failed to setup init config: %d\n", ret);
1918                goto err_ce;
1919        }
1920
1921        ret = ath10k_pci_wake_target_cpu(ar);
1922        if (ret) {
1923                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
1924                goto err_ce;
1925        }
1926
1927        return 0;
1928
1929err_ce:
1930        ath10k_pci_ce_deinit(ar);
1931
1932err_sleep:
1933        ath10k_pci_sleep(ar);
1934        return ret;
1935}
1936
1937static void ath10k_pci_hif_power_down(struct ath10k *ar)
1938{
1939        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
1940
1941        /* Currently hif_power_up performs effectively a reset and hif_stop
1942         * resets the chip as well so there's no point in resetting here.
1943         */
1944
1945        ath10k_pci_sleep(ar);
1946}
1947
1948#ifdef CONFIG_PM
1949
1950#define ATH10K_PCI_PM_CONTROL 0x44
1951
1952static int ath10k_pci_hif_suspend(struct ath10k *ar)
1953{
1954        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1955        struct pci_dev *pdev = ar_pci->pdev;
1956        u32 val;
1957
1958        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1959
1960        if ((val & 0x000000ff) != 0x3) {
1961                pci_save_state(pdev);
1962                pci_disable_device(pdev);
1963                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1964                                       (val & 0xffffff00) | 0x03);
1965        }
1966
1967        return 0;
1968}
1969
1970static int ath10k_pci_hif_resume(struct ath10k *ar)
1971{
1972        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1973        struct pci_dev *pdev = ar_pci->pdev;
1974        u32 val;
1975
1976        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1977
1978        if ((val & 0x000000ff) != 0) {
1979                pci_restore_state(pdev);
1980                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1981                                       val & 0xffffff00);
1982                /*
1983                 * Suspend/Resume resets the PCI configuration space,
1984                 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1985                 * to keep PCI Tx retries from interfering with C3 CPU state
1986                 */
1987                pci_read_config_dword(pdev, 0x40, &val);
1988
1989                if ((val & 0x0000ff00) != 0)
1990                        pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1991        }
1992
1993        return 0;
1994}
1995#endif
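
/*
 * The suspend/resume pair manipulates the device's PCI power management
 * registers directly: offset 0x44 (ATH10K_PCI_PM_CONTROL) is presumed to be
 * the PMCSR of this chip's PM capability, whose low two bits select the
 * power state (0x0 = D0, 0x3 = D3hot). Suspend saves config space and drops
 * to D3hot; resume restores D0 and then clears the RETRY_TIMEOUT byte at
 * config offset 0x41 so PCI retries cannot interfere with the C3 CPU state.
 */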
1996
1997static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1998        .tx_sg                  = ath10k_pci_hif_tx_sg,
1999        .diag_read              = ath10k_pci_hif_diag_read,
2000        .diag_write             = ath10k_pci_diag_write_mem,
2001        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2002        .start                  = ath10k_pci_hif_start,
2003        .stop                   = ath10k_pci_hif_stop,
2004        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2005        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2006        .send_complete_check    = ath10k_pci_hif_send_complete_check,
2007        .set_callbacks          = ath10k_pci_hif_set_callbacks,
2008        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2009        .power_up               = ath10k_pci_hif_power_up,
2010        .power_down             = ath10k_pci_hif_power_down,
2011        .read32                 = ath10k_pci_read32,
2012        .write32                = ath10k_pci_write32,
2013#ifdef CONFIG_PM
2014        .suspend                = ath10k_pci_hif_suspend,
2015        .resume                 = ath10k_pci_hif_resume,
2016#endif
2017};
2018
2019static void ath10k_pci_ce_tasklet(unsigned long ptr)
2020{
2021        struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2022        struct ath10k_pci *ar_pci = pipe->ar_pci;
2023
2024        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2025}
2026
2027static void ath10k_msi_err_tasklet(unsigned long data)
2028{
2029        struct ath10k *ar = (struct ath10k *)data;
2030
2031        if (!ath10k_pci_has_fw_crashed(ar)) {
2032                ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
2033                return;
2034        }
2035
2036        ath10k_pci_fw_crashed_clear(ar);
2037        ath10k_pci_fw_crashed_dump(ar);
2038}
2039
2040/*
2041 * Handler for a per-engine interrupt on a PARTICULAR CE.
2042 * This is used in cases where each CE has a private MSI interrupt.
2043 */
2044static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2045{
2046        struct ath10k *ar = arg;
2047        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2048        int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2049
2050        if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2051                ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
2052                            ce_id);
2053                return IRQ_HANDLED;
2054        }
2055
2056        /*
2057         * NOTE: We are able to derive ce_id from irq because we
2058         * use a one-to-one mapping for CEs 0..5.
2059         * CEs 6 & 7 do not use interrupts at all.
2060         *
2061         * This mapping must be kept in sync with the mapping
2062         * used by firmware.
2063         */
2064        tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2065        return IRQ_HANDLED;
2066}
2067
2068static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2069{
2070        struct ath10k *ar = arg;
2071        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2072
2073        tasklet_schedule(&ar_pci->msi_fw_err);
2074        return IRQ_HANDLED;
2075}
2076
2077/*
2078 * Top-level interrupt handler for all PCI interrupts from a Target.
2079 * When a block of MSI interrupts is allocated, this top-level handler
2080 * is not used; instead, we directly call the correct sub-handler.
2081 */
2082static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2083{
2084        struct ath10k *ar = arg;
2085        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2086
2087        if (ar_pci->num_msi_intrs == 0) {
2088                if (!ath10k_pci_irq_pending(ar))
2089                        return IRQ_NONE;
2090
2091                ath10k_pci_disable_and_clear_legacy_irq(ar);
2092        }
2093
2094        tasklet_schedule(&ar_pci->intr_tq);
2095
2096        return IRQ_HANDLED;
2097}
2098
2099static void ath10k_pci_tasklet(unsigned long data)
2100{
2101        struct ath10k *ar = (struct ath10k *)data;
2102        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2103
2104        if (ath10k_pci_has_fw_crashed(ar)) {
2105                ath10k_pci_fw_crashed_clear(ar);
2106                ath10k_pci_fw_crashed_dump(ar);
2107                return;
2108        }
2109
2110        ath10k_ce_per_engine_service_any(ar);
2111
2112        /* Re-enable legacy irq that was disabled in the irq handler */
2113        if (ar_pci->num_msi_intrs == 0)
2114                ath10k_pci_enable_legacy_irq(ar);
2115}
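
/*
 * Legacy interrupt flow: the hard handler above masks and clears the legacy
 * line (ath10k_pci_disable_and_clear_legacy_irq) before scheduling this
 * tasklet; the tasklet services all copy engines and, still in the bottom
 * half, re-enables the line. This keeps the level-triggered INTx from
 * storming while completions are being reaped.
 */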
2116
2117static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2118{
2119        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2120        int ret, i;
2121
2122        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2123                          ath10k_pci_msi_fw_handler,
2124                          IRQF_SHARED, "ath10k_pci", ar);
2125        if (ret) {
2126                ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
2127                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2128                return ret;
2129        }
2130
2131        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2132                ret = request_irq(ar_pci->pdev->irq + i,
2133                                  ath10k_pci_per_engine_handler,
2134                                  IRQF_SHARED, "ath10k_pci", ar);
2135                if (ret) {
2136                        ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
2137                                    ar_pci->pdev->irq + i, ret);
2138
2139                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2140                                free_irq(ar_pci->pdev->irq + i, ar);
2141
2142                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2143                        return ret;
2144                }
2145        }
2146
2147        return 0;
2148}
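
/*
 * Note the unwind above: when request_irq() fails mid-loop, only the CE
 * vectors already claimed (i - 1 down to MSI_ASSIGN_CE_INITIAL) plus the
 * firmware vector are freed, so no IRQ is leaked and none is double-freed.
 */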
2149
2150static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2151{
2152        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2153        int ret;
2154
2155        ret = request_irq(ar_pci->pdev->irq,
2156                          ath10k_pci_interrupt_handler,
2157                          IRQF_SHARED, "ath10k_pci", ar);
2158        if (ret) {
2159                ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2160                            ar_pci->pdev->irq, ret);
2161                return ret;
2162        }
2163
2164        return 0;
2165}
2166
2167static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2168{
2169        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2170        int ret;
2171
2172        ret = request_irq(ar_pci->pdev->irq,
2173                          ath10k_pci_interrupt_handler,
2174                          IRQF_SHARED, "ath10k_pci", ar);
2175        if (ret) {
2176                ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2177                            ar_pci->pdev->irq, ret);
2178                return ret;
2179        }
2180
2181        return 0;
2182}
2183
2184static int ath10k_pci_request_irq(struct ath10k *ar)
2185{
2186        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2187
2188        switch (ar_pci->num_msi_intrs) {
2189        case 0:
2190                return ath10k_pci_request_irq_legacy(ar);
2191        case 1:
2192                return ath10k_pci_request_irq_msi(ar);
2193        case MSI_NUM_REQUEST:
2194                return ath10k_pci_request_irq_msix(ar);
2195        }
2196
2197        ath10k_warn(ar, "unknown irq configuration upon request\n");
2198        return -EINVAL;
2199}
2200
2201static void ath10k_pci_free_irq(struct ath10k *ar)
2202{
2203        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2204        int i;
2205
2206        /* There's at least one interrupt regardless of whether it's legacy INTR,
2207         * MSI or MSI-X. */
2208        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2209                free_irq(ar_pci->pdev->irq + i, ar);
2210}
2211
2212static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2213{
2214        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2215        int i;
2216
2217        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2218        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2219                     (unsigned long)ar);
2220
2221        for (i = 0; i < CE_COUNT; i++) {
2222                ar_pci->pipe_info[i].ar_pci = ar_pci;
2223                tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2224                             (unsigned long)&ar_pci->pipe_info[i]);
2225        }
2226}
2227
2228static int ath10k_pci_init_irq(struct ath10k *ar)
2229{
2230        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2231        int ret;
2232
2233        ath10k_pci_init_irq_tasklets(ar);
2234
2235        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2236                ath10k_info(ar, "limiting irq mode to: %d\n",
2237                            ath10k_pci_irq_mode);
2238
2239        /* Try MSI-X */
2240        if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2241                ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2242                ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2243                                           ar_pci->num_msi_intrs);
2244                if (ret > 0)
2245                        return 0;
2246
2247                /* fall-through */
2248        }
2249
2250        /* Try MSI */
2251        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2252                ar_pci->num_msi_intrs = 1;
2253                ret = pci_enable_msi(ar_pci->pdev);
2254                if (ret == 0)
2255                        return 0;
2256
2257                /* fall-through */
2258        }
2259
2260        /* Try legacy irq
2261         *
2262         * A potential race occurs here: the CORE_BASE write
2263         * depends on the target correctly decoding the AXI address, but
2264         * the host won't know when the target writes BAR to CORE_CTRL.
2265         * This write might get lost if the target has NOT written BAR.
2266         * For now, fix the race by repeating the write in the
2267         * synchronization check below. */
2268        ar_pci->num_msi_intrs = 0;
2269
2270        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2271                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2272
2273        return 0;
2274}
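
/*
 * Interrupt mode selection falls back in order: a full block of
 * MSI_NUM_REQUEST MSI vectors, a single MSI vector, then legacy INTx. The
 * chosen mode is encoded in ar_pci->num_msi_intrs (MSI_NUM_REQUEST, 1 or 0)
 * and is what ath10k_pci_request_irq() and ath10k_pci_deinit_irq() switch
 * on later. Only the legacy path needs the PCIE_INTR_ENABLE write above.
 */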
2275
2276static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2277{
2278        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2279                           0);
2280}
2281
2282static int ath10k_pci_deinit_irq(struct ath10k *ar)
2283{
2284        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2285
2286        switch (ar_pci->num_msi_intrs) {
2287        case 0:
2288                ath10k_pci_deinit_irq_legacy(ar);
2289                return 0;
2290        case 1:
2291                /* fall-through */
2292        case MSI_NUM_REQUEST:
2293                pci_disable_msi(ar_pci->pdev);
2294                return 0;
2295        default:
2296                pci_disable_msi(ar_pci->pdev);
2297        }
2298
2299        ath10k_warn(ar, "unknown irq configuration upon deinit\n");
2300        return -EINVAL;
2301}
2302
2303static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2304{
2305        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2306        unsigned long timeout;
2307        u32 val;
2308
2309        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2310
2311        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2312
2313        do {
2314                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2315
2316                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2317                           val);
2318
2319                /* target should never return this */
2320                if (val == 0xffffffff)
2321                        continue;
2322
2323                /* the device has crashed so don't bother trying anymore */
2324                if (val & FW_IND_EVENT_PENDING)
2325                        break;
2326
2327                if (val & FW_IND_INITIALIZED)
2328                        break;
2329
2330                if (ar_pci->num_msi_intrs == 0)
2331                        /* Fix potential race by repeating CORE_BASE writes */
2332                        ath10k_pci_enable_legacy_irq(ar);
2333
2334                mdelay(10);
2335        } while (time_before(jiffies, timeout));
2336
2337        ath10k_pci_disable_and_clear_legacy_irq(ar);
2338        ath10k_pci_irq_msi_fw_mask(ar);
2339
2340        if (val == 0xffffffff) {
2341                ath10k_err(ar, "failed to read device register, device is gone\n");
2342                return -EIO;
2343        }
2344
2345        if (val & FW_IND_EVENT_PENDING) {
2346                ath10k_warn(ar, "device has crashed during init\n");
2347                ath10k_pci_fw_crashed_clear(ar);
2348                ath10k_pci_fw_crashed_dump(ar);
2349                return -ECOMM;
2350        }
2351
2352        if (!(val & FW_IND_INITIALIZED)) {
2353                ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2354                           val);
2355                return -ETIMEDOUT;
2356        }
2357
2358        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2359        return 0;
2360}
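
/*
 * The wait loop polls FW_INDICATOR_ADDRESS every 10 ms for up to
 * ATH10K_PCI_TARGET_WAIT (3000 ms). A reading of 0xffffffff means the
 * device has fallen off the bus (PCIe reads of a dead device return all
 * ones), FW_IND_EVENT_PENDING means the firmware crashed during init, and
 * FW_IND_INITIALIZED is the success indication.
 */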
2361
2362static int ath10k_pci_cold_reset(struct ath10k *ar)
2363{
2364        int i;
2365        u32 val;
2366
2367        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2368
2369        spin_lock_bh(&ar->data_lock);
2370
2371        ar->stats.fw_cold_reset_counter++;
2372
2373        spin_unlock_bh(&ar->data_lock);
2374
2375        /* Put Target, including PCIe, into RESET. */
2376        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2377        val |= 1;
2378        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2379
2380        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2381                if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2382                                          RTC_STATE_COLD_RESET_MASK)
2383                        break;
2384                msleep(1);
2385        }
2386
2387        /* Pull Target, including PCIe, out of RESET. */
2388        val &= ~1;
2389        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2390
2391        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2392                if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2393                                            RTC_STATE_COLD_RESET_MASK))
2394                        break;
2395                msleep(1);
2396        }
2397
2398        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2399
2400        return 0;
2401}
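
/*
 * Cold reset toggles bit 0 of SOC_GLOBAL_RESET_ADDRESS and polls
 * RTC_STATE_ADDRESS for RTC_STATE_COLD_RESET_MASK in both directions:
 * first waiting for the target to enter reset, then, after clearing the
 * bit, waiting for it to leave. Each poll loop gives up silently after
 * roughly ATH_PCI_RESET_WAIT_MAX milliseconds; the function itself always
 * returns 0.
 */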
2402
2403static int ath10k_pci_claim(struct ath10k *ar)
2404{
2405        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2406        struct pci_dev *pdev = ar_pci->pdev;
2407        u32 lcr_val;
2408        int ret;
2409
2410        pci_set_drvdata(pdev, ar);
2411
2412        ret = pci_enable_device(pdev);
2413        if (ret) {
2414                ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2415                return ret;
2416        }
2417
2418        ret = pci_request_region(pdev, BAR_NUM, "ath");
2419        if (ret) {
2420                ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2421                           ret);
2422                goto err_device;
2423        }
2424
2425        /* Target expects 32 bit DMA. Enforce it. */
2426        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2427        if (ret) {
2428                ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2429                goto err_region;
2430        }
2431
2432        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2433        if (ret) {
2434                ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2435                           ret);
2436                goto err_region;
2437        }
2438
2439        pci_set_master(pdev);
2440
2441        /* Workaround: Disable ASPM */
2442        pci_read_config_dword(pdev, 0x80, &lcr_val);
2443        pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2444
2445        /* Arrange for access to Target SoC registers. */
2446        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2447        if (!ar_pci->mem) {
2448                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2449                ret = -EIO;
2450                goto err_master;
2451        }
2452
2453        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2454        return 0;
2455
2456err_master:
2457        pci_clear_master(pdev);
2458
2459err_region:
2460        pci_release_region(pdev, BAR_NUM);
2461
2462err_device:
2463        pci_disable_device(pdev);
2464
2465        return ret;
2466}
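
/*
 * ath10k_pci_claim() brings the device up in the canonical PCI order:
 * enable, request BAR_NUM, force 32-bit streaming and coherent DMA masks,
 * set bus mastering, then iomap the BAR. The config write at offset 0x80 is
 * presumed to be this chip's PCIe Link Control register; clearing its low
 * byte disables ASPM as a workaround. The error paths unwind in exact
 * reverse order.
 */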
2467
2468static void ath10k_pci_release(struct ath10k *ar)
2469{
2470        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2471        struct pci_dev *pdev = ar_pci->pdev;
2472
2473        pci_iounmap(pdev, ar_pci->mem);
2474        pci_release_region(pdev, BAR_NUM);
2475        pci_clear_master(pdev);
2476        pci_disable_device(pdev);
2477}
2478
2479static int ath10k_pci_probe(struct pci_dev *pdev,
2480                            const struct pci_device_id *pci_dev)
2481{
2482        int ret = 0;
2483        struct ath10k *ar;
2484        struct ath10k_pci *ar_pci;
2485        u32 chip_id;
2486
2487        ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
2488                                ATH10K_BUS_PCI,
2489                                &ath10k_pci_hif_ops);
2490        if (!ar) {
2491                dev_err(&pdev->dev, "failed to allocate core\n");
2492                return -ENOMEM;
2493        }
2494
2495        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2496
2497        ar_pci = ath10k_pci_priv(ar);
2498        ar_pci->pdev = pdev;
2499        ar_pci->dev = &pdev->dev;
2500        ar_pci->ar = ar;
2501
2502        spin_lock_init(&ar_pci->ce_lock);
2503        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2504                    (unsigned long)ar);
2505
2506        ret = ath10k_pci_claim(ar);
2507        if (ret) {
2508                ath10k_err(ar, "failed to claim device: %d\n", ret);
2509                goto err_core_destroy;
2510        }
2511
2512        ret = ath10k_pci_wake(ar);
2513        if (ret) {
2514                ath10k_err(ar, "failed to wake up: %d\n", ret);
2515                goto err_release;
2516        }
2517
2518        chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2519        if (chip_id == 0xffffffff) {
2520                ath10k_err(ar, "failed to get chip id\n");
                ret = -ENODEV;	/* otherwise ret is still 0 from the wake above */
2521                goto err_sleep;
2522        }
2523
2524        ret = ath10k_pci_alloc_pipes(ar);
2525        if (ret) {
2526                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2527                           ret);
2528                goto err_sleep;
2529        }
2530
2531        ath10k_pci_ce_deinit(ar);
2532        ath10k_pci_irq_disable(ar);
2533
2534        ret = ath10k_pci_init_irq(ar);
2535        if (ret) {
2536                ath10k_err(ar, "failed to init irqs: %d\n", ret);
2537                goto err_free_pipes;
2538        }
2539
2540        ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
2541                    ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2542                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2543
2544        ret = ath10k_pci_request_irq(ar);
2545        if (ret) {
2546                ath10k_warn(ar, "failed to request irqs: %d\n", ret);
2547                goto err_deinit_irq;
2548        }
2549
2550        ath10k_pci_sleep(ar);
2551
2552        ret = ath10k_core_register(ar, chip_id);
2553        if (ret) {
2554                ath10k_err(ar, "failed to register driver core: %d\n", ret);
2555                goto err_free_irq;
2556        }
2557
2558        return 0;
2559
2560err_free_irq:
2561        ath10k_pci_free_irq(ar);
2562        ath10k_pci_kill_tasklet(ar);
2563
2564err_deinit_irq:
2565        ath10k_pci_deinit_irq(ar);
2566
2567err_free_pipes:
2568        ath10k_pci_free_pipes(ar);
2569
2570err_sleep:
2571        ath10k_pci_sleep(ar);
2572
2573err_release:
2574        ath10k_pci_release(ar);
2575
2576err_core_destroy:
2577        ath10k_core_destroy(ar);
2578
2579        return ret;
2580}
2581
2582static void ath10k_pci_remove(struct pci_dev *pdev)
2583{
2584        struct ath10k *ar = pci_get_drvdata(pdev);
2585        struct ath10k_pci *ar_pci;
2586
2587        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
2588
2589        if (!ar)
2590                return;
2591
2592        ar_pci = ath10k_pci_priv(ar);
2593
2594        if (!ar_pci)
2595                return;
2596
2597        ath10k_core_unregister(ar);
2598        ath10k_pci_free_irq(ar);
2599        ath10k_pci_kill_tasklet(ar);
2600        ath10k_pci_deinit_irq(ar);
2601        ath10k_pci_ce_deinit(ar);
2602        ath10k_pci_free_pipes(ar);
2603        ath10k_pci_release(ar);
2604        ath10k_core_destroy(ar);
2605}
2606
2607MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2608
2609static struct pci_driver ath10k_pci_driver = {
2610        .name = "ath10k_pci",
2611        .id_table = ath10k_pci_id_table,
2612        .probe = ath10k_pci_probe,
2613        .remove = ath10k_pci_remove,
2614};
2615
2616static int __init ath10k_pci_init(void)
2617{
2618        int ret;
2619
2620        ret = pci_register_driver(&ath10k_pci_driver);
2621        if (ret)
2622                printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
2623                       ret);
2624
2625        return ret;
2626}
2627module_init(ath10k_pci_init);
2628
2629static void __exit ath10k_pci_exit(void)
2630{
2631        pci_unregister_driver(&ath10k_pci_driver);
2632}
2633
2634module_exit(ath10k_pci_exit);
2635
2636MODULE_AUTHOR("Qualcomm Atheros");
2637MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2638MODULE_LICENSE("Dual BSD/GPL");
2639MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2640MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
2641MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
2642MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2643