linux/drivers/net/wireless/ath/ath10k/pci.c
   1/*
   2 * Copyright (c) 2005-2011 Atheros Communications Inc.
   3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
   4 *
   5 * Permission to use, copy, modify, and/or distribute this software for any
   6 * purpose with or without fee is hereby granted, provided that the above
   7 * copyright notice and this permission notice appear in all copies.
   8 *
   9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  16 */
  17
  18#include <linux/pci.h>
  19#include <linux/module.h>
  20#include <linux/interrupt.h>
  21#include <linux/spinlock.h>
  22#include <linux/bitops.h>
  23
  24#include "core.h"
  25#include "debug.h"
  26
  27#include "targaddrs.h"
  28#include "bmi.h"
  29
  30#include "hif.h"
  31#include "htc.h"
  32
  33#include "ce.h"
  34#include "pci.h"
  35
  36enum ath10k_pci_reset_mode {
  37        ATH10K_PCI_RESET_AUTO = 0,
  38        ATH10K_PCI_RESET_WARM_ONLY = 1,
  39};
  40
  41static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
  42static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
  43
  44module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
  45MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
  46
  47module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
  48MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
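/* Example usage (a hypothetical invocation; this file builds as
 * ath10k_pci.ko, so the parameters live under that module name):
 *
 *   modprobe ath10k_pci irq_mode=1 reset_mode=0
 *
 * or, equivalently, ath10k_pci.irq_mode=1 on the kernel command line.
 */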
  49
   50/* how long to wait for the target to initialise, in ms */
  51#define ATH10K_PCI_TARGET_WAIT 3000
  52#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
  53
  54static const struct pci_device_id ath10k_pci_id_table[] = {
  55        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
  56        { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
  57        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
  58        { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
  59        { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
  60        { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
  61        { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
  62        { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
  63        {0}
  64};
  65
  66static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
  67        /* QCA988X pre 2.0 chips are not supported because they need some nasty
  68         * hacks. ath10k doesn't have them and these devices crash horribly
  69         * because of that.
  70         */
  71        { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
  72
  73        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  74        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  75        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  76        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  77        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  78
  79        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
  80        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
  81        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
  82        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
  83        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
  84
  85        { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
  86
  87        { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
  88
  89        { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
  90
  91        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
  92        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
  93
  94        { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
  95};
  96
  97static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
  98static int ath10k_pci_cold_reset(struct ath10k *ar);
  99static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 100static int ath10k_pci_init_irq(struct ath10k *ar);
 101static int ath10k_pci_deinit_irq(struct ath10k *ar);
 102static int ath10k_pci_request_irq(struct ath10k *ar);
 103static void ath10k_pci_free_irq(struct ath10k *ar);
 104static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
 105                               struct ath10k_ce_pipe *rx_pipe,
 106                               struct bmi_xfer *xfer);
 107static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 108static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
 109static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 110static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
 111static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
 112static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
 113static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
 114
 115static struct ce_attr host_ce_config_wlan[] = {
 116        /* CE0: host->target HTC control and raw streams */
 117        {
 118                .flags = CE_ATTR_FLAGS,
 119                .src_nentries = 16,
 120                .src_sz_max = 256,
 121                .dest_nentries = 0,
 122                .send_cb = ath10k_pci_htc_tx_cb,
 123        },
 124
 125        /* CE1: target->host HTT + HTC control */
 126        {
 127                .flags = CE_ATTR_FLAGS,
 128                .src_nentries = 0,
 129                .src_sz_max = 2048,
 130                .dest_nentries = 512,
 131                .recv_cb = ath10k_pci_htt_htc_rx_cb,
 132        },
 133
 134        /* CE2: target->host WMI */
 135        {
 136                .flags = CE_ATTR_FLAGS,
 137                .src_nentries = 0,
 138                .src_sz_max = 2048,
 139                .dest_nentries = 128,
 140                .recv_cb = ath10k_pci_htc_rx_cb,
 141        },
 142
 143        /* CE3: host->target WMI */
 144        {
 145                .flags = CE_ATTR_FLAGS,
 146                .src_nentries = 32,
 147                .src_sz_max = 2048,
 148                .dest_nentries = 0,
 149                .send_cb = ath10k_pci_htc_tx_cb,
 150        },
 151
 152        /* CE4: host->target HTT */
 153        {
 154                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
 155                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
 156                .src_sz_max = 256,
 157                .dest_nentries = 0,
 158                .send_cb = ath10k_pci_htt_tx_cb,
 159        },
 160
 161        /* CE5: target->host HTT (HIF->HTT) */
 162        {
 163                .flags = CE_ATTR_FLAGS,
 164                .src_nentries = 0,
 165                .src_sz_max = 512,
 166                .dest_nentries = 512,
 167                .recv_cb = ath10k_pci_htt_rx_cb,
 168        },
 169
 170        /* CE6: target autonomous hif_memcpy */
 171        {
 172                .flags = CE_ATTR_FLAGS,
 173                .src_nentries = 0,
 174                .src_sz_max = 0,
 175                .dest_nentries = 0,
 176        },
 177
 178        /* CE7: ce_diag, the Diagnostic Window */
 179        {
 180                .flags = CE_ATTR_FLAGS,
 181                .src_nentries = 2,
 182                .src_sz_max = DIAG_TRANSFER_LIMIT,
 183                .dest_nentries = 2,
 184        },
 185
 186        /* CE8: target->host pktlog */
 187        {
 188                .flags = CE_ATTR_FLAGS,
 189                .src_nentries = 0,
 190                .src_sz_max = 2048,
 191                .dest_nentries = 128,
 192                .recv_cb = ath10k_pci_pktlog_rx_cb,
 193        },
 194
  195        /* CE9: target autonomous qcache memcpy */
 196        {
 197                .flags = CE_ATTR_FLAGS,
 198                .src_nentries = 0,
 199                .src_sz_max = 0,
 200                .dest_nentries = 0,
 201        },
 202
 203        /* CE10: target autonomous hif memcpy */
 204        {
 205                .flags = CE_ATTR_FLAGS,
 206                .src_nentries = 0,
 207                .src_sz_max = 0,
 208                .dest_nentries = 0,
 209        },
 210
 211        /* CE11: target autonomous hif memcpy */
 212        {
 213                .flags = CE_ATTR_FLAGS,
 214                .src_nentries = 0,
 215                .src_sz_max = 0,
 216                .dest_nentries = 0,
 217        },
 218};
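/* The table above configures the host-side CE rings and their completion
 * callbacks. The table below is its mirror image as sent to the target at
 * startup: pipes with src_nentries above become PIPEDIR_OUT entries, and
 * pipes with dest_nentries become PIPEDIR_IN.
 */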
 219
 220/* Target firmware's Copy Engine configuration. */
 221static struct ce_pipe_config target_ce_config_wlan[] = {
 222        /* CE0: host->target HTC control and raw streams */
 223        {
 224                .pipenum = __cpu_to_le32(0),
 225                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
 226                .nentries = __cpu_to_le32(32),
 227                .nbytes_max = __cpu_to_le32(256),
 228                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 229                .reserved = __cpu_to_le32(0),
 230        },
 231
 232        /* CE1: target->host HTT + HTC control */
 233        {
 234                .pipenum = __cpu_to_le32(1),
 235                .pipedir = __cpu_to_le32(PIPEDIR_IN),
 236                .nentries = __cpu_to_le32(32),
 237                .nbytes_max = __cpu_to_le32(2048),
 238                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 239                .reserved = __cpu_to_le32(0),
 240        },
 241
 242        /* CE2: target->host WMI */
 243        {
 244                .pipenum = __cpu_to_le32(2),
 245                .pipedir = __cpu_to_le32(PIPEDIR_IN),
 246                .nentries = __cpu_to_le32(64),
 247                .nbytes_max = __cpu_to_le32(2048),
 248                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 249                .reserved = __cpu_to_le32(0),
 250        },
 251
 252        /* CE3: host->target WMI */
 253        {
 254                .pipenum = __cpu_to_le32(3),
 255                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
 256                .nentries = __cpu_to_le32(32),
 257                .nbytes_max = __cpu_to_le32(2048),
 258                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 259                .reserved = __cpu_to_le32(0),
 260        },
 261
 262        /* CE4: host->target HTT */
 263        {
 264                .pipenum = __cpu_to_le32(4),
 265                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
 266                .nentries = __cpu_to_le32(256),
 267                .nbytes_max = __cpu_to_le32(256),
 268                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 269                .reserved = __cpu_to_le32(0),
 270        },
 271
 272        /* NB: 50% of src nentries, since tx has 2 frags */
 273
 274        /* CE5: target->host HTT (HIF->HTT) */
 275        {
 276                .pipenum = __cpu_to_le32(5),
 277                .pipedir = __cpu_to_le32(PIPEDIR_IN),
 278                .nentries = __cpu_to_le32(32),
 279                .nbytes_max = __cpu_to_le32(512),
 280                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 281                .reserved = __cpu_to_le32(0),
 282        },
 283
 284        /* CE6: Reserved for target autonomous hif_memcpy */
 285        {
 286                .pipenum = __cpu_to_le32(6),
 287                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 288                .nentries = __cpu_to_le32(32),
 289                .nbytes_max = __cpu_to_le32(4096),
 290                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
 291                .reserved = __cpu_to_le32(0),
 292        },
 293
 294        /* CE7 used only by Host */
 295        {
 296                .pipenum = __cpu_to_le32(7),
 297                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 298                .nentries = __cpu_to_le32(0),
 299                .nbytes_max = __cpu_to_le32(0),
 300                .flags = __cpu_to_le32(0),
 301                .reserved = __cpu_to_le32(0),
 302        },
 303
  304        /* CE8: target->host pktlog */
 305        {
 306                .pipenum = __cpu_to_le32(8),
 307                .pipedir = __cpu_to_le32(PIPEDIR_IN),
 308                .nentries = __cpu_to_le32(64),
 309                .nbytes_max = __cpu_to_le32(2048),
 310                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 311                .reserved = __cpu_to_le32(0),
 312        },
 313
  314        /* CE9: target autonomous qcache memcpy */
 315        {
 316                .pipenum = __cpu_to_le32(9),
 317                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
 318                .nentries = __cpu_to_le32(32),
 319                .nbytes_max = __cpu_to_le32(2048),
 320                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
 321                .reserved = __cpu_to_le32(0),
 322        },
 323
  324        /* It is not necessary to send the target wlan configuration for CE10
  325         * and CE11, as these CEs are not actively used in the target.
 326         */
 327};
 328
 329/*
 330 * Map from service/endpoint to Copy Engine.
 331 * This table is derived from the CE_PCI TABLE, above.
 332 * It is passed to the Target at startup for use by firmware.
 333 */
 334static struct service_to_pipe target_service_to_ce_map_wlan[] = {
 335        {
 336                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 337                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 338                __cpu_to_le32(3),
 339        },
 340        {
 341                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
 342                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 343                __cpu_to_le32(2),
 344        },
 345        {
 346                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 347                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 348                __cpu_to_le32(3),
 349        },
 350        {
 351                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
 352                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 353                __cpu_to_le32(2),
 354        },
 355        {
 356                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 357                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 358                __cpu_to_le32(3),
 359        },
 360        {
 361                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
 362                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 363                __cpu_to_le32(2),
 364        },
 365        {
 366                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 367                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 368                __cpu_to_le32(3),
 369        },
 370        {
 371                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
 372                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 373                __cpu_to_le32(2),
 374        },
 375        {
 376                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 377                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 378                __cpu_to_le32(3),
 379        },
 380        {
 381                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
 382                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 383                __cpu_to_le32(2),
 384        },
 385        {
 386                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 387                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 388                __cpu_to_le32(0),
 389        },
 390        {
 391                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
 392                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 393                __cpu_to_le32(1),
 394        },
 395        { /* not used */
 396                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 397                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 398                __cpu_to_le32(0),
 399        },
 400        { /* not used */
 401                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
 402                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 403                __cpu_to_le32(1),
 404        },
 405        {
 406                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 407                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
 408                __cpu_to_le32(4),
 409        },
 410        {
 411                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
 412                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
 413                __cpu_to_le32(5),
 414        },
 415
 416        /* (Additions here) */
 417
 418        { /* must be last */
 419                __cpu_to_le32(0),
 420                __cpu_to_le32(0),
 421                __cpu_to_le32(0),
 422        },
 423};
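/* For example, per the table above, ATH10K_HTC_SVC_ID_WMI_CONTROL uses CE3
 * for host->target traffic and CE2 for target->host traffic.
 * ath10k_pci_hif_map_service_to_pipe() below walks these entries to resolve
 * a service id into its ul/dl pipe pair.
 */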
 424
 425static bool ath10k_pci_is_awake(struct ath10k *ar)
 426{
 427        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 428        u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 429                           RTC_STATE_ADDRESS);
 430
 431        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
 432}
 433
 434static void __ath10k_pci_wake(struct ath10k *ar)
 435{
 436        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 437
 438        lockdep_assert_held(&ar_pci->ps_lock);
 439
 440        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
 441                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 442
 443        iowrite32(PCIE_SOC_WAKE_V_MASK,
 444                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 445                  PCIE_SOC_WAKE_ADDRESS);
 446}
 447
 448static void __ath10k_pci_sleep(struct ath10k *ar)
 449{
 450        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 451
 452        lockdep_assert_held(&ar_pci->ps_lock);
 453
 454        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
 455                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 456
 457        iowrite32(PCIE_SOC_WAKE_RESET,
 458                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 459                  PCIE_SOC_WAKE_ADDRESS);
 460        ar_pci->ps_awake = false;
 461}
 462
 463static int ath10k_pci_wake_wait(struct ath10k *ar)
 464{
 465        int tot_delay = 0;
 466        int curr_delay = 5;
 467
 468        while (tot_delay < PCIE_WAKE_TIMEOUT) {
 469                if (ath10k_pci_is_awake(ar)) {
 470                        if (tot_delay > PCIE_WAKE_LATE_US)
  471                                ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
 472                                            tot_delay / 1000);
 473                        return 0;
 474                }
 475
 476                udelay(curr_delay);
 477                tot_delay += curr_delay;
 478
 479                if (curr_delay < 50)
 480                        curr_delay += 5;
 481        }
 482
 483        return -ETIMEDOUT;
 484}
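/* The poll above starts at 5 us per iteration and backs off in 5 us steps
 * up to 50 us, so the total busy-wait is bounded by PCIE_WAKE_TIMEOUT
 * before giving up with -ETIMEDOUT.
 */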
 485
 486static int ath10k_pci_force_wake(struct ath10k *ar)
 487{
 488        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 489        unsigned long flags;
 490        int ret = 0;
 491
 492        if (ar_pci->pci_ps)
 493                return ret;
 494
 495        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 496
 497        if (!ar_pci->ps_awake) {
 498                iowrite32(PCIE_SOC_WAKE_V_MASK,
 499                          ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 500                          PCIE_SOC_WAKE_ADDRESS);
 501
 502                ret = ath10k_pci_wake_wait(ar);
 503                if (ret == 0)
 504                        ar_pci->ps_awake = true;
 505        }
 506
 507        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 508
 509        return ret;
 510}
 511
 512static void ath10k_pci_force_sleep(struct ath10k *ar)
 513{
 514        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 515        unsigned long flags;
 516
 517        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 518
 519        iowrite32(PCIE_SOC_WAKE_RESET,
 520                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
 521                  PCIE_SOC_WAKE_ADDRESS);
 522        ar_pci->ps_awake = false;
 523
 524        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 525}
 526
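/* Refcounted power-save scheme: ath10k_pci_wake() wakes the SoC if needed
 * and takes a reference; ath10k_pci_sleep() drops the reference and, rather
 * than sleeping immediately, arms a grace-period timer so back-to-back MMIO
 * accesses do not thrash the SOC_WAKE register. ath10k_pci_ps_timer() then
 * puts the device to sleep only once the refcount has dropped to zero.
 */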
 527static int ath10k_pci_wake(struct ath10k *ar)
 528{
 529        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 530        unsigned long flags;
 531        int ret = 0;
 532
 533        if (ar_pci->pci_ps == 0)
 534                return ret;
 535
 536        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 537
 538        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
 539                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 540
 541        /* This function can be called very frequently. To avoid excessive
  542         * CPU stalls from MMIO reads, use a cached variable to hold the device state.
 543         */
 544        if (!ar_pci->ps_awake) {
 545                __ath10k_pci_wake(ar);
 546
 547                ret = ath10k_pci_wake_wait(ar);
 548                if (ret == 0)
 549                        ar_pci->ps_awake = true;
 550        }
 551
 552        if (ret == 0) {
 553                ar_pci->ps_wake_refcount++;
 554                WARN_ON(ar_pci->ps_wake_refcount == 0);
 555        }
 556
 557        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 558
 559        return ret;
 560}
 561
 562static void ath10k_pci_sleep(struct ath10k *ar)
 563{
 564        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 565        unsigned long flags;
 566
 567        if (ar_pci->pci_ps == 0)
 568                return;
 569
 570        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 571
 572        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
 573                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 574
 575        if (WARN_ON(ar_pci->ps_wake_refcount == 0))
 576                goto skip;
 577
 578        ar_pci->ps_wake_refcount--;
 579
 580        mod_timer(&ar_pci->ps_timer, jiffies +
 581                  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
 582
 583skip:
 584        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 585}
 586
 587static void ath10k_pci_ps_timer(unsigned long ptr)
 588{
 589        struct ath10k *ar = (void *)ptr;
 590        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 591        unsigned long flags;
 592
 593        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 594
 595        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
 596                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);
 597
 598        if (ar_pci->ps_wake_refcount > 0)
 599                goto skip;
 600
 601        __ath10k_pci_sleep(ar);
 602
 603skip:
 604        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 605}
 606
 607static void ath10k_pci_sleep_sync(struct ath10k *ar)
 608{
 609        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 610        unsigned long flags;
 611
 612        if (ar_pci->pci_ps == 0) {
 613                ath10k_pci_force_sleep(ar);
 614                return;
 615        }
 616
 617        del_timer_sync(&ar_pci->ps_timer);
 618
 619        spin_lock_irqsave(&ar_pci->ps_lock, flags);
 620        WARN_ON(ar_pci->ps_wake_refcount > 0);
 621        __ath10k_pci_sleep(ar);
 622        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
 623}
 624
 625static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 626{
 627        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 628        int ret;
 629
 630        if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
 631                ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 632                            offset, offset + sizeof(value), ar_pci->mem_len);
 633                return;
 634        }
 635
 636        ret = ath10k_pci_wake(ar);
 637        if (ret) {
 638                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
 639                            value, offset, ret);
 640                return;
 641        }
 642
 643        iowrite32(value, ar_pci->mem + offset);
 644        ath10k_pci_sleep(ar);
 645}
 646
 647static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
 648{
 649        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 650        u32 val;
 651        int ret;
 652
 653        if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
 654                ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
 655                            offset, offset + sizeof(val), ar_pci->mem_len);
 656                return 0;
 657        }
 658
 659        ret = ath10k_pci_wake(ar);
 660        if (ret) {
 661                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
 662                            offset, ret);
 663                return 0xffffffff;
 664        }
 665
 666        val = ioread32(ar_pci->mem + offset);
 667        ath10k_pci_sleep(ar);
 668
 669        return val;
 670}
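/* Both accessors above follow the same pattern: bounds-check against the
 * mapped BAR length, wake the target, do the MMIO access, then drop the
 * wake reference. A failed wake surfaces as 0xffffffff on reads, the same
 * value PCIe returns for aborted transactions. A hypothetical caller:
 *
 *   u32 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
 */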
 671
 672inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
 673{
 674        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 675
 676        ar_pci->bus_ops->write32(ar, offset, value);
 677}
 678
 679inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
 680{
 681        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 682
 683        return ar_pci->bus_ops->read32(ar, offset);
 684}
 685
 686u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
 687{
 688        return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
 689}
 690
 691void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
 692{
 693        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
 694}
 695
 696u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
 697{
 698        return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
 699}
 700
 701void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
 702{
 703        ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
 704}
 705
 706bool ath10k_pci_irq_pending(struct ath10k *ar)
 707{
 708        u32 cause;
 709
 710        /* Check if the shared legacy irq is for us */
 711        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 712                                  PCIE_INTR_CAUSE_ADDRESS);
 713        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
 714                return true;
 715
 716        return false;
 717}
 718
 719void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
 720{
 721        /* IMPORTANT: INTR_CLR register has to be set after
  722         * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
  723         * properly cleared. */
 724        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
 725                           0);
 726        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
 727                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 728
 729        /* IMPORTANT: this extra read transaction is required to
 730         * flush the posted write buffer. */
 731        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 732                                PCIE_INTR_ENABLE_ADDRESS);
 733}
 734
 735void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
 736{
 737        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
 738                           PCIE_INTR_ENABLE_ADDRESS,
 739                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
 740
 741        /* IMPORTANT: this extra read transaction is required to
 742         * flush the posted write buffer. */
 743        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 744                                PCIE_INTR_ENABLE_ADDRESS);
 745}
 746
 747static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
 748{
 749        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 750
 751        if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
 752                return "msi";
 753
 754        return "legacy";
 755}
 756
 757static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
 758{
 759        struct ath10k *ar = pipe->hif_ce_state;
 760        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 761        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 762        struct sk_buff *skb;
 763        dma_addr_t paddr;
 764        int ret;
 765
 766        skb = dev_alloc_skb(pipe->buf_sz);
 767        if (!skb)
 768                return -ENOMEM;
 769
 770        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
 771
 772        paddr = dma_map_single(ar->dev, skb->data,
 773                               skb->len + skb_tailroom(skb),
 774                               DMA_FROM_DEVICE);
 775        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
 776                ath10k_warn(ar, "failed to dma map pci rx buf\n");
 777                dev_kfree_skb_any(skb);
 778                return -EIO;
 779        }
 780
 781        ATH10K_SKB_RXCB(skb)->paddr = paddr;
 782
 783        spin_lock_bh(&ar_pci->ce_lock);
 784        ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
 785        spin_unlock_bh(&ar_pci->ce_lock);
 786        if (ret) {
 787                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
 788                                 DMA_FROM_DEVICE);
 789                dev_kfree_skb_any(skb);
 790                return ret;
 791        }
 792
 793        return 0;
 794}
 795
 796static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
 797{
 798        struct ath10k *ar = pipe->hif_ce_state;
 799        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 800        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
 801        int ret, num;
 802
 803        if (pipe->buf_sz == 0)
 804                return;
 805
 806        if (!ce_pipe->dest_ring)
 807                return;
 808
 809        spin_lock_bh(&ar_pci->ce_lock);
 810        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
 811        spin_unlock_bh(&ar_pci->ce_lock);
 812
 813        while (num >= 0) {
 814                ret = __ath10k_pci_rx_post_buf(pipe);
 815                if (ret) {
 816                        if (ret == -ENOSPC)
 817                                break;
 818                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
 819                        mod_timer(&ar_pci->rx_post_retry, jiffies +
 820                                  ATH10K_PCI_RX_POST_RETRY_MS);
 821                        break;
 822                }
 823                num--;
 824        }
 825}
 826
 827void ath10k_pci_rx_post(struct ath10k *ar)
 828{
 829        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 830        int i;
 831
 832        for (i = 0; i < CE_COUNT; i++)
 833                ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
 834}
 835
 836void ath10k_pci_rx_replenish_retry(unsigned long ptr)
 837{
 838        struct ath10k *ar = (void *)ptr;
 839
 840        ath10k_pci_rx_post(ar);
 841}
 842
 843static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 844{
 845        u32 val = 0;
 846
 847        switch (ar->hw_rev) {
 848        case ATH10K_HW_QCA988X:
 849        case ATH10K_HW_QCA9887:
 850        case ATH10K_HW_QCA6174:
 851        case ATH10K_HW_QCA9377:
 852                val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 853                                          CORE_CTRL_ADDRESS) &
 854                       0x7ff) << 21;
 855                break;
 856        case ATH10K_HW_QCA9888:
 857        case ATH10K_HW_QCA99X0:
 858        case ATH10K_HW_QCA9984:
 859        case ATH10K_HW_QCA4019:
 860                val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
 861                break;
 862        }
 863
 864        val |= 0x100000 | (addr & 0xfffff);
 865        return val;
 866}
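/* A worked example of the translation above (assuming, for illustration,
 * that the CORE_CTRL read yields region-select bits of 0x4 on a
 * QCA988X-family chip): target CPU address 0x00401234 keeps its low 20 bits
 * (0x01234), gains the fixed 0x100000 window bit, and is OR'ed with
 * (0x4 << 21), giving the CE-visible address 0x00901234.
 */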
 867
 868/*
 869 * Diagnostic read/write access is provided for startup/config/debug usage.
 870 * Caller must guarantee proper alignment, when applicable, and single user
 871 * at any moment.
 872 */
 873static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 874                                    int nbytes)
 875{
 876        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 877        int ret = 0;
 878        u32 *buf;
 879        unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
 880        struct ath10k_ce_pipe *ce_diag;
 881        /* Host buffer address in CE space */
 882        u32 ce_data;
 883        dma_addr_t ce_data_base = 0;
 884        void *data_buf = NULL;
 885        int i;
 886
 887        spin_lock_bh(&ar_pci->ce_lock);
 888
 889        ce_diag = ar_pci->ce_diag;
 890
 891        /*
 892         * Allocate a temporary bounce buffer to hold caller's data
 893         * to be DMA'ed from Target. This guarantees
 894         *   1) 4-byte alignment
 895         *   2) Buffer in DMA-able space
 896         */
 897        alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
 898
 899        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
 900                                                       alloc_nbytes,
 901                                                       &ce_data_base,
 902                                                       GFP_ATOMIC);
 903
 904        if (!data_buf) {
 905                ret = -ENOMEM;
 906                goto done;
 907        }
 908        memset(data_buf, 0, alloc_nbytes);
 909
 910        remaining_bytes = nbytes;
 911        ce_data = ce_data_base;
 912        while (remaining_bytes) {
 913                nbytes = min_t(unsigned int, remaining_bytes,
 914                               DIAG_TRANSFER_LIMIT);
 915
 916                ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
 917                if (ret != 0)
 918                        goto done;
 919
 920                /* Request CE to send from Target(!) address to Host buffer */
 921                /*
 922                 * The address supplied by the caller is in the
 923                 * Target CPU virtual address space.
 924                 *
 925                 * In order to use this address with the diagnostic CE,
 926                 * convert it from Target CPU virtual address space
 927                 * to CE address space
 928                 */
 929                address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 930
 931                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
 932                                            0);
 933                if (ret)
 934                        goto done;
 935
 936                i = 0;
 937                while (ath10k_ce_completed_send_next_nolock(ce_diag,
 938                                                            NULL) != 0) {
 939                        mdelay(1);
 940                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
 941                                ret = -EBUSY;
 942                                goto done;
 943                        }
 944                }
 945
 946                i = 0;
 947                while (ath10k_ce_completed_recv_next_nolock(ce_diag,
 948                                                            (void **)&buf,
 949                                                            &completed_nbytes)
 950                                                                != 0) {
 951                        mdelay(1);
 952
 953                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
 954                                ret = -EBUSY;
 955                                goto done;
 956                        }
 957                }
 958
 959                if (nbytes != completed_nbytes) {
 960                        ret = -EIO;
 961                        goto done;
 962                }
 963
 964                if (*buf != ce_data) {
 965                        ret = -EIO;
 966                        goto done;
 967                }
 968
 969                remaining_bytes -= nbytes;
 970
 971                if (ret) {
 972                        ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
 973                                    address, ret);
 974                        break;
 975                }
 976                memcpy(data, data_buf, nbytes);
 977
 978                address += nbytes;
 979                data += nbytes;
 980        }
 981
 982done:
 983
 984        if (data_buf)
 985                dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
 986                                  ce_data_base);
 987
 988        spin_unlock_bh(&ar_pci->ce_lock);
 989
 990        return ret;
 991}
 992
 993static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
 994{
 995        __le32 val = 0;
 996        int ret;
 997
 998        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
 999        *value = __le32_to_cpu(val);
1000
1001        return ret;
1002}
1003
1004static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
1005                                     u32 src, u32 len)
1006{
1007        u32 host_addr, addr;
1008        int ret;
1009
1010        host_addr = host_interest_item_address(src);
1011
1012        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
1013        if (ret != 0) {
1014                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
1015                            src, ret);
1016                return ret;
1017        }
1018
1019        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
1020        if (ret != 0) {
1021                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
1022                            addr, len, ret);
1023                return ret;
1024        }
1025
1026        return 0;
1027}
1028
1029#define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
1030        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
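/* Typical use, as in ath10k_pci_dump_registers() below: read a block of
 * target memory whose address is published in the host interest area, e.g.
 *
 *   ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], hi_failure_state,
 *                                 REG_DUMP_COUNT_QCA988X * sizeof(__le32));
 */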
1031
1032int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
1033                              const void *data, int nbytes)
1034{
1035        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1036        int ret = 0;
1037        u32 *buf;
1038        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
1039        struct ath10k_ce_pipe *ce_diag;
1040        void *data_buf = NULL;
1041        u32 ce_data;    /* Host buffer address in CE space */
1042        dma_addr_t ce_data_base = 0;
1043        int i;
1044
1045        spin_lock_bh(&ar_pci->ce_lock);
1046
1047        ce_diag = ar_pci->ce_diag;
1048
1049        /*
1050         * Allocate a temporary bounce buffer to hold caller's data
1051         * to be DMA'ed to Target. This guarantees
1052         *   1) 4-byte alignment
1053         *   2) Buffer in DMA-able space
1054         */
1055        orig_nbytes = nbytes;
1056        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
1057                                                       orig_nbytes,
1058                                                       &ce_data_base,
1059                                                       GFP_ATOMIC);
1060        if (!data_buf) {
1061                ret = -ENOMEM;
1062                goto done;
1063        }
1064
1065        /* Copy caller's data to allocated DMA buf */
1066        memcpy(data_buf, data, orig_nbytes);
1067
1068        /*
1069         * The address supplied by the caller is in the
1070         * Target CPU virtual address space.
1071         *
1072         * In order to use this address with the diagnostic CE,
1073         * convert it from
1074         *    Target CPU virtual address space
1075         * to
1076         *    CE address space
1077         */
1078        address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
1079
1080        remaining_bytes = orig_nbytes;
1081        ce_data = ce_data_base;
1082        while (remaining_bytes) {
1083                /* FIXME: check cast */
1084                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
1085
1086                /* Set up to receive directly into Target(!) address */
1087                ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
1088                if (ret != 0)
1089                        goto done;
1090
1091                /*
1092                 * Request CE to send caller-supplied data that
1093                 * was copied to bounce buffer to Target(!) address.
1094                 */
1095                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
1096                                            nbytes, 0, 0);
1097                if (ret != 0)
1098                        goto done;
1099
1100                i = 0;
1101                while (ath10k_ce_completed_send_next_nolock(ce_diag,
1102                                                            NULL) != 0) {
1103                        mdelay(1);
1104
1105                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1106                                ret = -EBUSY;
1107                                goto done;
1108                        }
1109                }
1110
1111                i = 0;
1112                while (ath10k_ce_completed_recv_next_nolock(ce_diag,
1113                                                            (void **)&buf,
1114                                                            &completed_nbytes)
1115                                                                != 0) {
1116                        mdelay(1);
1117
1118                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
1119                                ret = -EBUSY;
1120                                goto done;
1121                        }
1122                }
1123
1124                if (nbytes != completed_nbytes) {
1125                        ret = -EIO;
1126                        goto done;
1127                }
1128
1129                if (*buf != address) {
1130                        ret = -EIO;
1131                        goto done;
1132                }
1133
1134                remaining_bytes -= nbytes;
1135                address += nbytes;
1136                ce_data += nbytes;
1137        }
1138
1139done:
1140        if (data_buf) {
1141                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
1142                                  ce_data_base);
1143        }
1144
1145        if (ret != 0)
1146                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
1147                            address, ret);
1148
1149        spin_unlock_bh(&ar_pci->ce_lock);
1150
1151        return ret;
1152}
1153
1154static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
1155{
1156        __le32 val = __cpu_to_le32(value);
1157
1158        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
1159}
1160
1161/* Called by lower (CE) layer when a send to Target completes. */
1162static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
1163{
1164        struct ath10k *ar = ce_state->ar;
1165        struct sk_buff_head list;
1166        struct sk_buff *skb;
1167
1168        __skb_queue_head_init(&list);
1169        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1170                /* no need to call tx completion for NULL pointers */
1171                if (skb == NULL)
1172                        continue;
1173
1174                __skb_queue_tail(&list, skb);
1175        }
1176
1177        while ((skb = __skb_dequeue(&list)))
1178                ath10k_htc_tx_completion_handler(ar, skb);
1179}
1180
1181static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
1182                                     void (*callback)(struct ath10k *ar,
1183                                                      struct sk_buff *skb))
1184{
1185        struct ath10k *ar = ce_state->ar;
1186        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1187        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1188        struct sk_buff *skb;
1189        struct sk_buff_head list;
1190        void *transfer_context;
1191        unsigned int nbytes, max_nbytes;
1192
1193        __skb_queue_head_init(&list);
1194        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
1195                                             &nbytes) == 0) {
1196                skb = transfer_context;
1197                max_nbytes = skb->len + skb_tailroom(skb);
1198                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1199                                 max_nbytes, DMA_FROM_DEVICE);
1200
1201                if (unlikely(max_nbytes < nbytes)) {
1202                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1203                                    nbytes, max_nbytes);
1204                        dev_kfree_skb_any(skb);
1205                        continue;
1206                }
1207
1208                skb_put(skb, nbytes);
1209                __skb_queue_tail(&list, skb);
1210        }
1211
1212        while ((skb = __skb_dequeue(&list))) {
1213                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1214                           ce_state->id, skb->len);
1215                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1216                                skb->data, skb->len);
1217
1218                callback(ar, skb);
1219        }
1220
1221        ath10k_pci_rx_post_pipe(pipe_info);
1222}
1223
1224static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
1225                                         void (*callback)(struct ath10k *ar,
1226                                                          struct sk_buff *skb))
1227{
1228        struct ath10k *ar = ce_state->ar;
1229        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1230        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
1231        struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
1232        struct sk_buff *skb;
1233        struct sk_buff_head list;
1234        void *transfer_context;
1235        unsigned int nbytes, max_nbytes, nentries;
1236        int orig_len;
1237
 1238        /* No need to acquire ce_lock for CE5, since this is the only place CE5
1239         * is processed other than init and deinit. Before releasing CE5
1240         * buffers, interrupts are disabled. Thus CE5 access is serialized.
1241         */
1242        __skb_queue_head_init(&list);
1243        while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
1244                                                    &nbytes) == 0) {
1245                skb = transfer_context;
1246                max_nbytes = skb->len + skb_tailroom(skb);
1247
1248                if (unlikely(max_nbytes < nbytes)) {
1249                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
1250                                    nbytes, max_nbytes);
1251                        continue;
1252                }
1253
1254                dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1255                                        max_nbytes, DMA_FROM_DEVICE);
1256                skb_put(skb, nbytes);
1257                __skb_queue_tail(&list, skb);
1258        }
1259
1260        nentries = skb_queue_len(&list);
1261        while ((skb = __skb_dequeue(&list))) {
1262                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
1263                           ce_state->id, skb->len);
1264                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
1265                                skb->data, skb->len);
1266
1267                orig_len = skb->len;
1268                callback(ar, skb);
1269                skb_push(skb, orig_len - skb->len);
1270                skb_reset_tail_pointer(skb);
1271                skb_trim(skb, 0);
1272
 1273                /* let the device gain the buffer again */
1274                dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1275                                           skb->len + skb_tailroom(skb),
1276                                           DMA_FROM_DEVICE);
1277        }
1278        ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
1279}
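/* Unlike ath10k_pci_process_rx_cb() above, this HTT fast path never unmaps
 * its buffers: each skb is synced to the CPU, delivered, reset to an empty
 * state, synced back to the device, and the same descriptors are re-armed
 * in one batch via ath10k_ce_rx_update_write_idx().
 */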
1280
1281/* Called by lower (CE) layer when data is received from the Target. */
1282static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1283{
1284        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1285}
1286
1287static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
1288{
1289        /* CE4 polling needs to be done whenever CE pipe which transports
1290         * HTT Rx (target->host) is processed.
1291         */
1292        ath10k_ce_per_engine_service(ce_state->ar, 4);
1293
1294        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
1295}
1296
1297/* Called by lower (CE) layer when data is received from the Target.
1298 * Only 10.4 firmware uses separate CE to transfer pktlog data.
1299 */
1300static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
1301{
1302        ath10k_pci_process_rx_cb(ce_state,
1303                                 ath10k_htt_rx_pktlog_completion_handler);
1304}
1305
1306/* Called by lower (CE) layer when a send to HTT Target completes. */
1307static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
1308{
1309        struct ath10k *ar = ce_state->ar;
1310        struct sk_buff *skb;
1311
1312        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
1313                /* no need to call tx completion for NULL pointers */
1314                if (!skb)
1315                        continue;
1316
1317                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
1318                                 skb->len, DMA_TO_DEVICE);
1319                ath10k_htt_hif_tx_complete(ar, skb);
1320        }
1321}
1322
1323static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
1324{
1325        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
1326        ath10k_htt_t2h_msg_handler(ar, skb);
1327}
1328
1329/* Called by lower (CE) layer when HTT data is received from the Target. */
1330static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
1331{
1332        /* CE4 polling needs to be done whenever CE pipe which transports
1333         * HTT Rx (target->host) is processed.
1334         */
1335        ath10k_ce_per_engine_service(ce_state->ar, 4);
1336
1337        ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
1338}
1339
1340int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1341                         struct ath10k_hif_sg_item *items, int n_items)
1342{
1343        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1344        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1345        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1346        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1347        unsigned int nentries_mask;
1348        unsigned int sw_index;
1349        unsigned int write_index;
1350        int err, i = 0;
1351
1352        spin_lock_bh(&ar_pci->ce_lock);
1353
1354        nentries_mask = src_ring->nentries_mask;
1355        sw_index = src_ring->sw_index;
1356        write_index = src_ring->write_index;
1357
1358        if (unlikely(CE_RING_DELTA(nentries_mask,
1359                                   write_index, sw_index - 1) < n_items)) {
1360                err = -ENOBUFS;
1361                goto err;
1362        }
1363
1364        for (i = 0; i < n_items - 1; i++) {
1365                ath10k_dbg(ar, ATH10K_DBG_PCI,
1366                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1367                           i, items[i].paddr, items[i].len, n_items);
1368                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1369                                items[i].vaddr, items[i].len);
1370
1371                err = ath10k_ce_send_nolock(ce_pipe,
1372                                            items[i].transfer_context,
1373                                            items[i].paddr,
1374                                            items[i].len,
1375                                            items[i].transfer_id,
1376                                            CE_SEND_FLAG_GATHER);
1377                if (err)
1378                        goto err;
1379        }
1380
 1381        /* `i` equals `n_items - 1` after the loop above */
1382
1383        ath10k_dbg(ar, ATH10K_DBG_PCI,
1384                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1385                   i, items[i].paddr, items[i].len, n_items);
1386        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1387                        items[i].vaddr, items[i].len);
1388
1389        err = ath10k_ce_send_nolock(ce_pipe,
1390                                    items[i].transfer_context,
1391                                    items[i].paddr,
1392                                    items[i].len,
1393                                    items[i].transfer_id,
1394                                    0);
1395        if (err)
1396                goto err;
1397
1398        spin_unlock_bh(&ar_pci->ce_lock);
1399        return 0;
1400
1401err:
1402        for (; i > 0; i--)
1403                __ath10k_ce_send_revert(ce_pipe);
1404
1405        spin_unlock_bh(&ar_pci->ce_lock);
1406        return err;
1407}
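/* Gather convention used above: every item except the last is queued with
 * CE_SEND_FLAG_GATHER, and the final unflagged send closes the burst so the
 * CE treats the list as a single transfer. On failure the already-queued
 * entries are unwound with __ath10k_ce_send_revert() before returning.
 */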
1408
1409int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1410                             size_t buf_len)
1411{
1412        return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1413}
1414
1415u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1416{
1417        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1418
1419        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1420
1421        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1422}
1423
1424static void ath10k_pci_dump_registers(struct ath10k *ar,
1425                                      struct ath10k_fw_crash_data *crash_data)
1426{
1427        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1428        int i, ret;
1429
1430        lockdep_assert_held(&ar->data_lock);
1431
1432        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1433                                      hi_failure_state,
1434                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1435        if (ret) {
1436                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1437                return;
1438        }
1439
1440        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1441
1442        ath10k_err(ar, "firmware register dump:\n");
1443        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1444                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1445                           i,
1446                           __le32_to_cpu(reg_dump_values[i]),
1447                           __le32_to_cpu(reg_dump_values[i + 1]),
1448                           __le32_to_cpu(reg_dump_values[i + 2]),
1449                           __le32_to_cpu(reg_dump_values[i + 3]));
1450
1451        if (!crash_data)
1452                return;
1453
1454        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1455                crash_data->registers[i] = reg_dump_values[i];
1456}
1457
1458static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1459{
1460        struct ath10k_fw_crash_data *crash_data;
1461        char uuid[50];
1462
1463        spin_lock_bh(&ar->data_lock);
1464
1465        ar->stats.fw_crash_counter++;
1466
1467        crash_data = ath10k_debug_get_new_fw_crash_data(ar);
1468
1469        if (crash_data)
1470                scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
1471        else
1472                scnprintf(uuid, sizeof(uuid), "n/a");
1473
1474        ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
1475        ath10k_print_driver_info(ar);
1476        ath10k_pci_dump_registers(ar, crash_data);
1477
1478        spin_unlock_bh(&ar->data_lock);
1479
1480        queue_work(ar->workqueue, &ar->restart_work);
1481}
1482
1483void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1484                                        int force)
1485{
1486        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1487
1488        if (!force) {
1489                int resources;
1490                /*
1491                 * Decide whether to actually poll for completions, or just
1492                 * wait for a later chance.
1493                 * If there seem to be plenty of resources left, then just wait
1494                 * since checking involves reading a CE register, which is a
1495                 * relatively expensive operation.
1496                 */
1497                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1498
1499                /*
1500                 * If at least 50% of the total resources are still available,
1501                 * don't bother checking again yet.
1502                 */
1503                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1504                        return;
1505        }
1506        ath10k_ce_per_engine_service(ar, pipe);
1507}
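
/* Editorial worked example of the heuristic above (illustrative numbers,
 * not taken from this file): if host_ce_config_wlan[pipe].src_nentries
 * were 64, the CE register read would be skipped while more than 32
 * source entries remain free; only once half or fewer are left does the
 * comparatively expensive ath10k_ce_per_engine_service() poll run.
 */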
1508
1509static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
1510{
1511        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1512
1513        del_timer_sync(&ar_pci->rx_post_retry);
1514}
1515
1516int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1517                                       u8 *ul_pipe, u8 *dl_pipe)
1518{
1519        const struct service_to_pipe *entry;
1520        bool ul_set = false, dl_set = false;
1521        int i;
1522
1523        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1524
1525        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1526                entry = &target_service_to_ce_map_wlan[i];
1527
1528                if (__le32_to_cpu(entry->service_id) != service_id)
1529                        continue;
1530
1531                switch (__le32_to_cpu(entry->pipedir)) {
1532                case PIPEDIR_NONE:
1533                        break;
1534                case PIPEDIR_IN:
1535                        WARN_ON(dl_set);
1536                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1537                        dl_set = true;
1538                        break;
1539                case PIPEDIR_OUT:
1540                        WARN_ON(ul_set);
1541                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1542                        ul_set = true;
1543                        break;
1544                case PIPEDIR_INOUT:
1545                        WARN_ON(dl_set);
1546                        WARN_ON(ul_set);
1547                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1548                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1549                        dl_set = true;
1550                        ul_set = true;
1551                        break;
1552                }
1553        }
1554
1555        if (WARN_ON(!ul_set || !dl_set))
1556                return -ENOENT;
1557
1558        return 0;
1559}
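
/* Minimal usage sketch (editorial, not part of the driver): resolving
 * the upload/download pipes for a service before wiring up HTC
 * endpoints; ret is -ENOENT when the service has no mapping. The
 * service ID below is just one valid example input.
 *
 *	u8 ul_pipe, dl_pipe;
 *	int ret;
 *
 *	ret = ath10k_pci_hif_map_service_to_pipe(ar,
 *						 ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *						 &ul_pipe, &dl_pipe);
 *	if (ret)
 *		return ret;
 */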
1560
1561void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1562                                     u8 *ul_pipe, u8 *dl_pipe)
1563{
1564        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1565
1566        (void)ath10k_pci_hif_map_service_to_pipe(ar,
1567                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1568                                                 ul_pipe, dl_pipe);
1569}
1570
1571void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1572{
1573        u32 val;
1574
1575        switch (ar->hw_rev) {
1576        case ATH10K_HW_QCA988X:
1577        case ATH10K_HW_QCA9887:
1578        case ATH10K_HW_QCA6174:
1579        case ATH10K_HW_QCA9377:
1580                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1581                                        CORE_CTRL_ADDRESS);
1582                val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1583                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1584                                   CORE_CTRL_ADDRESS, val);
1585                break;
1586        case ATH10K_HW_QCA99X0:
1587        case ATH10K_HW_QCA9984:
1588        case ATH10K_HW_QCA9888:
1589        case ATH10K_HW_QCA4019:
1590                /* TODO: Find appropriate register configuration for QCA99X0
1591                 * to mask irq/MSI.
1592                 */
1593                break;
1594        }
1595}
1596
1597static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1598{
1599        u32 val;
1600
1601        switch (ar->hw_rev) {
1602        case ATH10K_HW_QCA988X:
1603        case ATH10K_HW_QCA9887:
1604        case ATH10K_HW_QCA6174:
1605        case ATH10K_HW_QCA9377:
1606                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1607                                        CORE_CTRL_ADDRESS);
1608                val |= CORE_CTRL_PCIE_REG_31_MASK;
1609                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1610                                   CORE_CTRL_ADDRESS, val);
1611                break;
1612        case ATH10K_HW_QCA99X0:
1613        case ATH10K_HW_QCA9984:
1614        case ATH10K_HW_QCA9888:
1615        case ATH10K_HW_QCA4019:
1616                /* TODO: Find appropriate register configuration for QCA99X0
1617                 * to unmask irq/MSI.
1618                 */
1619                break;
1620        }
1621}
1622
1623static void ath10k_pci_irq_disable(struct ath10k *ar)
1624{
1625        ath10k_ce_disable_interrupts(ar);
1626        ath10k_pci_disable_and_clear_legacy_irq(ar);
1627        ath10k_pci_irq_msi_fw_mask(ar);
1628}
1629
1630static void ath10k_pci_irq_sync(struct ath10k *ar)
1631{
1632        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1633
1634        synchronize_irq(ar_pci->pdev->irq);
1635}
1636
1637static void ath10k_pci_irq_enable(struct ath10k *ar)
1638{
1639        ath10k_ce_enable_interrupts(ar);
1640        ath10k_pci_enable_legacy_irq(ar);
1641        ath10k_pci_irq_msi_fw_unmask(ar);
1642}
1643
1644static int ath10k_pci_hif_start(struct ath10k *ar)
1645{
1646        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1647
1648        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1649
1650        ath10k_pci_irq_enable(ar);
1651        ath10k_pci_rx_post(ar);
1652
1653        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1654                                   ar_pci->link_ctl);
1655
1656        return 0;
1657}
1658
1659static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1660{
1661        struct ath10k *ar;
1662        struct ath10k_ce_pipe *ce_pipe;
1663        struct ath10k_ce_ring *ce_ring;
1664        struct sk_buff *skb;
1665        int i;
1666
1667        ar = pci_pipe->hif_ce_state;
1668        ce_pipe = pci_pipe->ce_hdl;
1669        ce_ring = ce_pipe->dest_ring;
1670
1671        if (!ce_ring)
1672                return;
1673
1674        if (!pci_pipe->buf_sz)
1675                return;
1676
1677        for (i = 0; i < ce_ring->nentries; i++) {
1678                skb = ce_ring->per_transfer_context[i];
1679                if (!skb)
1680                        continue;
1681
1682                ce_ring->per_transfer_context[i] = NULL;
1683
1684                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1685                                 skb->len + skb_tailroom(skb),
1686                                 DMA_FROM_DEVICE);
1687                dev_kfree_skb_any(skb);
1688        }
1689}
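
/* Editorial note: the dma_unmap_single() above undoes the
 * dma_map_single(..., DMA_FROM_DEVICE) performed when the skb was
 * posted to the RX ring, using the address cached in
 * ATH10K_SKB_RXCB(skb)->paddr, so every buffer still sitting in
 * per_transfer_context is unmapped exactly once before being freed.
 */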
1690
1691static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1692{
1693        struct ath10k *ar;
1694        struct ath10k_ce_pipe *ce_pipe;
1695        struct ath10k_ce_ring *ce_ring;
1696        struct sk_buff *skb;
1697        int i;
1698
1699        ar = pci_pipe->hif_ce_state;
1700        ce_pipe = pci_pipe->ce_hdl;
1701        ce_ring = ce_pipe->src_ring;
1702
1703        if (!ce_ring)
1704                return;
1705
1706        if (!pci_pipe->buf_sz)
1707                return;
1708
1709        for (i = 0; i < ce_ring->nentries; i++) {
1710                skb = ce_ring->per_transfer_context[i];
1711                if (!skb)
1712                        continue;
1713
1714                ce_ring->per_transfer_context[i] = NULL;
1715
1716                ath10k_htc_tx_completion_handler(ar, skb);
1717        }
1718}
1719
1720/*
1721 * Clean up residual buffers for device shutdown:
1722 *    buffers that were enqueued for receive
1723 *    buffers that were to be sent
1724 * Note: Buffers that had completed but which were
1725 * not yet processed are on a completion queue. They
1726 * are handled when the completion thread shuts down.
1727 */
1728static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1729{
1730        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1731        int pipe_num;
1732
1733        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1734                struct ath10k_pci_pipe *pipe_info;
1735
1736                pipe_info = &ar_pci->pipe_info[pipe_num];
1737                ath10k_pci_rx_pipe_cleanup(pipe_info);
1738                ath10k_pci_tx_pipe_cleanup(pipe_info);
1739        }
1740}
1741
1742void ath10k_pci_ce_deinit(struct ath10k *ar)
1743{
1744        int i;
1745
1746        for (i = 0; i < CE_COUNT; i++)
1747                ath10k_ce_deinit_pipe(ar, i);
1748}
1749
1750void ath10k_pci_flush(struct ath10k *ar)
1751{
1752        ath10k_pci_rx_retry_sync(ar);
1753        ath10k_pci_buffer_cleanup(ar);
1754}
1755
1756static void ath10k_pci_hif_stop(struct ath10k *ar)
1757{
1758        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1759        unsigned long flags;
1760
1761        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
1762
1763        /* Most likely the device has the HTT Rx ring configured. The only
1764         * way to prevent the device from accessing (and possibly
1765         * corrupting) host memory is to reset the chip now.
1766         *
1767         * There's also no known way of masking MSI interrupts on the device.
1768         * For ranged MSI the CE-related interrupts can be masked. However,
1769         * regardless of how many MSI interrupts are assigned, the first one
1770         * is always used for firmware indications (crashes) and cannot be
1771         * masked. To prevent the device from asserting the interrupt, reset
1772         * it before proceeding with cleanup.
1773         */
1774        ath10k_pci_safe_chip_reset(ar);
1775
1776        ath10k_pci_irq_disable(ar);
1777        ath10k_pci_irq_sync(ar);
1778        ath10k_pci_flush(ar);
1779        napi_synchronize(&ar->napi);
1780        napi_disable(&ar->napi);
1781
1782        spin_lock_irqsave(&ar_pci->ps_lock, flags);
1783        WARN_ON(ar_pci->ps_wake_refcount > 0);
1784        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
1785}
1786
1787int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1788                                    void *req, u32 req_len,
1789                                    void *resp, u32 *resp_len)
1790{
1791        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1792        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1793        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1794        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1795        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1796        dma_addr_t req_paddr = 0;
1797        dma_addr_t resp_paddr = 0;
1798        struct bmi_xfer xfer = {};
1799        void *treq, *tresp = NULL;
1800        int ret = 0;
1801
1802        might_sleep();
1803
1804        if (resp && !resp_len)
1805                return -EINVAL;
1806
1807        if (resp && resp_len && *resp_len == 0)
1808                return -EINVAL;
1809
1810        treq = kmemdup(req, req_len, GFP_KERNEL);
1811        if (!treq)
1812                return -ENOMEM;
1813
1814        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1815        ret = dma_mapping_error(ar->dev, req_paddr);
1816        if (ret) {
1817                ret = -EIO;
1818                goto err_dma;
1819        }
1820
1821        if (resp && resp_len) {
1822                tresp = kzalloc(*resp_len, GFP_KERNEL);
1823                if (!tresp) {
1824                        ret = -ENOMEM;
1825                        goto err_req;
1826                }
1827
1828                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1829                                            DMA_FROM_DEVICE);
1830                ret = dma_mapping_error(ar->dev, resp_paddr);
1831                if (ret) {
1832                        ret = -EIO;
1833                        goto err_req;
1834                }
1835
1836                xfer.wait_for_resp = true;
1837                xfer.resp_len = 0;
1838
1839                ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
1840        }
1841
1842        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1843        if (ret)
1844                goto err_resp;
1845
1846        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1847        if (ret) {
1848                u32 unused_buffer;
1849                unsigned int unused_nbytes;
1850                unsigned int unused_id;
1851
1852                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1853                                           &unused_nbytes, &unused_id);
1854        } else {
1855                /* a return value of zero means we did not time out */
1856                ret = 0;
1857        }
1858
1859err_resp:
1860        if (resp) {
1861                u32 unused_buffer;
1862
1863                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1864                dma_unmap_single(ar->dev, resp_paddr,
1865                                 *resp_len, DMA_FROM_DEVICE);
1866        }
1867err_req:
1868        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1869
1870        if (ret == 0 && resp_len) {
1871                *resp_len = min(*resp_len, xfer.resp_len);
1872                memcpy(resp, tresp, xfer.resp_len);
1873        }
1874err_dma:
1875        kfree(treq);
1876        kfree(tresp);
1877
1878        return ret;
1879}
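
/* Editorial summary of the BMI exchange above: the request is bounced
 * through a kmemdup() copy so DMA never maps caller memory, an optional
 * response buffer is posted to the RX copy engine first, the request is
 * then sent on the TX copy engine, and ath10k_pci_bmi_wait() polls both
 * engines until the send completes and (if requested) a response
 * arrives, or BMI_COMMUNICATION_TIMEOUT_HZ expires.
 */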
1880
1881static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1882{
1883        struct bmi_xfer *xfer;
1884
1885        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
1886                return;
1887
1888        xfer->tx_done = true;
1889}
1890
1891static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1892{
1893        struct ath10k *ar = ce_state->ar;
1894        struct bmi_xfer *xfer;
1895        unsigned int nbytes;
1896
1897        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
1898                                          &nbytes))
1899                return;
1900
1901        if (WARN_ON_ONCE(!xfer))
1902                return;
1903
1904        if (!xfer->wait_for_resp) {
1905                ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1906                return;
1907        }
1908
1909        xfer->resp_len = nbytes;
1910        xfer->rx_done = true;
1911}
1912
1913static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1914                               struct ath10k_ce_pipe *rx_pipe,
1915                               struct bmi_xfer *xfer)
1916{
1917        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1918
1919        while (time_before_eq(jiffies, timeout)) {
1920                ath10k_pci_bmi_send_done(tx_pipe);
1921                ath10k_pci_bmi_recv_data(rx_pipe);
1922
1923                if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1924                        return 0;
1925
1926                schedule();
1927        }
1928
1929        return -ETIMEDOUT;
1930}
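
/* Editorial note: the loop above bounds the wait at roughly
 * BMI_COMMUNICATION_TIMEOUT_HZ jiffies, and schedule() keeps the
 * polling cooperative instead of busy-spinning, which is why the
 * caller must be allowed to sleep (see the might_sleep() in
 * ath10k_pci_hif_exchange_bmi_msg()).
 */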
1931
1932/*
1933 * Send an interrupt to the device to wake up the Target CPU
1934 * so it has an opportunity to notice any changed state.
1935 */
1936static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1937{
1938        u32 addr, val;
1939
1940        addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1941        val = ath10k_pci_read32(ar, addr);
1942        val |= CORE_CTRL_CPU_INTR_MASK;
1943        ath10k_pci_write32(ar, addr, val);
1944
1945        return 0;
1946}
1947
1948static int ath10k_pci_get_num_banks(struct ath10k *ar)
1949{
1950        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1951
1952        switch (ar_pci->pdev->device) {
1953        case QCA988X_2_0_DEVICE_ID:
1954        case QCA99X0_2_0_DEVICE_ID:
1955        case QCA9888_2_0_DEVICE_ID:
1956        case QCA9984_1_0_DEVICE_ID:
1957        case QCA9887_1_0_DEVICE_ID:
1958                return 1;
1959        case QCA6164_2_1_DEVICE_ID:
1960        case QCA6174_2_1_DEVICE_ID:
1961                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1962                case QCA6174_HW_1_0_CHIP_ID_REV:
1963                case QCA6174_HW_1_1_CHIP_ID_REV:
1964                case QCA6174_HW_2_1_CHIP_ID_REV:
1965                case QCA6174_HW_2_2_CHIP_ID_REV:
1966                        return 3;
1967                case QCA6174_HW_1_3_CHIP_ID_REV:
1968                        return 2;
1969                case QCA6174_HW_3_0_CHIP_ID_REV:
1970                case QCA6174_HW_3_1_CHIP_ID_REV:
1971                case QCA6174_HW_3_2_CHIP_ID_REV:
1972                        return 9;
1973                }
1974                break;
1975        case QCA9377_1_0_DEVICE_ID:
1976                return 2;
1977        }
1978
1979        ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1980        return 1;
1981}
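
/* Editorial note: the bank count returned here feeds
 * ath10k_pci_init_config() below, which encodes it into hi_early_alloc
 * via HI_EARLY_ALLOC_IRAM_BANKS_SHIFT/_MASK so the target firmware
 * knows how many IRAM banks were switched over during early allocation.
 */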
1982
1983static int ath10k_bus_get_num_banks(struct ath10k *ar)
1984{
1985        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1986
1987        return ar_pci->bus_ops->get_num_banks(ar);
1988}
1989
1990int ath10k_pci_init_config(struct ath10k *ar)
1991{
1992        u32 interconnect_targ_addr;
1993        u32 pcie_state_targ_addr = 0;
1994        u32 pipe_cfg_targ_addr = 0;
1995        u32 svc_to_pipe_map = 0;
1996        u32 pcie_config_flags = 0;
1997        u32 ealloc_value;
1998        u32 ealloc_targ_addr;
1999        u32 flag2_value;
2000        u32 flag2_targ_addr;
2001        int ret = 0;
2002
2003        /* Download to Target the CE Config and the service-to-CE map */
2004        interconnect_targ_addr =
2005                host_interest_item_address(HI_ITEM(hi_interconnect_state));
2006
2007        /* Supply Target-side CE configuration */
2008        ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
2009                                     &pcie_state_targ_addr);
2010        if (ret != 0) {
2011                ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
2012                return ret;
2013        }
2014
2015        if (pcie_state_targ_addr == 0) {
2016                ret = -EIO;
2017                ath10k_err(ar, "Invalid pcie state addr\n");
2018                return ret;
2019        }
2020
2021        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2022                                          offsetof(struct pcie_state,
2023                                                   pipe_cfg_addr)),
2024                                     &pipe_cfg_targ_addr);
2025        if (ret != 0) {
2026                ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2027                return ret;
2028        }
2029
2030        if (pipe_cfg_targ_addr == 0) {
2031                ret = -EIO;
2032                ath10k_err(ar, "Invalid pipe cfg addr\n");
2033                return ret;
2034        }
2035
2036        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2037                                        target_ce_config_wlan,
2038                                        sizeof(struct ce_pipe_config) *
2039                                        NUM_TARGET_CE_CONFIG_WLAN);
2040
2041        if (ret != 0) {
2042                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2043                return ret;
2044        }
2045
2046        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2047                                          offsetof(struct pcie_state,
2048                                                   svc_to_pipe_map)),
2049                                     &svc_to_pipe_map);
2050        if (ret != 0) {
2051                ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2052                return ret;
2053        }
2054
2055        if (svc_to_pipe_map == 0) {
2056                ret = -EIO;
2057                ath10k_err(ar, "Invalid svc_to_pipe map\n");
2058                return ret;
2059        }
2060
2061        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2062                                        target_service_to_ce_map_wlan,
2063                                        sizeof(target_service_to_ce_map_wlan));
2064        if (ret != 0) {
2065                ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2066                return ret;
2067        }
2068
2069        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2070                                          offsetof(struct pcie_state,
2071                                                   config_flags)),
2072                                     &pcie_config_flags);
2073        if (ret != 0) {
2074                ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2075                return ret;
2076        }
2077
2078        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2079
2080        ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2081                                           offsetof(struct pcie_state,
2082                                                    config_flags)),
2083                                      pcie_config_flags);
2084        if (ret != 0) {
2085                ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2086                return ret;
2087        }
2088
2089        /* configure early allocation */
2090        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2091
2092        ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2093        if (ret != 0) {
2094                ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2095                return ret;
2096        }
2097
2098        /* first bank is switched to IRAM */
2099        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2100                         HI_EARLY_ALLOC_MAGIC_MASK);
2101        ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2102                          HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2103                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2104
2105        ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2106        if (ret != 0) {
2107                ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2108                return ret;
2109        }
2110
2111        /* Tell Target to proceed with initialization */
2112        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2113
2114        ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2115        if (ret != 0) {
2116                ath10k_err(ar, "Failed to get option val: %d\n", ret);
2117                return ret;
2118        }
2119
2120        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2121
2122        ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2123        if (ret != 0) {
2124                ath10k_err(ar, "Failed to set option val: %d\n", ret);
2125                return ret;
2126        }
2127
2128        return 0;
2129}
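
/* Editorial sketch of the handshake performed above, using the
 * host-interest items referenced in the function:
 *
 *   1. read hi_interconnect_state -> address of struct pcie_state
 *   2. write target_ce_config_wlan to pcie_state.pipe_cfg_addr
 *   3. write target_service_to_ce_map_wlan to pcie_state.svc_to_pipe_map
 *   4. clear PCIE_CONFIG_FLAG_ENABLE_L1 in pcie_state.config_flags
 *   5. program hi_early_alloc with the magic value and IRAM bank count
 *   6. set HI_OPTION_EARLY_CFG_DONE in hi_option_flag2 so the target
 *      proceeds with initialization
 */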
2130
2131static void ath10k_pci_override_ce_config(struct ath10k *ar)
2132{
2133        struct ce_attr *attr;
2134        struct ce_pipe_config *config;
2135
2136        /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2137         * since it is currently used for another feature.
2138         */
2139
2140        /* Override Host's Copy Engine 5 configuration */
2141        attr = &host_ce_config_wlan[5];
2142        attr->src_sz_max = 0;
2143        attr->dest_nentries = 0;
2144
2145        /* Override Target firmware's Copy Engine configuration */
2146        config = &target_ce_config_wlan[5];
2147        config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2148        config->nbytes_max = __cpu_to_le32(2048);
2149
2150        /* Map from service/endpoint to Copy Engine */
2151        target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2152}
2153
2154int ath10k_pci_alloc_pipes(struct ath10k *ar)
2155{
2156        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2157        struct ath10k_pci_pipe *pipe;
2158        int i, ret;
2159
2160        for (i = 0; i < CE_COUNT; i++) {
2161                pipe = &ar_pci->pipe_info[i];
2162                pipe->ce_hdl = &ar_pci->ce_states[i];
2163                pipe->pipe_num = i;
2164                pipe->hif_ce_state = ar;
2165
2166                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
2167                if (ret) {
2168                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2169                                   i, ret);
2170                        return ret;
2171                }
2172
2173                /* Last CE is Diagnostic Window */
2174                if (i == CE_DIAG_PIPE) {
2175                        ar_pci->ce_diag = pipe->ce_hdl;
2176                        continue;
2177                }
2178
2179                pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2180        }
2181
2182        return 0;
2183}
2184
2185void ath10k_pci_free_pipes(struct ath10k *ar)
2186{
2187        int i;
2188
2189        for (i = 0; i < CE_COUNT; i++)
2190                ath10k_ce_free_pipe(ar, i);
2191}
2192
2193int ath10k_pci_init_pipes(struct ath10k *ar)
2194{
2195        int i, ret;
2196
2197        for (i = 0; i < CE_COUNT; i++) {
2198                ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2199                if (ret) {
2200                        ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2201                                   i, ret);
2202                        return ret;
2203                }
2204        }
2205
2206        return 0;
2207}
2208
2209static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2210{
2211        return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2212               FW_IND_EVENT_PENDING;
2213}
2214
2215static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2216{
2217        u32 val;
2218
2219        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2220        val &= ~FW_IND_EVENT_PENDING;
2221        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2222}
2223
2224static bool ath10k_pci_has_device_gone(struct ath10k *ar)
2225{
2226        u32 val;
2227
2228        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2229        return (val == 0xffffffff);
2230}
2231
2232/* this function effectively clears target memory controller assert line */
2233static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2234{
2235        u32 val;
2236
2237        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2238        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2239                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
2240        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2241
2242        msleep(10);
2243
2244        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2245        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2246                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2247        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2248
2249        msleep(10);
2250}
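
/* Editorial note: each write above is followed by a read of the same
 * register, a common PCI idiom for flushing the posted write so the
 * SI0 reset line is actually held asserted (and then deasserted) for
 * the full 10 ms sleep.
 */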
2251
2252static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2253{
2254        u32 val;
2255
2256        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2257
2258        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2259                                SOC_RESET_CONTROL_ADDRESS);
2260        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2261                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2262}
2263
2264static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2265{
2266        u32 val;
2267
2268        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2269                                SOC_RESET_CONTROL_ADDRESS);
2270
2271        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2272                           val | SOC_RESET_CONTROL_CE_RST_MASK);
2273        msleep(10);
2274        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2275                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2276}
2277
2278static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2279{
2280        u32 val;
2281
2282        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2283                                SOC_LF_TIMER_CONTROL0_ADDRESS);
2284        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2285                           SOC_LF_TIMER_CONTROL0_ADDRESS,
2286                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2287}
2288
2289static int ath10k_pci_warm_reset(struct ath10k *ar)
2290{
2291        int ret;
2292
2293        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2294
2295        spin_lock_bh(&ar->data_lock);
2296        ar->stats.fw_warm_reset_counter++;
2297        spin_unlock_bh(&ar->data_lock);
2298
2299        ath10k_pci_irq_disable(ar);
2300
2301        /* Make sure the target CPU is not doing anything dangerous, e.g. if it
2302         * were to access copy engine while host performs copy engine reset
2303         * then it is possible for the device to confuse pci-e controller to
2304         * the point of bringing host system to a complete stop (i.e. hang).
2305         */
2306        ath10k_pci_warm_reset_si0(ar);
2307        ath10k_pci_warm_reset_cpu(ar);
2308        ath10k_pci_init_pipes(ar);
2309        ath10k_pci_wait_for_target_init(ar);
2310
2311        ath10k_pci_warm_reset_clear_lf(ar);
2312        ath10k_pci_warm_reset_ce(ar);
2313        ath10k_pci_warm_reset_cpu(ar);
2314        ath10k_pci_init_pipes(ar);
2315
2316        ret = ath10k_pci_wait_for_target_init(ar);
2317        if (ret) {
2318                ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2319                return ret;
2320        }
2321
2322        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2323
2324        return 0;
2325}
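
/* Editorial note: the warm reset above intentionally runs two passes.
 * The first (SI0 + CPU reset, pipe init, wait) parks the target so the
 * copy engine can be reset safely; the second (LF timer clear, CE
 * reset, CPU reset, pipe init) brings the target back up before the
 * final, checked wait-for-init.
 */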
2326
2327static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
2328{
2329        ath10k_pci_irq_disable(ar);
2330        return ath10k_pci_qca99x0_chip_reset(ar);
2331}
2332
2333static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2334{
2335        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2336
2337        if (!ar_pci->pci_soft_reset)
2338                return -ENOTSUPP;
2339
2340        return ar_pci->pci_soft_reset(ar);
2341}
2342
2343static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2344{
2345        int i, ret;
2346        u32 val;
2347
2348        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2349
2350        /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2351         * It is thus preferred to use warm reset, which is safer but may not
2352         * be able to recover the device from all possible failure scenarios.
2353         *
2354         * Warm reset doesn't always work on the first try, so attempt it a
2355         * few times before giving up.
2356         */
2357        for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2358                ret = ath10k_pci_warm_reset(ar);
2359                if (ret) {
2360                        ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
2361                                    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2362                                    ret);
2363                        continue;
2364                }
2365
2366                /* FIXME: Sometimes copy engine doesn't recover after warm
2367                 * reset. In most cases this needs cold reset. In some of these
2368                 * cases the device is in such a state that a cold reset may
2369                 * lock up the host.
2370                 *
2371                 * Reading any host interest register via copy engine is
2372                 * sufficient to verify if device is capable of booting
2373                 * firmware blob.
2374                 */
2375                ret = ath10k_pci_init_pipes(ar);
2376                if (ret) {
2377                        ath10k_warn(ar, "failed to init copy engine: %d\n",
2378                                    ret);
2379                        continue;
2380                }
2381
2382                ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2383                                             &val);
2384                if (ret) {
2385                        ath10k_warn(ar, "failed to poke copy engine: %d\n",
2386                                    ret);
2387                        continue;
2388                }
2389
2390                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2391                return 0;
2392        }
2393
2394        if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2395                ath10k_warn(ar, "refusing cold reset as requested\n");
2396                return -EPERM;
2397        }
2398
2399        ret = ath10k_pci_cold_reset(ar);
2400        if (ret) {
2401                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2402                return ret;
2403        }
2404
2405        ret = ath10k_pci_wait_for_target_init(ar);
2406        if (ret) {
2407                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2408                            ret);
2409                return ret;
2410        }
2411
2412        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2413
2414        return 0;
2415}
2416
2417static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2418{
2419        int ret;
2420
2421        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2422
2423        /* FIXME: QCA6174 requires cold + warm reset to work. */
2424
2425        ret = ath10k_pci_cold_reset(ar);
2426        if (ret) {
2427                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2428                return ret;
2429        }
2430
2431        ret = ath10k_pci_wait_for_target_init(ar);
2432        if (ret) {
2433                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2434                            ret);
2435                return ret;
2436        }
2437
2438        ret = ath10k_pci_warm_reset(ar);
2439        if (ret) {
2440                ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2441                return ret;
2442        }
2443
2444        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold+warm)\n");
2445
2446        return 0;
2447}
2448
2449static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2450{
2451        int ret;
2452
2453        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2454
2455        ret = ath10k_pci_cold_reset(ar);
2456        if (ret) {
2457                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2458                return ret;
2459        }
2460
2461        ret = ath10k_pci_wait_for_target_init(ar);
2462        if (ret) {
2463                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2464                            ret);
2465                return ret;
2466        }
2467
2468        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2469
2470        return 0;
2471}
2472
2473static int ath10k_pci_chip_reset(struct ath10k *ar)
2474{
2475        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2476
2477        if (WARN_ON(!ar_pci->pci_hard_reset))
2478                return -ENOTSUPP;
2479
2480        return ar_pci->pci_hard_reset(ar);
2481}
2482
2483static int ath10k_pci_hif_power_up(struct ath10k *ar)
2484{
2485        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2486        int ret;
2487
2488        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2489
2490        pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2491                                  &ar_pci->link_ctl);
2492        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2493                                   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2494
2495        /*
2496         * Bring the target up cleanly.
2497         *
2498         * The target may be in an undefined state with an AUX-powered Target
2499         * and a Host in WoW mode. If the Host crashes, loses power, or is
2500         * restarted (without unloading the driver) then the Target is left
2501         * (aux) powered and running. On a subsequent driver load, the Target
2502         * is in an unexpected state. We try to catch that here in order to
2503         * reset the Target and retry the probe.
2504         */
2505        ret = ath10k_pci_chip_reset(ar);
2506        if (ret) {
2507                if (ath10k_pci_has_fw_crashed(ar)) {
2508                        ath10k_warn(ar, "firmware crashed during chip reset\n");
2509                        ath10k_pci_fw_crashed_clear(ar);
2510                        ath10k_pci_fw_crashed_dump(ar);
2511                }
2512
2513                ath10k_err(ar, "failed to reset chip: %d\n", ret);
2514                goto err_sleep;
2515        }
2516
2517        ret = ath10k_pci_init_pipes(ar);
2518        if (ret) {
2519                ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2520                goto err_sleep;
2521        }
2522
2523        ret = ath10k_pci_init_config(ar);
2524        if (ret) {
2525                ath10k_err(ar, "failed to setup init config: %d\n", ret);
2526                goto err_ce;
2527        }
2528
2529        ret = ath10k_pci_wake_target_cpu(ar);
2530        if (ret) {
2531                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2532                goto err_ce;
2533        }
2534        napi_enable(&ar->napi);
2535
2536        return 0;
2537
2538err_ce:
2539        ath10k_pci_ce_deinit(ar);
2540
2541err_sleep:
2542        return ret;
2543}
2544
2545void ath10k_pci_hif_power_down(struct ath10k *ar)
2546{
2547        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2548
2549        /* Currently hif_power_up performs effectively a reset and hif_stop
2550         * resets the chip as well so there's no point in resetting here.
2551         */
2552}
2553
2554#ifdef CONFIG_PM
2555
2556static int ath10k_pci_hif_suspend(struct ath10k *ar)
2557{
2558        /* The grace timer can still be counting down and ar->ps_awake may
2559         * still be true. It is known that the device may be asleep after
2560         * resuming regardless of the SoC powersave state before suspending.
2561         * Hence make sure the device is asleep before proceeding.
2562         */
2563        ath10k_pci_sleep_sync(ar);
2564
2565        return 0;
2566}
2567
2568static int ath10k_pci_hif_resume(struct ath10k *ar)
2569{
2570        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2571        struct pci_dev *pdev = ar_pci->pdev;
2572        u32 val;
2573        int ret = 0;
2574
2575        ret = ath10k_pci_force_wake(ar);
2576        if (ret) {
2577                ath10k_err(ar, "failed to wake up target: %d\n", ret);
2578                return ret;
2579        }
2580
2581        /* Suspend/Resume resets the PCI configuration space, so we have to
2582         * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2583         * from interfering with C3 CPU state. pci_restore_state won't help
2584         * here since it only restores the first 64 bytes of the PCI config header.
2585         */
2586        pci_read_config_dword(pdev, 0x40, &val);
2587        if ((val & 0x0000ff00) != 0)
2588                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2589
2590        return ret;
2591}
2592#endif
2593
2594static bool ath10k_pci_validate_cal(void *data, size_t size)
2595{
2596        __le16 *cal_words = data;
2597        u16 checksum = 0;
2598        size_t i;
2599
2600        if (size % 2 != 0)
2601                return false;
2602
2603        for (i = 0; i < size / 2; i++)
2604                checksum ^= le16_to_cpu(cal_words[i]);
2605
2606        return checksum == 0xffff;
2607}
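
/* Worked example for the checksum above (illustrative bytes, not real
 * calibration data): the blob is valid when the XOR of all
 * little-endian 16-bit words equals 0xffff. For the 4-byte blob
 * { 0x34, 0x12, 0xcb, 0xed }:
 *
 *   0x1234 ^ 0xedcb == 0xffff  ->  accepted
 *
 * Flipping any single bit breaks the identity, so the check catches
 * simple corruption (though not reordered words, since XOR commutes).
 */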
2608
2609static void ath10k_pci_enable_eeprom(struct ath10k *ar)
2610{
2611        /* Enable SI clock */
2612        ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
2613
2614        /* Configure GPIOs for I2C operation */
2615        ath10k_pci_write32(ar,
2616                           GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2617                           4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
2618                           SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
2619                              GPIO_PIN0_CONFIG) |
2620                           SM(1, GPIO_PIN0_PAD_PULL));
2621
2622        ath10k_pci_write32(ar,
2623                           GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
2624                           4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
2625                           SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
2626                           SM(1, GPIO_PIN0_PAD_PULL));
2627
2628        ath10k_pci_write32(ar,
2629                           GPIO_BASE_ADDRESS +
2630                           QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
2631                           1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
2632
2633        /* In Swift ASIC - EEPROM clock will be (110 MHz / 512) = ~214 kHz */
2634        ath10k_pci_write32(ar,
2635                           SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
2636                           SM(1, SI_CONFIG_ERR_INT) |
2637                           SM(1, SI_CONFIG_BIDIR_OD_DATA) |
2638                           SM(1, SI_CONFIG_I2C) |
2639                           SM(1, SI_CONFIG_POS_SAMPLE) |
2640                           SM(1, SI_CONFIG_INACTIVE_DATA) |
2641                           SM(1, SI_CONFIG_INACTIVE_CLK) |
2642                           SM(8, SI_CONFIG_DIVIDER));
2643}
2644
2645static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
2646{
2647        u32 reg;
2648        int wait_limit;
2649
2650        /* set device select byte and address for the read operation */
2651        reg = QCA9887_EEPROM_SELECT_READ |
2652              SM(addr, QCA9887_EEPROM_ADDR_LO) |
2653              SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
2654        ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
2655
2656        /* write transmit data, transfer length, and START bit */
2657        ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
2658                           SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
2659                           SM(4, SI_CS_TX_CNT));
2660
2661        /* wait max 1 sec */
2662        wait_limit = 100000;
2663
2664        /* wait for SI_CS_DONE_INT */
2665        do {
2666                reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
2667                if (MS(reg, SI_CS_DONE_INT))
2668                        break;
2669
2670                wait_limit--;
2671                udelay(10);
2672        } while (wait_limit > 0);
2673
2674        if (!MS(reg, SI_CS_DONE_INT)) {
2675                ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
2676                           addr);
2677                return -ETIMEDOUT;
2678        }
2679
2680        /* clear SI_CS_DONE_INT */
2681        ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
2682
2683        if (MS(reg, SI_CS_DONE_ERR)) {
2684                ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
2685                return -EIO;
2686        }
2687
2688        /* extract receive data */
2689        reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
2690        *out = reg;
2691
2692        return 0;
2693}
2694
2695static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
2696                                           size_t *data_len)
2697{
2698        u8 *caldata = NULL;
2699        size_t calsize, i;
2700        int ret;
2701
2702        if (!QCA_REV_9887(ar))
2703                return -EOPNOTSUPP;
2704
2705        calsize = ar->hw_params.cal_data_len;
2706        caldata = kmalloc(calsize, GFP_KERNEL);
2707        if (!caldata)
2708                return -ENOMEM;
2709
2710        ath10k_pci_enable_eeprom(ar);
2711
2712        for (i = 0; i < calsize; i++) {
2713                ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
2714                if (ret)
2715                        goto err_free;
2716        }
2717
2718        if (!ath10k_pci_validate_cal(caldata, calsize))
2719                goto err_free;
2720
2721        *data = caldata;
2722        *data_len = calsize;
2723
2724        return 0;
2725
2726err_free:
2727        kfree(caldata);
2728
2729        return -EINVAL;
2730}
2731
2732static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2733        .tx_sg                  = ath10k_pci_hif_tx_sg,
2734        .diag_read              = ath10k_pci_hif_diag_read,
2735        .diag_write             = ath10k_pci_diag_write_mem,
2736        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2737        .start                  = ath10k_pci_hif_start,
2738        .stop                   = ath10k_pci_hif_stop,
2739        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2740        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2741        .send_complete_check    = ath10k_pci_hif_send_complete_check,
2742        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2743        .power_up               = ath10k_pci_hif_power_up,
2744        .power_down             = ath10k_pci_hif_power_down,
2745        .read32                 = ath10k_pci_read32,
2746        .write32                = ath10k_pci_write32,
2747#ifdef CONFIG_PM
2748        .suspend                = ath10k_pci_hif_suspend,
2749        .resume                 = ath10k_pci_hif_resume,
2750#endif
2751        .fetch_cal_eeprom       = ath10k_pci_hif_fetch_cal_eeprom,
2752};
2753
2754/*
2755 * Top-level interrupt handler for all PCI interrupts from a Target.
2756 * When a block of MSI interrupts is allocated, this top-level handler
2757 * is not used; instead, we directly call the correct sub-handler.
2758 */
2759static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2760{
2761        struct ath10k *ar = arg;
2762        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2763        int ret;
2764
2765        if (ath10k_pci_has_device_gone(ar))
2766                return IRQ_NONE;
2767
2768        ret = ath10k_pci_force_wake(ar);
2769        if (ret) {
2770                ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
2771                return IRQ_NONE;
2772        }
2773
2774        if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
2775            !ath10k_pci_irq_pending(ar))
2776                return IRQ_NONE;
2777
2778        ath10k_pci_disable_and_clear_legacy_irq(ar);
2779        ath10k_pci_irq_msi_fw_mask(ar);
2780        napi_schedule(&ar->napi);
2781
2782        return IRQ_HANDLED;
2783}
2784
2785static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
2786{
2787        struct ath10k *ar = container_of(ctx, struct ath10k, napi);
2788        int done = 0;
2789
2790        if (ath10k_pci_has_fw_crashed(ar)) {
2791                ath10k_pci_fw_crashed_clear(ar);
2792                ath10k_pci_fw_crashed_dump(ar);
2793                napi_complete(ctx);
2794                return done;
2795        }
2796
2797        ath10k_ce_per_engine_service_any(ar);
2798
2799        done = ath10k_htt_txrx_compl_task(ar, budget);
2800
2801        if (done < budget) {
2802                napi_complete(ctx);
2803                /* In case of MSI, it is possible that interrupts are received
2804                 * while the NAPI poll is in progress, so pending interrupts
2805                 * that arrive after all copy engine pipes have been processed
2806                 * will not be handled again. This has caused failures to
2807                 * complete the boot sequence on x86 platforms. So before
2808                 * re-enabling interrupts it is safer to check for pending
2809                 * interrupts and service them immediately.
2810                 */
2811                if (CE_INTERRUPT_SUMMARY(ar)) {
2812                        napi_reschedule(ctx);
2813                        goto out;
2814                }
2815                ath10k_pci_enable_legacy_irq(ar);
2816                ath10k_pci_irq_msi_fw_unmask(ar);
2817        }
2818
2819out:
2820        return done;
2821}
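
/* Editorial note on the NAPI contract above: returning done < budget
 * tells the stack the poll is finished so interrupts may be re-enabled,
 * while returning done == budget keeps the instance scheduled. The
 * CE_INTERRUPT_SUMMARY() re-check closes the window where an interrupt
 * fires during processing and would otherwise be lost once the lines
 * are unmasked.
 */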
2822
2823static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2824{
2825        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2826        int ret;
2827
2828        ret = request_irq(ar_pci->pdev->irq,
2829                          ath10k_pci_interrupt_handler,
2830                          IRQF_SHARED, "ath10k_pci", ar);
2831        if (ret) {
2832                ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2833                            ar_pci->pdev->irq, ret);
2834                return ret;
2835        }
2836
2837        return 0;
2838}
2839
2840static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2841{
2842        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2843        int ret;
2844
2845        ret = request_irq(ar_pci->pdev->irq,
2846                          ath10k_pci_interrupt_handler,
2847                          IRQF_SHARED, "ath10k_pci", ar);
2848        if (ret) {
2849                ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2850                            ar_pci->pdev->irq, ret);
2851                return ret;
2852        }
2853
2854        return 0;
2855}
2856
2857static int ath10k_pci_request_irq(struct ath10k *ar)
2858{
2859        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2860
2861        switch (ar_pci->oper_irq_mode) {
2862        case ATH10K_PCI_IRQ_LEGACY:
2863                return ath10k_pci_request_irq_legacy(ar);
2864        case ATH10K_PCI_IRQ_MSI:
2865                return ath10k_pci_request_irq_msi(ar);
2866        default:
2867                return -EINVAL;
2868        }
2869}
2870
2871static void ath10k_pci_free_irq(struct ath10k *ar)
2872{
2873        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2874
2875        free_irq(ar_pci->pdev->irq, ar);
2876}
2877
2878void ath10k_pci_init_napi(struct ath10k *ar)
2879{
2880        netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
2881                       ATH10K_NAPI_BUDGET);
2882}
2883
2884static int ath10k_pci_init_irq(struct ath10k *ar)
2885{
2886        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2887        int ret;
2888
2889        ath10k_pci_init_napi(ar);
2890
2891        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2892                ath10k_info(ar, "limiting irq mode to: %d\n",
2893                            ath10k_pci_irq_mode);
2894
2895        /* Try MSI */
2896        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2897                ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
2898                ret = pci_enable_msi(ar_pci->pdev);
2899                if (ret == 0)
2900                        return 0;
2901
2902                /* fall-through */
2903        }
2904
2905        /* Try legacy irq
2906         *
2907         * A potential race occurs here: the CORE_BASE write depends on the
2908         * target correctly decoding the AXI address, but the host won't know
2909         * when the target writes the BAR to CORE_CTRL. This write might get
2910         * lost if the target has not yet written the BAR. For now, fix the
2911         * race by repeating the write in the synchronization check below.
2912         */
2913        ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
2914
2915        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2916                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2917
2918        return 0;
2919}
2920
2921static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2922{
2923        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2924                           0);
2925}
2926
2927static int ath10k_pci_deinit_irq(struct ath10k *ar)
2928{
2929        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2930
2931        switch (ar_pci->oper_irq_mode) {
2932        case ATH10K_PCI_IRQ_LEGACY:
2933                ath10k_pci_deinit_irq_legacy(ar);
2934                break;
2935        default:
2936                pci_disable_msi(ar_pci->pdev);
2937                break;
2938        }
2939
2940        return 0;
2941}
2942
2943int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2944{
2945        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2946        unsigned long timeout;
2947        u32 val;
2948
2949        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2950
2951        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2952
2953        do {
2954                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2955
2956                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2957                           val);
2958
2959                /* target should never return this */
2960                if (val == 0xffffffff)
2961                        continue;
2962
2963                /* the device has crashed so don't bother trying anymore */
2964                if (val & FW_IND_EVENT_PENDING)
2965                        break;
2966
2967                if (val & FW_IND_INITIALIZED)
2968                        break;
2969
2970                if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
2971                        /* Fix potential race by repeating CORE_BASE writes */
2972                        ath10k_pci_enable_legacy_irq(ar);
2973
2974                mdelay(10);
2975        } while (time_before(jiffies, timeout));
2976
2977        ath10k_pci_disable_and_clear_legacy_irq(ar);
2978        ath10k_pci_irq_msi_fw_mask(ar);
2979
2980        if (val == 0xffffffff) {
2981                ath10k_err(ar, "failed to read device register, device is gone\n");
2982                return -EIO;
2983        }
2984
2985        if (val & FW_IND_EVENT_PENDING) {
2986                ath10k_warn(ar, "device has crashed during init\n");
2987                return -ECOMM;
2988        }
2989
2990        if (!(val & FW_IND_INITIALIZED)) {
2991                ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2992                           val);
2993                return -ETIMEDOUT;
2994        }
2995
2996        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2997        return 0;
2998}
2999
3000static int ath10k_pci_cold_reset(struct ath10k *ar)
3001{
3002        u32 val;
3003
3004        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
3005
3006        spin_lock_bh(&ar->data_lock);
3007
3008        ar->stats.fw_cold_reset_counter++;
3009
3010        spin_unlock_bh(&ar->data_lock);
3011
3012        /* Put Target, including PCIe, into RESET. */
3013        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
3014        val |= 1;
3015        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3016
3017        /* After writing into SOC_GLOBAL_RESET to put the device into reset
3018         * and then pulling it out of reset, PCIe may not be stable for
3019         * immediate register access, which can cause bus errors. Add a delay
3020         * before any PCIe access request to avoid this issue.
3021         */
3022        msleep(20);
3023
3024        /* Pull Target, including PCIe, out of RESET. */
3025        val &= ~1;
3026        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
3027
3028        msleep(20);
3029
3030        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
3031
3032        return 0;
3033}

static int ath10k_pci_claim(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;
        int ret;

        pci_set_drvdata(pdev, ar);

        ret = pci_enable_device(pdev);
        if (ret) {
                ath10k_err(ar, "failed to enable pci device: %d\n", ret);
                return ret;
        }

        ret = pci_request_region(pdev, BAR_NUM, "ath");
        if (ret) {
                ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
                           ret);
                goto err_device;
        }

        /* Target expects 32 bit DMA. Enforce it. */
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
                goto err_region;
        }

        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
                           ret);
                goto err_region;
        }

        pci_set_master(pdev);

        /* Arrange for access to Target SoC registers. */
        ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!ar_pci->mem) {
                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
                ret = -EIO;
                goto err_master;
        }

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
        return 0;

err_master:
        pci_clear_master(pdev);

err_region:
        pci_release_region(pdev, BAR_NUM);

err_device:
        pci_disable_device(pdev);

        return ret;
}
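
/* The two mask calls above pin both streaming and coherent DMA to 32 bits,
 * which is what the target requires. On later kernels the same constraint
 * is expressed with a single generic DMA API call; a sketch of the
 * equivalent (not this file's code):
 *
 *        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *        if (ret) {
 *                ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
 *                goto err_region;
 *        }
 */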

static void ath10k_pci_release(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;

        pci_iounmap(pdev, ar_pci->mem);
        pci_release_region(pdev, BAR_NUM);
        pci_clear_master(pdev);
        pci_disable_device(pdev);
}

static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
        const struct ath10k_pci_supp_chip *supp_chip;
        int i;
        u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

        for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
                supp_chip = &ath10k_pci_supp_chips[i];

                if (supp_chip->dev_id == dev_id &&
                    supp_chip->rev_id == rev_id)
                        return true;
        }

        return false;
}
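
/* MS() is the driver's mask-and-shift field extractor; conceptually the
 * rev lookup above reduces to the usual pattern (mask/lsb names
 * illustrative):
 *
 *        rev_id = (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;
 *
 * so a chip is supported only if its (device id, silicon rev) pair is
 * whitelisted in ath10k_pci_supp_chips[].
 */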

int ath10k_pci_setup_resource(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        spin_lock_init(&ar_pci->ce_lock);
        spin_lock_init(&ar_pci->ps_lock);

        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
                    (unsigned long)ar);

        if (QCA_REV_6174(ar))
                ath10k_pci_override_ce_config(ar);

        ret = ath10k_pci_alloc_pipes(ar);
        if (ret) {
                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
                           ret);
                return ret;
        }

        return 0;
}
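
/* setup_timer() with a cast-to-unsigned-long context is the timer API of
 * this kernel generation. Under the later timer_setup()/from_timer()
 * conversion the same hookup would look roughly like (sketch only):
 *
 *        timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
 *
 * with the callback taking a struct timer_list * and recovering its
 * context via from_timer() instead of the unsigned long argument.
 */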

void ath10k_pci_release_resource(struct ath10k *ar)
{
        ath10k_pci_rx_retry_sync(ar);
        netif_napi_del(&ar->napi);
        ath10k_pci_ce_deinit(ar);
        ath10k_pci_free_pipes(ar);
}

static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
        .read32         = ath10k_bus_pci_read32,
        .write32        = ath10k_bus_pci_write32,
        .get_num_banks  = ath10k_pci_get_num_banks,
};
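
/* ath10k_bus_ops is the small ops table that keeps register access
 * bus-agnostic: shared code calls through these pointers and the AHB glue
 * supplies its own instance of the same struct. A call site might look
 * like (offset/val are illustrative):
 *
 *        val = ar_pci->bus_ops->read32(ar, offset);
 *        ar_pci->bus_ops->write32(ar, offset, val);
 */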

static int ath10k_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *pci_dev)
{
        int ret = 0;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        enum ath10k_hw_rev hw_rev;
        u32 chip_id;
        bool pci_ps;
        int (*pci_soft_reset)(struct ath10k *ar);
        int (*pci_hard_reset)(struct ath10k *ar);

        switch (pci_dev->device) {
        case QCA988X_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA988X;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_warm_reset;
                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
                break;
        case QCA9887_1_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9887;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_warm_reset;
                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
                break;
        case QCA6164_2_1_DEVICE_ID:
        case QCA6174_2_1_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA6174;
                pci_ps = true;
                pci_soft_reset = ath10k_pci_warm_reset;
                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
                break;
        case QCA99X0_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA99X0;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
                break;
        case QCA9984_1_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9984;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
                break;
        case QCA9888_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9888;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
                break;
        case QCA9377_1_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9377;
                pci_ps = true;
                pci_soft_reset = NULL;
                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
                break;
        default:
                WARN_ON(1);
                return -ENOTSUPP;
        }

        ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
                                hw_rev, &ath10k_pci_hif_ops);
        if (!ar) {
                dev_err(&pdev->dev, "failed to allocate core\n");
                return -ENOMEM;
        }

        ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
                   pdev->vendor, pdev->device,
                   pdev->subsystem_vendor, pdev->subsystem_device);

        ar_pci = ath10k_pci_priv(ar);
        ar_pci->pdev = pdev;
        ar_pci->dev = &pdev->dev;
        ar_pci->ar = ar;
        ar->dev_id = pci_dev->device;
        ar_pci->pci_ps = pci_ps;
        ar_pci->bus_ops = &ath10k_pci_bus_ops;
        ar_pci->pci_soft_reset = pci_soft_reset;
        ar_pci->pci_hard_reset = pci_hard_reset;

        ar->id.vendor = pdev->vendor;
        ar->id.device = pdev->device;
        ar->id.subsystem_vendor = pdev->subsystem_vendor;
        ar->id.subsystem_device = pdev->subsystem_device;

        setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
                    (unsigned long)ar);

        ret = ath10k_pci_setup_resource(ar);
        if (ret) {
                ath10k_err(ar, "failed to setup resource: %d\n", ret);
                goto err_core_destroy;
        }

        ret = ath10k_pci_claim(ar);
        if (ret) {
                ath10k_err(ar, "failed to claim device: %d\n", ret);
                goto err_free_pipes;
        }

        ret = ath10k_pci_force_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake up device: %d\n", ret);
                goto err_sleep;
        }

        ath10k_pci_ce_deinit(ar);
        ath10k_pci_irq_disable(ar);

        ret = ath10k_pci_init_irq(ar);
        if (ret) {
                ath10k_err(ar, "failed to init irqs: %d\n", ret);
                goto err_sleep;
        }

        ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
                    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn(ar, "failed to request irqs: %d\n", ret);
                goto err_deinit_irq;
        }

        ret = ath10k_pci_chip_reset(ar);
        if (ret) {
                ath10k_err(ar, "failed to reset chip: %d\n", ret);
                goto err_free_irq;
        }

        chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
        if (chip_id == 0xffffffff) {
                ath10k_err(ar, "failed to get chip id\n");
                ret = -ENODEV;
                goto err_free_irq;
        }

        if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
                ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
                           pdev->device, chip_id);
                ret = -ENODEV;
                goto err_free_irq;
        }

        ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err(ar, "failed to register driver core: %d\n", ret);
                goto err_free_irq;
        }

        return 0;

err_free_irq:
        ath10k_pci_free_irq(ar);
        ath10k_pci_rx_retry_sync(ar);

err_deinit_irq:
        ath10k_pci_deinit_irq(ar);

err_sleep:
        ath10k_pci_sleep_sync(ar);
        ath10k_pci_release(ar);

err_free_pipes:
        ath10k_pci_free_pipes(ar);

err_core_destroy:
        ath10k_core_destroy(ar);

        return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
        struct ath10k *ar = pci_get_drvdata(pdev);
        struct ath10k_pci *ar_pci;

        if (!ar)
                return;

        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci)
                return;

        ath10k_core_unregister(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_deinit_irq(ar);
        ath10k_pci_release_resource(ar);
        ath10k_pci_sleep_sync(ar);
        ath10k_pci_release(ar);
        ath10k_core_destroy(ar);
}
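
/* Teardown here mirrors the tail of ath10k_pci_probe() in reverse:
 * unregister from the core first so no new work arrives, then drop IRQs,
 * pipe resources and power state, and finally release the PCI claim and
 * destroy the core object. Keeping this order aligned with probe's
 * error-unwind labels keeps both paths consistent.
 */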

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
        .name = "ath10k_pci",
        .id_table = ath10k_pci_id_table,
        .probe = ath10k_pci_probe,
        .remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
        int ret;

        ret = pci_register_driver(&ath10k_pci_driver);
        if (ret)
                printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
                       ret);

        ret = ath10k_ahb_init();
        if (ret)
                printk(KERN_ERR "ahb init failed: %d\n", ret);

        return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
        pci_unregister_driver(&ath10k_pci_driver);
        ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);
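
/* The module honours the irq_mode and reset_mode parameters declared at
 * the top of the file; an illustrative load forcing legacy interrupts
 * while leaving reset selection on auto:
 *
 *        modprobe ath10k_pci irq_mode=1 reset_mode=0
 */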

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);