linux/drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
        ATH10K_PCI_RESET_AUTO = 0,
        ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
        { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
        { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
        {0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
        /* QCA988X pre 2.0 chips are not supported because they need some nasty
         * hacks. ath10k doesn't have them and these devices crash horribly
         * because of that.
         */
        { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },

        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
        { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },

        { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },

        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
        { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
};

static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);

static struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_htc_rx_cb,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_htc_rx_cb,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htc_tx_cb,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
                .send_cb = ath10k_pci_htt_tx_cb,
        },

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
                .recv_cb = ath10k_pci_htt_rx_cb,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },

        /* CE8: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 128,
                .recv_cb = ath10k_pci_pktlog_rx_cb,
        },

        /* CE9: target autonomous qcache memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE10: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE11: target autonomous hif memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },
};

/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = __cpu_to_le32(0),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = __cpu_to_le32(1),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE2: target->host WMI */
        {
                .pipenum = __cpu_to_le32(2),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE3: host->target WMI */
        {
                .pipenum = __cpu_to_le32(3),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE4: host->target HTT */
        {
                .pipenum = __cpu_to_le32(4),
                .pipedir = __cpu_to_le32(PIPEDIR_OUT),
                .nentries = __cpu_to_le32(256),
                .nbytes_max = __cpu_to_le32(256),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: target->host HTT (HIF->HTT) */
        {
                .pipenum = __cpu_to_le32(5),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(512),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = __cpu_to_le32(6),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(4096),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS),
                .reserved = __cpu_to_le32(0),
        },

        /* CE7 used only by Host */
        {
                .pipenum = __cpu_to_le32(7),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(0),
                .nbytes_max = __cpu_to_le32(0),
                .flags = __cpu_to_le32(0),
                .reserved = __cpu_to_le32(0),
        },

        /* CE8: target->host pktlog */
        {
                .pipenum = __cpu_to_le32(8),
                .pipedir = __cpu_to_le32(PIPEDIR_IN),
                .nentries = __cpu_to_le32(64),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* CE9: target autonomous qcache memcpy */
        {
                .pipenum = __cpu_to_le32(9),
                .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
                .nentries = __cpu_to_le32(32),
                .nbytes_max = __cpu_to_le32(2048),
                .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
                .reserved = __cpu_to_le32(0),
        },

        /* It is not necessary to send the target wlan configuration for CE10
         * and CE11 as these CEs are not actively used in the target.
         */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(3),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(2),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(0),
        },
        { /* not used */
                __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(1),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_OUT),     /* out = UL = host -> target */
                __cpu_to_le32(4),
        },
        {
                __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
                __cpu_to_le32(PIPEDIR_IN),      /* in = DL = target -> host */
                __cpu_to_le32(5),
        },

        /* (Additions here) */

        { /* must be last */
                __cpu_to_le32(0),
                __cpu_to_le32(0),
                __cpu_to_le32(0),
        },
};

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                           RTC_STATE_ADDRESS);

        return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}

static void __ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_V_MASK,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        lockdep_assert_held(&ar_pci->ps_lock);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;
}

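/* Poll until the RTC state register reports the device as awake. The delay
 * between reads starts at 5 us and grows by 5 us per iteration (capped at
 * 50 us) until a total of PCIE_WAKE_TIMEOUT us has elapsed.
 */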
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
        int tot_delay = 0;
        int curr_delay = 5;

        while (tot_delay < PCIE_WAKE_TIMEOUT) {
                if (ath10k_pci_is_awake(ar)) {
                        if (tot_delay > PCIE_WAKE_LATE_US)
                                ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
                                            tot_delay / 1000);
                        return 0;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }

        return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        if (!ar_pci->ps_awake) {
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
        ar_pci->ps_awake = false;

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

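/* Refcounted wake: every successful ath10k_pci_wake() takes a reference
 * that must be dropped with a matching ath10k_pci_sleep(). The chip is only
 * put back to sleep once the refcount reaches zero and the grace-period
 * timer (ath10k_pci_ps_timer below) has fired.
 */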
static int ath10k_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;
        int ret = 0;

        if (ar_pci->pci_ps == 0)
                return ret;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        /* This function can be called very frequently. To avoid excessive
         * CPU stalls from MMIO reads, use a cached variable to hold the
         * device state.
         */
        if (!ar_pci->ps_awake) {
                __ath10k_pci_wake(ar);

                ret = ath10k_pci_wake_wait(ar);
                if (ret == 0)
                        ar_pci->ps_awake = true;
        }

        if (ret == 0) {
                ar_pci->ps_wake_refcount++;
                WARN_ON(ar_pci->ps_wake_refcount == 0);
        }

        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

        return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0)
                return;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (WARN_ON(ar_pci->ps_wake_refcount == 0))
                goto skip;

        ar_pci->ps_wake_refcount--;

        mod_timer(&ar_pci->ps_timer, jiffies +
                  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
        struct ath10k *ar = (void *)ptr;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        spin_lock_irqsave(&ar_pci->ps_lock, flags);

        ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
                   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

        if (ar_pci->ps_wake_refcount > 0)
                goto skip;

        __ath10k_pci_sleep(ar);

skip:
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        unsigned long flags;

        if (ar_pci->pci_ps == 0) {
                ath10k_pci_force_sleep(ar);
                return;
        }

        del_timer_sync(&ar_pci->ps_timer);

        spin_lock_irqsave(&ar_pci->ps_lock, flags);
        WARN_ON(ar_pci->ps_wake_refcount > 0);
        __ath10k_pci_sleep(ar);
        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

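/* Bus-level MMIO accessors: accesses are bounds-checked against the mapped
 * register window (ar_pci->mem_len) and bracketed with wake/sleep calls so
 * the target is guaranteed to be awake for the duration of the access.
 */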
static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(value), ar_pci->mem_len);
                return;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
                            value, offset, ret);
                return;
        }

        iowrite32(value, ar_pci->mem + offset);
        ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 val;
        int ret;

        if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
                ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
                            offset, offset + sizeof(val), ar_pci->mem_len);
                return 0;
        }

        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
                            offset, ret);
                return 0xffffffff;
        }

        val = ioread32(ar_pci->mem + offset);
        ath10k_pci_sleep(ar);

        return val;
}

inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ar_pci->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ar_pci->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
        return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
        ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
        /* IMPORTANT: the INTR_CLR register has to be written after
         * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
         * properly cleared. */
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
                           0);
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
                           PCIE_INTR_ENABLE_ADDRESS,
                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer. */
        (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
                return "msi";

        return "legacy";
}

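/* Allocate a single RX skb, DMA-map it for the device and post it to the
 * pipe's CE destination ring under ce_lock. On failure the skb is unmapped
 * and freed so the caller can simply retry later.
 */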
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret;

        skb = dev_alloc_skb(pipe->buf_sz);
        if (!skb)
                return -ENOMEM;

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        paddr = dma_map_single(ar->dev, skb->data,
                               skb->len + skb_tailroom(skb),
                               DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(ar->dev, paddr))) {
                ath10k_warn(ar, "failed to dma map pci rx buf\n");
                dev_kfree_skb_any(skb);
                return -EIO;
        }

        ATH10K_SKB_RXCB(skb)->paddr = paddr;

        spin_lock_bh(&ar_pci->ce_lock);
        ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
        spin_unlock_bh(&ar_pci->ce_lock);
        if (ret) {
                dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                return ret;
        }

        return 0;
}

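/* Top up a pipe's destination ring with fresh RX buffers. -ENOSPC simply
 * means the ring is full; any other error arms the retry timer so the
 * refill is reattempted after ATH10K_PCI_RX_POST_RETRY_MS.
 */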
static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
        struct ath10k *ar = pipe->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
        int ret, num;

        if (pipe->buf_sz == 0)
                return;

        if (!ce_pipe->dest_ring)
                return;

        spin_lock_bh(&ar_pci->ce_lock);
        num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
        spin_unlock_bh(&ar_pci->ce_lock);

        while (num >= 0) {
                ret = __ath10k_pci_rx_post_buf(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
                                break;
                        ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
                        mod_timer(&ar_pci->rx_post_retry, jiffies +
                                  ATH10K_PCI_RX_POST_RETRY_MS);
                        break;
                }
                num--;
        }
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < CE_COUNT; i++)
                ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
        struct ath10k *ar = (void *)ptr;

        ath10k_pci_rx_post(ar);
}

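/* Translate a target CPU virtual address into an address the copy engine
 * can use: the low 20 bits of the input are kept (with bit 20 set) and the
 * upper bits are taken from CORE_CTRL, or from the BAR register on
 * QCA99X0/QCA4019. The exact meaning of the upper bits is chip specific.
 */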
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        u32 val = 0;

        switch (ar->hw_rev) {
        case ATH10K_HW_QCA988X:
        case ATH10K_HW_QCA6174:
        case ATH10K_HW_QCA9377:
                val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                          CORE_CTRL_ADDRESS) &
                       0x7ff) << 21;
                break;
        case ATH10K_HW_QCA99X0:
        case ATH10K_HW_QCA4019:
                val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
                break;
        }

        val |= 0x100000 | (addr & 0xfffff);
        return val;
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * The caller must guarantee proper alignment, where applicable, and that
 * there is only a single user at any given moment.
 */
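/* Read flow: a coherent bounce buffer is posted to the diagnostic CE as the
 * RX destination and the CE is then asked to "send" from the (translated)
 * target address. Completions are polled with mdelay() for up to
 * DIAG_ACCESS_CE_TIMEOUT_MS per DIAG_TRANSFER_LIMIT-sized chunk.
 */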
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 *buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
                                            0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag,
                                                            (void **)&buf,
                                                            &completed_nbytes)
                                                                != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (*buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0)
                memcpy(data, data_buf, orig_nbytes);
        else
                ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
                            address, ret);

        if (data_buf)
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
        __le32 val = 0;
        int ret;

        ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
        *value = __le32_to_cpu(val);

        return ret;
}

static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
                                     u32 src, u32 len)
{
        u32 host_addr, addr;
        int ret;

        host_addr = host_interest_item_address(src);

        ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
        if (ret != 0) {
                ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
                            src, ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
        if (ret != 0) {
                ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
                            addr, len, ret);
                return ret;
        }

        return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)             \
        __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

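/* Write flow: the mirror image of the diagnostic read above. The caller's
 * data is copied into a coherent bounce buffer, the translated target
 * address is posted as the RX destination, and the CE then "sends" the
 * bounce buffer contents into target memory chunk by chunk.
 */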
int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                              const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 *buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        spin_lock_bh(&ar_pci->ce_lock);

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
                                                       orig_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        memcpy(data_buf, data, orig_nbytes);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
                                            nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next_nolock(ce_diag,
                                                            NULL) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                i = 0;
                while (ath10k_ce_completed_recv_next_nolock(ce_diag,
                                                            (void **)&buf,
                                                            &completed_nbytes)
                                                                != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (*buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                dma_free_coherent(ar->dev, orig_nbytes, data_buf,
                                  ce_data_base);
        }

        if (ret != 0)
                ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
                            address, ret);

        spin_unlock_bh(&ar_pci->ce_lock);

        return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
        __le32 val = __cpu_to_le32(value);

        return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff_head list;
        struct sk_buff *skb;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (skb == NULL)
                        continue;

                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list)))
                ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
                                     void (*callback)(struct ath10k *ar,
                                                      struct sk_buff *skb))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        unsigned int nbytes, max_nbytes;

        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                callback(ar, skb);
        }

        ath10k_pci_rx_post_pipe(pipe_info);
}

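/* HTT RX fast path: unlike ath10k_pci_process_rx_cb() the buffers are not
 * unmapped and reposted. Each skb is synced for the CPU, handed to the
 * callback, then trimmed back to zero length and synced for the device so
 * the same DMA mapping can be reused in place.
 */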
static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
                                         void (*callback)(struct ath10k *ar,
                                                          struct sk_buff *skb))
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
        struct sk_buff *skb;
        struct sk_buff_head list;
        void *transfer_context;
        unsigned int nbytes, max_nbytes, nentries;
        int orig_len;

        /* No need to acquire ce_lock for CE5, since this is the only place CE5
         * is processed other than init and deinit. Before releasing CE5
         * buffers, interrupts are disabled. Thus CE5 access is serialized.
         */
        __skb_queue_head_init(&list);
        while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
                                                    &nbytes) == 0) {
                skb = transfer_context;
                max_nbytes = skb->len + skb_tailroom(skb);

                if (unlikely(max_nbytes < nbytes)) {
                        ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        continue;
                }

                dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                        max_nbytes, DMA_FROM_DEVICE);
                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        nentries = skb_queue_len(&list);
        while ((skb = __skb_dequeue(&list))) {
                ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
                           ce_state->id, skb->len);
                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
                                skb->data, skb->len);

                orig_len = skb->len;
                callback(ar, skb);
                skb_push(skb, orig_len - skb->len);
                skb_reset_tail_pointer(skb);
                skb_trim(skb, 0);

                /* let device gain the buffer again */
                dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
                                           skb->len + skb_tailroom(skb),
                                           DMA_FROM_DEVICE);
        }
        ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever the CE pipe which transports
         * HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses a separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        ath10k_pci_process_rx_cb(ce_state,
                                 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct sk_buff *skb;

        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                /* no need to call tx completion for NULL pointers */
                if (!skb)
                        continue;

                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len, DMA_TO_DEVICE);
                ath10k_htt_hif_tx_complete(ar, skb);
        }
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
        skb_pull(skb, sizeof(struct ath10k_htc_hdr));
        ath10k_htt_t2h_msg_handler(ar, skb);
}

/* Called by lower (CE) layer when HTT data is received from the Target. */
static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
        /* CE4 polling needs to be done whenever the CE pipe which transports
         * HTT Rx (target->host) is processed.
         */
        ath10k_ce_per_engine_service(ce_state->ar, 4);

        ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}

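/* Post a scatter-gather list to a CE source ring under ce_lock. All but the
 * last item carry CE_SEND_FLAG_GATHER; the final item is sent without the
 * flag, terminating the gather sequence. On error every entry posted so far
 * is reverted before the lock is dropped.
 */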
1324int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
1325                         struct ath10k_hif_sg_item *items, int n_items)
1326{
1327        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1328        struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
1329        struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
1330        struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
1331        unsigned int nentries_mask;
1332        unsigned int sw_index;
1333        unsigned int write_index;
1334        int err, i = 0;
1335
1336        spin_lock_bh(&ar_pci->ce_lock);
1337
1338        nentries_mask = src_ring->nentries_mask;
1339        sw_index = src_ring->sw_index;
1340        write_index = src_ring->write_index;
1341
1342        if (unlikely(CE_RING_DELTA(nentries_mask,
1343                                   write_index, sw_index - 1) < n_items)) {
1344                err = -ENOBUFS;
1345                goto err;
1346        }
1347
1348        for (i = 0; i < n_items - 1; i++) {
1349                ath10k_dbg(ar, ATH10K_DBG_PCI,
1350                           "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1351                           i, items[i].paddr, items[i].len, n_items);
1352                ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1353                                items[i].vaddr, items[i].len);
1354
1355                err = ath10k_ce_send_nolock(ce_pipe,
1356                                            items[i].transfer_context,
1357                                            items[i].paddr,
1358                                            items[i].len,
1359                                            items[i].transfer_id,
1360                                            CE_SEND_FLAG_GATHER);
1361                if (err)
1362                        goto err;
1363        }
1364
1365        /* `i` is equal to `n_items -1` after for() */
1366
1367        ath10k_dbg(ar, ATH10K_DBG_PCI,
1368                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
1369                   i, items[i].paddr, items[i].len, n_items);
1370        ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
1371                        items[i].vaddr, items[i].len);
1372
1373        err = ath10k_ce_send_nolock(ce_pipe,
1374                                    items[i].transfer_context,
1375                                    items[i].paddr,
1376                                    items[i].len,
1377                                    items[i].transfer_id,
1378                                    0);
1379        if (err)
1380                goto err;
1381
1382        spin_unlock_bh(&ar_pci->ce_lock);
1383        return 0;
1384
1385err:
1386        for (; i > 0; i--)
1387                __ath10k_ce_send_revert(ce_pipe);
1388
1389        spin_unlock_bh(&ar_pci->ce_lock);
1390        return err;
1391}
1392
1393int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
1394                             size_t buf_len)
1395{
1396        return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
1397}
1398
1399u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
1400{
1401        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1402
1403        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
1404
1405        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
1406}
1407
1408static void ath10k_pci_dump_registers(struct ath10k *ar,
1409                                      struct ath10k_fw_crash_data *crash_data)
1410{
1411        __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
1412        int i, ret;
1413
1414        lockdep_assert_held(&ar->data_lock);
1415
1416        ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
1417                                      hi_failure_state,
1418                                      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
1419        if (ret) {
1420                ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
1421                return;
1422        }
1423
1424        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
1425
1426        ath10k_err(ar, "firmware register dump:\n");
1427        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
1428                ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
1429                           i,
1430                           __le32_to_cpu(reg_dump_values[i]),
1431                           __le32_to_cpu(reg_dump_values[i + 1]),
1432                           __le32_to_cpu(reg_dump_values[i + 2]),
1433                           __le32_to_cpu(reg_dump_values[i + 3]));
1434
1435        if (!crash_data)
1436                return;
1437
1438        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
1439                crash_data->registers[i] = reg_dump_values[i];
1440}
1441
1442static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
1443{
1444        struct ath10k_fw_crash_data *crash_data;
1445        char uuid[50];
1446
1447        spin_lock_bh(&ar->data_lock);
1448
1449        ar->stats.fw_crash_counter++;
1450
1451        crash_data = ath10k_debug_get_new_fw_crash_data(ar);
1452
1453        if (crash_data)
1454                scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
1455        else
1456                scnprintf(uuid, sizeof(uuid), "n/a");
1457
1458        ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
1459        ath10k_print_driver_info(ar);
1460        ath10k_pci_dump_registers(ar, crash_data);
1461
1462        spin_unlock_bh(&ar->data_lock);
1463
1464        queue_work(ar->workqueue, &ar->restart_work);
1465}
1466
1467void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1468                                        int force)
1469{
1470        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1471
1472        if (!force) {
1473                int resources;
1474                /*
1475                 * Decide whether to actually poll for completions, or just
1476                 * wait for a later chance.
1477                 * If there seem to be plenty of resources left, then just wait
1478                 * since checking involves reading a CE register, which is a
1479                 * relatively expensive operation.
1480                 */
1481                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1482
1483                /*
1484                 * If at least 50% of the total resources are still available,
1485                 * don't bother checking again yet.
1486                 */
1487                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1488                        return;
1489        }
1490        ath10k_ce_per_engine_service(ar, pipe);
1491}
1492
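    /* Stop deferred processing: kill the interrupt tasklet and cancel the
     * rx-post retry timer.
     */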
1493void ath10k_pci_kill_tasklet(struct ath10k *ar)
1494{
1495        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1496
1497        tasklet_kill(&ar_pci->intr_tq);
1498
1499        del_timer_sync(&ar_pci->rx_post_retry);
1500}
1501
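    /* Translate an HTC service id into its upload (host->target) and
     * download (target->host) CE pipe numbers by scanning the target
     * service-to-CE map. Fails if either direction is left unmapped.
     */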
1502int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
1503                                       u8 *ul_pipe, u8 *dl_pipe)
1504{
1505        const struct service_to_pipe *entry;
1506        bool ul_set = false, dl_set = false;
1507        int i;
1508
1509        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1510
1511        for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1512                entry = &target_service_to_ce_map_wlan[i];
1513
1514                if (__le32_to_cpu(entry->service_id) != service_id)
1515                        continue;
1516
1517                switch (__le32_to_cpu(entry->pipedir)) {
1518                case PIPEDIR_NONE:
1519                        break;
1520                case PIPEDIR_IN:
1521                        WARN_ON(dl_set);
1522                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1523                        dl_set = true;
1524                        break;
1525                case PIPEDIR_OUT:
1526                        WARN_ON(ul_set);
1527                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1528                        ul_set = true;
1529                        break;
1530                case PIPEDIR_INOUT:
1531                        WARN_ON(dl_set);
1532                        WARN_ON(ul_set);
1533                        *dl_pipe = __le32_to_cpu(entry->pipenum);
1534                        *ul_pipe = __le32_to_cpu(entry->pipenum);
1535                        dl_set = true;
1536                        ul_set = true;
1537                        break;
1538                }
1539        }
1540
1541        if (WARN_ON(!ul_set || !dl_set))
1542                return -ENOENT;
1543
1544        return 0;
1545}
1546
1547void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1548                                     u8 *ul_pipe, u8 *dl_pipe)
1549{
1550        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1551
1552        (void)ath10k_pci_hif_map_service_to_pipe(ar,
1553                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1554                                                 ul_pipe, dl_pipe);
1555}
1556
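    /* Gate the firmware's ability to raise an interrupt towards the host
     * by clearing the corresponding CORE_CTRL bit. This matters because
     * the first MSI vector, used for firmware indications, cannot be
     * masked on the host side (see ath10k_pci_hif_stop()).
     */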
1557static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
1558{
1559        u32 val;
1560
1561        switch (ar->hw_rev) {
1562        case ATH10K_HW_QCA988X:
1563        case ATH10K_HW_QCA6174:
1564        case ATH10K_HW_QCA9377:
1565                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1566                                        CORE_CTRL_ADDRESS);
1567                val &= ~CORE_CTRL_PCIE_REG_31_MASK;
1568                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1569                                   CORE_CTRL_ADDRESS, val);
1570                break;
1571        case ATH10K_HW_QCA99X0:
1572        case ATH10K_HW_QCA4019:
1573                /* TODO: Find appropriate register configuration for QCA99X0
1574                 * to mask irq/MSI.
1575                 */
1576                break;
1577        }
1578}
1579
1580static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
1581{
1582        u32 val;
1583
1584        switch (ar->hw_rev) {
1585        case ATH10K_HW_QCA988X:
1586        case ATH10K_HW_QCA6174:
1587        case ATH10K_HW_QCA9377:
1588                val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1589                                        CORE_CTRL_ADDRESS);
1590                val |= CORE_CTRL_PCIE_REG_31_MASK;
1591                ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1592                                   CORE_CTRL_ADDRESS, val);
1593                break;
1594        case ATH10K_HW_QCA99X0:
1595        case ATH10K_HW_QCA4019:
1596                /* TODO: Find appropriate register configuration for QCA99X0
1597                 * to unmask irq/MSI.
1598                 */
1599                break;
1600        }
1601}
1602
1603static void ath10k_pci_irq_disable(struct ath10k *ar)
1604{
1605        ath10k_ce_disable_interrupts(ar);
1606        ath10k_pci_disable_and_clear_legacy_irq(ar);
1607        ath10k_pci_irq_msi_fw_mask(ar);
1608}
1609
1610static void ath10k_pci_irq_sync(struct ath10k *ar)
1611{
1612        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1613
1614        synchronize_irq(ar_pci->pdev->irq);
1615}
1616
1617static void ath10k_pci_irq_enable(struct ath10k *ar)
1618{
1619        ath10k_ce_enable_interrupts(ar);
1620        ath10k_pci_enable_legacy_irq(ar);
1621        ath10k_pci_irq_msi_fw_unmask(ar);
1622}
1623
1624static int ath10k_pci_hif_start(struct ath10k *ar)
1625{
1626        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1627
1628        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1629
1630        ath10k_pci_irq_enable(ar);
1631        ath10k_pci_rx_post(ar);
1632
1633        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
1634                                   ar_pci->link_ctl);
1635
1636        return 0;
1637}
1638
1639static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1640{
1641        struct ath10k *ar;
1642        struct ath10k_ce_pipe *ce_pipe;
1643        struct ath10k_ce_ring *ce_ring;
1644        struct sk_buff *skb;
1645        int i;
1646
1647        ar = pci_pipe->hif_ce_state;
1648        ce_pipe = pci_pipe->ce_hdl;
1649        ce_ring = ce_pipe->dest_ring;
1650
1651        if (!ce_ring)
1652                return;
1653
1654        if (!pci_pipe->buf_sz)
1655                return;
1656
1657        for (i = 0; i < ce_ring->nentries; i++) {
1658                skb = ce_ring->per_transfer_context[i];
1659                if (!skb)
1660                        continue;
1661
1662                ce_ring->per_transfer_context[i] = NULL;
1663
1664                dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
1665                                 skb->len + skb_tailroom(skb),
1666                                 DMA_FROM_DEVICE);
1667                dev_kfree_skb_any(skb);
1668        }
1669}
1670
1671static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
1672{
1673        struct ath10k *ar;
1675        struct ath10k_ce_pipe *ce_pipe;
1676        struct ath10k_ce_ring *ce_ring;
1677        struct sk_buff *skb;
1678        int i;
1679
1680        ar = pci_pipe->hif_ce_state;
1682        ce_pipe = pci_pipe->ce_hdl;
1683        ce_ring = ce_pipe->src_ring;
1684
1685        if (!ce_ring)
1686                return;
1687
1688        if (!pci_pipe->buf_sz)
1689                return;
1690
1691        for (i = 0; i < ce_ring->nentries; i++) {
1692                skb = ce_ring->per_transfer_context[i];
1693                if (!skb)
1694                        continue;
1695
1696                ce_ring->per_transfer_context[i] = NULL;
1697
1698                ath10k_htc_tx_completion_handler(ar, skb);
1699        }
1700}
1701
1702/*
1703 * Clean up residual buffers for device shutdown:
1704 *  - buffers that were enqueued for receive
1705 *  - buffers that were to be sent
1706 * Note: buffers that had completed but were not yet processed are on a
1707 * completion queue. They are handled when the completion thread shuts
1708 * down.
1709 */
1710static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1711{
1712        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1713        int pipe_num;
1714
1715        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1716                struct ath10k_pci_pipe *pipe_info;
1717
1718                pipe_info = &ar_pci->pipe_info[pipe_num];
1719                ath10k_pci_rx_pipe_cleanup(pipe_info);
1720                ath10k_pci_tx_pipe_cleanup(pipe_info);
1721        }
1722}
1723
1724void ath10k_pci_ce_deinit(struct ath10k *ar)
1725{
1726        int i;
1727
1728        for (i = 0; i < CE_COUNT; i++)
1729                ath10k_ce_deinit_pipe(ar, i);
1730}
1731
1732void ath10k_pci_flush(struct ath10k *ar)
1733{
1734        ath10k_pci_kill_tasklet(ar);
1735        ath10k_pci_buffer_cleanup(ar);
1736}
1737
1738static void ath10k_pci_hif_stop(struct ath10k *ar)
1739{
1740        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1741        unsigned long flags;
1742
1743        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
1744
1745        /* Most likely the device has an HTT Rx ring configured. The only way
1746         * to prevent the device from accessing (and possibly corrupting) host
1747         * memory is to reset the chip now.
1748         *
1749         * There's also no known way of masking MSI interrupts on the device.
1750         * For ranged MSI the CE-related interrupts can be masked. However,
1751         * regardless of how many MSI interrupts are assigned, the first one
1752         * is always used for firmware indications (crashes) and cannot be
1753         * masked. To prevent the device from asserting the interrupt, reset it
1754         * before proceeding with cleanup.
1755         */
1756        ath10k_pci_safe_chip_reset(ar);
1757
1758        ath10k_pci_irq_disable(ar);
1759        ath10k_pci_irq_sync(ar);
1760        ath10k_pci_flush(ar);
1761
1762        spin_lock_irqsave(&ar_pci->ps_lock, flags);
1763        WARN_ON(ar_pci->ps_wake_refcount > 0);
1764        spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
1765}
1766
1767int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1768                                    void *req, u32 req_len,
1769                                    void *resp, u32 *resp_len)
1770{
1771        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1772        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1773        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1774        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1775        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1776        dma_addr_t req_paddr = 0;
1777        dma_addr_t resp_paddr = 0;
1778        struct bmi_xfer xfer = {};
1779        void *treq, *tresp = NULL;
1780        int ret = 0;
1781
1782        might_sleep();
1783
1784        if (resp && !resp_len)
1785                return -EINVAL;
1786
1787        if (resp && resp_len && *resp_len == 0)
1788                return -EINVAL;
1789
1790        treq = kmemdup(req, req_len, GFP_KERNEL);
1791        if (!treq)
1792                return -ENOMEM;
1793
1794        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1795        ret = dma_mapping_error(ar->dev, req_paddr);
1796        if (ret) {
1797                ret = -EIO;
1798                goto err_dma;
1799        }
1800
1801        if (resp && resp_len) {
1802                tresp = kzalloc(*resp_len, GFP_KERNEL);
1803                if (!tresp) {
1804                        ret = -ENOMEM;
1805                        goto err_req;
1806                }
1807
1808                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1809                                            DMA_FROM_DEVICE);
1810                ret = dma_mapping_error(ar->dev, resp_paddr);
1811                if (ret) {
1812                        ret = -EIO;
1813                        goto err_req;
1814                }
1815
1816                xfer.wait_for_resp = true;
1817                xfer.resp_len = 0;
1818
1819                ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
1820        }
1821
1822        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1823        if (ret)
1824                goto err_resp;
1825
1826        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1827        if (ret) {
1828                u32 unused_buffer;
1829                unsigned int unused_nbytes;
1830                unsigned int unused_id;
1831
1832                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1833                                           &unused_nbytes, &unused_id);
1834        } else {
1835                /* the transfer completed before the timeout */
1836                ret = 0;
1837        }
1838
1839err_resp:
1840        if (resp) {
1841                u32 unused_buffer;
1842
1843                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1844                dma_unmap_single(ar->dev, resp_paddr,
1845                                 *resp_len, DMA_FROM_DEVICE);
1846        }
1847err_req:
1848        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1849
1850        if (ret == 0 && resp_len) {
1851                *resp_len = min(*resp_len, xfer.resp_len);
1852                memcpy(resp, tresp, *resp_len);
1853        }
1854err_dma:
1855        kfree(treq);
1856        kfree(tresp);
1857
1858        return ret;
1859}
1860
1861static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1862{
1863        struct bmi_xfer *xfer;
1864
1865        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
1866                return;
1867
1868        xfer->tx_done = true;
1869}
1870
1871static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1872{
1873        struct ath10k *ar = ce_state->ar;
1874        struct bmi_xfer *xfer;
1875        unsigned int nbytes;
1876
1877        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
1878                                          &nbytes))
1879                return;
1880
1881        if (WARN_ON_ONCE(!xfer))
1882                return;
1883
1884        if (!xfer->wait_for_resp) {
1885                ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1886                return;
1887        }
1888
1889        xfer->resp_len = nbytes;
1890        xfer->rx_done = true;
1891}
1892
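    /* Busy-poll both BMI pipes, reaping send and receive completions,
     * until the exchange finishes or BMI_COMMUNICATION_TIMEOUT_HZ
     * elapses.
     */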
1893static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1894                               struct ath10k_ce_pipe *rx_pipe,
1895                               struct bmi_xfer *xfer)
1896{
1897        unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1898
1899        while (time_before_eq(jiffies, timeout)) {
1900                ath10k_pci_bmi_send_done(tx_pipe);
1901                ath10k_pci_bmi_recv_data(rx_pipe);
1902
1903                if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1904                        return 0;
1905
1906                schedule();
1907        }
1908
1909        return -ETIMEDOUT;
1910}
1911
1912/*
1913 * Send an interrupt to the device to wake up the Target CPU
1914 * so it has an opportunity to notice any changed state.
1915 */
1916static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1917{
1918        u32 addr, val;
1919
1920        addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1921        val = ath10k_pci_read32(ar, addr);
1922        val |= CORE_CTRL_CPU_INTR_MASK;
1923        ath10k_pci_write32(ar, addr, val);
1924
1925        return 0;
1926}
1927
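    /* Number of target IRAM banks to hand to the firmware for early
     * allocation; this depends on the device id and chip revision.
     */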
1928static int ath10k_pci_get_num_banks(struct ath10k *ar)
1929{
1930        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1931
1932        switch (ar_pci->pdev->device) {
1933        case QCA988X_2_0_DEVICE_ID:
1934        case QCA99X0_2_0_DEVICE_ID:
1935                return 1;
1936        case QCA6164_2_1_DEVICE_ID:
1937        case QCA6174_2_1_DEVICE_ID:
1938                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
1939                case QCA6174_HW_1_0_CHIP_ID_REV:
1940                case QCA6174_HW_1_1_CHIP_ID_REV:
1941                case QCA6174_HW_2_1_CHIP_ID_REV:
1942                case QCA6174_HW_2_2_CHIP_ID_REV:
1943                        return 3;
1944                case QCA6174_HW_1_3_CHIP_ID_REV:
1945                        return 2;
1946                case QCA6174_HW_3_0_CHIP_ID_REV:
1947                case QCA6174_HW_3_1_CHIP_ID_REV:
1948                case QCA6174_HW_3_2_CHIP_ID_REV:
1949                        return 9;
1950                }
1951                break;
1952        case QCA9377_1_0_DEVICE_ID:
1953                return 2;
1954        }
1955
1956        ath10k_warn(ar, "unknown number of banks, assuming 1\n");
1957        return 1;
1958}
1959
1960static int ath10k_bus_get_num_banks(struct ath10k *ar)
1961{
1962        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1963
1964        return ar_pci->bus_ops->get_num_banks(ar);
1965}
1966
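    /* Push the target-side CE pipe configuration, the service-to-pipe map
     * and the early-allocation setup to the target through the diagnostic
     * window, then set HI_OPTION_EARLY_CFG_DONE so the target proceeds
     * with initialization.
     */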
1967int ath10k_pci_init_config(struct ath10k *ar)
1968{
1969        u32 interconnect_targ_addr;
1970        u32 pcie_state_targ_addr = 0;
1971        u32 pipe_cfg_targ_addr = 0;
1972        u32 svc_to_pipe_map = 0;
1973        u32 pcie_config_flags = 0;
1974        u32 ealloc_value;
1975        u32 ealloc_targ_addr;
1976        u32 flag2_value;
1977        u32 flag2_targ_addr;
1978        int ret = 0;
1979
1980        /* Download to Target the CE Config and the service-to-CE map */
1981        interconnect_targ_addr =
1982                host_interest_item_address(HI_ITEM(hi_interconnect_state));
1983
1984        /* Supply Target-side CE configuration */
1985        ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1986                                     &pcie_state_targ_addr);
1987        if (ret != 0) {
1988                ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
1989                return ret;
1990        }
1991
1992        if (pcie_state_targ_addr == 0) {
1993                ret = -EIO;
1994                ath10k_err(ar, "Invalid pcie state addr\n");
1995                return ret;
1996        }
1997
1998        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1999                                          offsetof(struct pcie_state,
2000                                                   pipe_cfg_addr)),
2001                                     &pipe_cfg_targ_addr);
2002        if (ret != 0) {
2003                ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
2004                return ret;
2005        }
2006
2007        if (pipe_cfg_targ_addr == 0) {
2008                ret = -EIO;
2009                ath10k_err(ar, "Invalid pipe cfg addr\n");
2010                return ret;
2011        }
2012
2013        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
2014                                        target_ce_config_wlan,
2015                                        sizeof(struct ce_pipe_config) *
2016                                        NUM_TARGET_CE_CONFIG_WLAN);
2017
2018        if (ret != 0) {
2019                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
2020                return ret;
2021        }
2022
2023        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2024                                          offsetof(struct pcie_state,
2025                                                   svc_to_pipe_map)),
2026                                     &svc_to_pipe_map);
2027        if (ret != 0) {
2028                ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
2029                return ret;
2030        }
2031
2032        if (svc_to_pipe_map == 0) {
2033                ret = -EIO;
2034                ath10k_err(ar, "Invalid svc_to_pipe map\n");
2035                return ret;
2036        }
2037
2038        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
2039                                        target_service_to_ce_map_wlan,
2040                                        sizeof(target_service_to_ce_map_wlan));
2041        if (ret != 0) {
2042                ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
2043                return ret;
2044        }
2045
2046        ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
2047                                          offsetof(struct pcie_state,
2048                                                   config_flags)),
2049                                     &pcie_config_flags);
2050        if (ret != 0) {
2051                ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
2052                return ret;
2053        }
2054
2055        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
2056
2057        ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
2058                                           offsetof(struct pcie_state,
2059                                                    config_flags)),
2060                                      pcie_config_flags);
2061        if (ret != 0) {
2062                ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
2063                return ret;
2064        }
2065
2066        /* configure early allocation */
2067        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
2068
2069        ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
2070        if (ret != 0) {
2071                ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
2072                return ret;
2073        }
2074
2075        /* first bank is switched to IRAM */
2076        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
2077                         HI_EARLY_ALLOC_MAGIC_MASK);
2078        ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
2079                          HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
2080                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);
2081
2082        ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
2083        if (ret != 0) {
2084                ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
2085                return ret;
2086        }
2087
2088        /* Tell Target to proceed with initialization */
2089        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
2090
2091        ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
2092        if (ret != 0) {
2093                ath10k_err(ar, "Failed to get option val: %d\n", ret);
2094                return ret;
2095        }
2096
2097        flag2_value |= HI_OPTION_EARLY_CFG_DONE;
2098
2099        ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
2100        if (ret != 0) {
2101                ath10k_err(ar, "Failed to set option val: %d\n", ret);
2102                return ret;
2103        }
2104
2105        return 0;
2106}
2107
2108static void ath10k_pci_override_ce_config(struct ath10k *ar)
2109{
2110        struct ce_attr *attr;
2111        struct ce_pipe_config *config;
2112
2113        /* For QCA6174 we're overriding the Copy Engine 5 configuration,
2114         * since it is currently used for another feature.
2115         */
2116
2117        /* Override Host's Copy Engine 5 configuration */
2118        attr = &host_ce_config_wlan[5];
2119        attr->src_sz_max = 0;
2120        attr->dest_nentries = 0;
2121
2122        /* Override Target firmware's Copy Engine configuration */
2123        config = &target_ce_config_wlan[5];
2124        config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
2125        config->nbytes_max = __cpu_to_le32(2048);
2126
2127        /* Map from service/endpoint to Copy Engine */
2128        target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
2129}
2130
2131int ath10k_pci_alloc_pipes(struct ath10k *ar)
2132{
2133        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2134        struct ath10k_pci_pipe *pipe;
2135        int i, ret;
2136
2137        for (i = 0; i < CE_COUNT; i++) {
2138                pipe = &ar_pci->pipe_info[i];
2139                pipe->ce_hdl = &ar_pci->ce_states[i];
2140                pipe->pipe_num = i;
2141                pipe->hif_ce_state = ar;
2142
2143                ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
2144                if (ret) {
2145                        ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
2146                                   i, ret);
2147                        return ret;
2148                }
2149
2150                /* Last CE is Diagnostic Window */
2151                if (i == CE_DIAG_PIPE) {
2152                        ar_pci->ce_diag = pipe->ce_hdl;
2153                        continue;
2154                }
2155
2156                pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
2157        }
2158
2159        return 0;
2160}
2161
2162void ath10k_pci_free_pipes(struct ath10k *ar)
2163{
2164        int i;
2165
2166        for (i = 0; i < CE_COUNT; i++)
2167                ath10k_ce_free_pipe(ar, i);
2168}
2169
2170int ath10k_pci_init_pipes(struct ath10k *ar)
2171{
2172        int i, ret;
2173
2174        for (i = 0; i < CE_COUNT; i++) {
2175                ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
2176                if (ret) {
2177                        ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
2178                                   i, ret);
2179                        return ret;
2180                }
2181        }
2182
2183        return 0;
2184}
2185
2186static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
2187{
2188        return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
2189               FW_IND_EVENT_PENDING;
2190}
2191
2192static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
2193{
2194        u32 val;
2195
2196        val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2197        val &= ~FW_IND_EVENT_PENDING;
2198        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
2199}
2200
2201/* this function effectively clears target memory controller assert line */
2202static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
2203{
2204        u32 val;
2205
2206        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2207        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2208                               val | SOC_RESET_CONTROL_SI0_RST_MASK);
2209        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2210
2211        msleep(10);
2212
2213        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2214        ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
2215                               val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
2216        val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
2217
2218        msleep(10);
2219}
2220
2221static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
2222{
2223        u32 val;
2224
2225        ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
2226
2227        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2228                                SOC_RESET_CONTROL_ADDRESS);
2229        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2230                           val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
2231}
2232
2233static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
2234{
2235        u32 val;
2236
2237        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2238                                SOC_RESET_CONTROL_ADDRESS);
2239
2240        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2241                           val | SOC_RESET_CONTROL_CE_RST_MASK);
2242        msleep(10);
2243        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
2244                           val & ~SOC_RESET_CONTROL_CE_RST_MASK);
2245}
2246
2247static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
2248{
2249        u32 val;
2250
2251        val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
2252                                SOC_LF_TIMER_CONTROL0_ADDRESS);
2253        ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
2254                           SOC_LF_TIMER_CONTROL0_ADDRESS,
2255                           val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
2256}
2257
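    /* Warm reset sequence: quiesce the target CPU and copy engines, reset
     * the CPU again and wait for the firmware's initialized indication.
     * Unlike cold reset this keeps the PCIe core out of reset.
     */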
2258static int ath10k_pci_warm_reset(struct ath10k *ar)
2259{
2260        int ret;
2261
2262        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
2263
2264        spin_lock_bh(&ar->data_lock);
2265        ar->stats.fw_warm_reset_counter++;
2266        spin_unlock_bh(&ar->data_lock);
2267
2268        ath10k_pci_irq_disable(ar);
2269
2270        /* Make sure the target CPU is not doing anything dangerous: if it
2271         * were to access the copy engine while the host performs a copy
2272         * engine reset, the device could confuse the PCIe controller to the
2273         * point of bringing the host system to a complete stop (i.e. hang).
2274         */
2275        ath10k_pci_warm_reset_si0(ar);
2276        ath10k_pci_warm_reset_cpu(ar);
2277        ath10k_pci_init_pipes(ar);
2278        ath10k_pci_wait_for_target_init(ar);
2279
2280        ath10k_pci_warm_reset_clear_lf(ar);
2281        ath10k_pci_warm_reset_ce(ar);
2282        ath10k_pci_warm_reset_cpu(ar);
2283        ath10k_pci_init_pipes(ar);
2284
2285        ret = ath10k_pci_wait_for_target_init(ar);
2286        if (ret) {
2287                ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
2288                return ret;
2289        }
2290
2291        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
2292
2293        return 0;
2294}
2295
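    /* Reset variant that is safe to call when the device may be in a bad
     * state (e.g. from hif_stop): warm reset where supported, otherwise a
     * full QCA99X0 chip reset with interrupts disabled first.
     */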
2296static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
2297{
2298        if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
2299                return ath10k_pci_warm_reset(ar);
2300        } else if (QCA_REV_99X0(ar)) {
2301                ath10k_pci_irq_disable(ar);
2302                return ath10k_pci_qca99x0_chip_reset(ar);
2303        } else {
2304                return -ENOTSUPP;
2305        }
2306}
2307
2308static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
2309{
2310        int i, ret;
2311        u32 val;
2312
2313        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
2314
2315        /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
2316         * It is thus preferred to use warm reset, which is safer but may not
2317         * be able to recover the device from all possible failure scenarios.
2318         *
2319         * Warm reset doesn't always work on the first try, so attempt it a
2320         * few times before giving up.
2321         */
2322        for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
2323                ret = ath10k_pci_warm_reset(ar);
2324                if (ret) {
2325                        ath10k_warn(ar, "failed to warm reset (attempt %d of %d): %d\n",
2326                                    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
2327                                    ret);
2328                        continue;
2329                }
2330
2331                /* FIXME: Sometimes the copy engine doesn't recover after warm
2332                 * reset. In most cases this needs a cold reset. In some of
2333                 * these cases the device is in such a state that a cold reset
2334                 * may lock up the host.
2335                 *
2336                 * Reading any host interest register via the copy engine is
2337                 * sufficient to verify that the device is capable of booting
2338                 * the firmware blob.
2339                 */
2340                ret = ath10k_pci_init_pipes(ar);
2341                if (ret) {
2342                        ath10k_warn(ar, "failed to init copy engine: %d\n",
2343                                    ret);
2344                        continue;
2345                }
2346
2347                ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
2348                                             &val);
2349                if (ret) {
2350                        ath10k_warn(ar, "failed to poke copy engine: %d\n",
2351                                    ret);
2352                        continue;
2353                }
2354
2355                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
2356                return 0;
2357        }
2358
2359        if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
2360                ath10k_warn(ar, "refusing cold reset as requested\n");
2361                return -EPERM;
2362        }
2363
2364        ret = ath10k_pci_cold_reset(ar);
2365        if (ret) {
2366                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2367                return ret;
2368        }
2369
2370        ret = ath10k_pci_wait_for_target_init(ar);
2371        if (ret) {
2372                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2373                            ret);
2374                return ret;
2375        }
2376
2377        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
2378
2379        return 0;
2380}
2381
2382static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
2383{
2384        int ret;
2385
2386        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
2387
2388        /* FIXME: QCA6174 requires cold + warm reset to work. */
2389
2390        ret = ath10k_pci_cold_reset(ar);
2391        if (ret) {
2392                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2393                return ret;
2394        }
2395
2396        ret = ath10k_pci_wait_for_target_init(ar);
2397        if (ret) {
2398                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2399                            ret);
2400                return ret;
2401        }
2402
2403        ret = ath10k_pci_warm_reset(ar);
2404        if (ret) {
2405                ath10k_warn(ar, "failed to warm reset: %d\n", ret);
2406                return ret;
2407        }
2408
2409        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete\n");
2410
2411        return 0;
2412}
2413
2414static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
2415{
2416        int ret;
2417
2418        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
2419
2420        ret = ath10k_pci_cold_reset(ar);
2421        if (ret) {
2422                ath10k_warn(ar, "failed to cold reset: %d\n", ret);
2423                return ret;
2424        }
2425
2426        ret = ath10k_pci_wait_for_target_init(ar);
2427        if (ret) {
2428                ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
2429                            ret);
2430                return ret;
2431        }
2432
2433        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
2434
2435        return 0;
2436}
2437
2438static int ath10k_pci_chip_reset(struct ath10k *ar)
2439{
2440        if (QCA_REV_988X(ar))
2441                return ath10k_pci_qca988x_chip_reset(ar);
2442        else if (QCA_REV_6174(ar))
2443                return ath10k_pci_qca6174_chip_reset(ar);
2444        else if (QCA_REV_9377(ar))
2445                return ath10k_pci_qca6174_chip_reset(ar);
2446        else if (QCA_REV_99X0(ar))
2447                return ath10k_pci_qca99x0_chip_reset(ar);
2448        else
2449                return -ENOTSUPP;
2450}
2451
2452static int ath10k_pci_hif_power_up(struct ath10k *ar)
2453{
2454        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2455        int ret;
2456
2457        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
2458
2459        pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2460                                  &ar_pci->link_ctl);
2461        pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
2462                                   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
2463
2464        /*
2465         * Bring the target up cleanly.
2466         *
2467         * The target may be in an undefined state with an AUX-powered Target
2468         * and a Host in WoW mode. If the Host crashes, loses power, or is
2469         * restarted (without unloading the driver) then the Target is left
2470         * (aux) powered and running. On a subsequent driver load, the Target
2471         * is in an unexpected state. We try to catch that here in order to
2472         * reset the Target and retry the probe.
2473         */
2474        ret = ath10k_pci_chip_reset(ar);
2475        if (ret) {
2476                if (ath10k_pci_has_fw_crashed(ar)) {
2477                        ath10k_warn(ar, "firmware crashed during chip reset\n");
2478                        ath10k_pci_fw_crashed_clear(ar);
2479                        ath10k_pci_fw_crashed_dump(ar);
2480                }
2481
2482                ath10k_err(ar, "failed to reset chip: %d\n", ret);
2483                goto err_sleep;
2484        }
2485
2486        ret = ath10k_pci_init_pipes(ar);
2487        if (ret) {
2488                ath10k_err(ar, "failed to initialize CE: %d\n", ret);
2489                goto err_sleep;
2490        }
2491
2492        ret = ath10k_pci_init_config(ar);
2493        if (ret) {
2494                ath10k_err(ar, "failed to setup init config: %d\n", ret);
2495                goto err_ce;
2496        }
2497
2498        ret = ath10k_pci_wake_target_cpu(ar);
2499        if (ret) {
2500                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
2501                goto err_ce;
2502        }
2503
2504        return 0;
2505
2506err_ce:
2507        ath10k_pci_ce_deinit(ar);
2508
2509err_sleep:
2510        return ret;
2511}
2512
2513void ath10k_pci_hif_power_down(struct ath10k *ar)
2514{
2515        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
2516
2517        /* Currently hif_power_up performs effectively a reset and hif_stop
2518         * resets the chip as well so there's no point in resetting here.
2519         */
2520}
2521
2522#ifdef CONFIG_PM
2523
2524static int ath10k_pci_hif_suspend(struct ath10k *ar)
2525{
2526        /* The grace timer can still be counting down and ar->ps_awake may
2527         * still be true. It is known that the device may be asleep after
2528         * resuming regardless of the SoC powersave state before suspending.
2529         * Hence make sure the device is asleep before proceeding.
2530         */
2531        ath10k_pci_sleep_sync(ar);
2532
2533        return 0;
2534}
2535
2536static int ath10k_pci_hif_resume(struct ath10k *ar)
2537{
2538        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2539        struct pci_dev *pdev = ar_pci->pdev;
2540        u32 val;
2541        int ret = 0;
2542
2543        ret = ath10k_pci_force_wake(ar);
2544        if (ret) {
2545                ath10k_err(ar, "failed to wake up target: %d\n", ret);
2546                return ret;
2547        }
2548
2549        /* Suspend/Resume resets the PCI configuration space, so we have to
2550         * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
2551         * from interfering with C3 CPU state. pci_restore_state() won't help
2552         * here since it only restores the first 64 bytes of the config header.
2553         */
2554        pci_read_config_dword(pdev, 0x40, &val);
2555        if ((val & 0x0000ff00) != 0)
2556                pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2557
2558        return ret;
2559}
2560#endif
2561
2562static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2563        .tx_sg                  = ath10k_pci_hif_tx_sg,
2564        .diag_read              = ath10k_pci_hif_diag_read,
2565        .diag_write             = ath10k_pci_diag_write_mem,
2566        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2567        .start                  = ath10k_pci_hif_start,
2568        .stop                   = ath10k_pci_hif_stop,
2569        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2570        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2571        .send_complete_check    = ath10k_pci_hif_send_complete_check,
2572        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2573        .power_up               = ath10k_pci_hif_power_up,
2574        .power_down             = ath10k_pci_hif_power_down,
2575        .read32                 = ath10k_pci_read32,
2576        .write32                = ath10k_pci_write32,
2577#ifdef CONFIG_PM
2578        .suspend                = ath10k_pci_hif_suspend,
2579        .resume                 = ath10k_pci_hif_resume,
2580#endif
2581};
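
    /* Higher layers reach these callbacks through thin inline wrappers in
     * hif.h, roughly of the following form (illustrative sketch, not part
     * of this file):
     *
     *   static inline int ath10k_hif_power_up(struct ath10k *ar)
     *   {
     *           return ar->hif.ops->power_up(ar);
     *   }
     */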
2582
2583/*
2584 * Top-level interrupt handler for all PCI interrupts from a Target.
2585 * When a block of MSI interrupts is allocated, this top-level handler
2586 * is not used; instead, we directly call the correct sub-handler.
2587 */
2588static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2589{
2590        struct ath10k *ar = arg;
2591        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2592        int ret;
2593
2594        ret = ath10k_pci_force_wake(ar);
2595        if (ret) {
2596                ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
2597                return IRQ_NONE;
2598        }
2599
2600        if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
2601                if (!ath10k_pci_irq_pending(ar))
2602                        return IRQ_NONE;
2603
2604                ath10k_pci_disable_and_clear_legacy_irq(ar);
2605        }
2606
2607        tasklet_schedule(&ar_pci->intr_tq);
2608
2609        return IRQ_HANDLED;
2610}
2611
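    /* Bottom half of the interrupt handler: check for a firmware crash,
     * otherwise service all copy engines and re-arm the legacy interrupt
     * that the top half disabled.
     */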
2612static void ath10k_pci_tasklet(unsigned long data)
2613{
2614        struct ath10k *ar = (struct ath10k *)data;
2615        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2616
2617        if (ath10k_pci_has_fw_crashed(ar)) {
2618                ath10k_pci_irq_disable(ar);
2619                ath10k_pci_fw_crashed_clear(ar);
2620                ath10k_pci_fw_crashed_dump(ar);
2621                return;
2622        }
2623
2624        ath10k_ce_per_engine_service_any(ar);
2625
2626        /* Re-enable legacy irq that was disabled in the irq handler */
2627        if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
2628                ath10k_pci_enable_legacy_irq(ar);
2629}
2630
2631static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2632{
2633        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2634        int ret;
2635
2636        ret = request_irq(ar_pci->pdev->irq,
2637                          ath10k_pci_interrupt_handler,
2638                          IRQF_SHARED, "ath10k_pci", ar);
2639        if (ret) {
2640                ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2641                            ar_pci->pdev->irq, ret);
2642                return ret;
2643        }
2644
2645        return 0;
2646}
2647
2648static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2649{
2650        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2651        int ret;
2652
2653        ret = request_irq(ar_pci->pdev->irq,
2654                          ath10k_pci_interrupt_handler,
2655                          IRQF_SHARED, "ath10k_pci", ar);
2656        if (ret) {
2657                ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2658                            ar_pci->pdev->irq, ret);
2659                return ret;
2660        }
2661
2662        return 0;
2663}
2664
2665static int ath10k_pci_request_irq(struct ath10k *ar)
2666{
2667        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2668
2669        switch (ar_pci->oper_irq_mode) {
2670        case ATH10K_PCI_IRQ_LEGACY:
2671                return ath10k_pci_request_irq_legacy(ar);
2672        case ATH10K_PCI_IRQ_MSI:
2673                return ath10k_pci_request_irq_msi(ar);
2674        default:
2675                return -EINVAL;
2676        }
2677}
2678
2679static void ath10k_pci_free_irq(struct ath10k *ar)
2680{
2681        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2682
2683        free_irq(ar_pci->pdev->irq, ar);
2684}
2685
2686void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2687{
2688        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2689
2690        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2691}
2692
2693static int ath10k_pci_init_irq(struct ath10k *ar)
2694{
2695        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2696        int ret;
2697
2698        ath10k_pci_init_irq_tasklets(ar);
2699
2700        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2701                ath10k_info(ar, "limiting irq mode to: %d\n",
2702                            ath10k_pci_irq_mode);
2703
2704        /* Try MSI */
2705        if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2706                ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
2707                ret = pci_enable_msi(ar_pci->pdev);
2708                if (ret == 0)
2709                        return 0;
2710
2711                /* fall-through */
2712        }
2713
2714        /* Try legacy irq
2715         *
2716         * A potential race occurs here: the CORE_BASE write depends on
2717         * the target correctly decoding the AXI address, but the host
2718         * won't know when the target has written its BAR to CORE_CTRL.
2719         * This write might get lost if the target has NOT written the
2720         * BAR. For now, fix the race by repeating the write in the
2721         * synchronization check below. */
2722        ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
2723
2724        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2725                           PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2726
2727        return 0;
2728}
2729
2730static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2731{
2732        ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2733                           0);
2734}
2735
2736static int ath10k_pci_deinit_irq(struct ath10k *ar)
2737{
2738        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2739
2740        switch (ar_pci->oper_irq_mode) {
2741        case ATH10K_PCI_IRQ_LEGACY:
2742                ath10k_pci_deinit_irq_legacy(ar);
2743                break;
2744        default:
2745                pci_disable_msi(ar_pci->pdev);
2746                break;
2747        }
2748
2749        return 0;
2750}
2751
2752int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2753{
2754        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2755        unsigned long timeout;
2756        u32 val;
2757
2758        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2759
2760        timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2761
2762        do {
2763                val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2764
2765                ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2766                           val);
2767
2768                /* target should never return this */
2769                if (val == 0xffffffff)
2770                        continue;
2771
2772                /* the device has crashed so don't bother trying anymore */
2773                if (val & FW_IND_EVENT_PENDING)
2774                        break;
2775
2776                if (val & FW_IND_INITIALIZED)
2777                        break;
2778
2779                if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
2780                        /* Fix potential race by repeating CORE_BASE writes */
2781                        ath10k_pci_enable_legacy_irq(ar);
2782
2783                mdelay(10);
2784        } while (time_before(jiffies, timeout));
2785
2786        ath10k_pci_disable_and_clear_legacy_irq(ar);
2787        ath10k_pci_irq_msi_fw_mask(ar);
2788
2789        if (val == 0xffffffff) {
2790                ath10k_err(ar, "failed to read device register, device is gone\n");
2791                return -EIO;
2792        }
2793
2794        if (val & FW_IND_EVENT_PENDING) {
2795                ath10k_warn(ar, "device has crashed during init\n");
2796                return -ECOMM;
2797        }
2798
2799        if (!(val & FW_IND_INITIALIZED)) {
2800                ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2801                           val);
2802                return -ETIMEDOUT;
2803        }
2804
2805        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2806        return 0;
2807}
2808
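    /* Full chip reset through SOC_GLOBAL_RESET; this resets the whole
     * target, including its PCIe core, so delays are needed around the
     * register accesses (see below).
     */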
2809static int ath10k_pci_cold_reset(struct ath10k *ar)
2810{
2811        u32 val;
2812
2813        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2814
2815        spin_lock_bh(&ar->data_lock);
2816
2817        ar->stats.fw_cold_reset_counter++;
2818
2819        spin_unlock_bh(&ar->data_lock);
2820
2821        /* Put Target, including PCIe, into RESET. */
2822        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2823        val |= 1;
2824        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2825
2826        /* After writing into SOC_GLOBAL_RESET to put the device into
2827         * reset and then pulling it out of reset, PCIe may not be stable
2828         * for immediate register access and may cause bus errors. Add a
2829         * delay before any PCIe access request to avoid this issue.
2830         */
2831        msleep(20);
2832
2833        /* Pull Target, including PCIe, out of RESET. */
2834        val &= ~1;
2835        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2836
2837        msleep(20);
2838
2839        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2840
2841        return 0;
2842}
2843
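    /* Enable the PCI device, claim the register BAR, enforce 32-bit DMA
     * and iomap the target's register space into ar_pci->mem.
     */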
2844static int ath10k_pci_claim(struct ath10k *ar)
2845{
2846        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2847        struct pci_dev *pdev = ar_pci->pdev;
2848        int ret;
2849
2850        pci_set_drvdata(pdev, ar);
2851
2852        ret = pci_enable_device(pdev);
2853        if (ret) {
2854                ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2855                return ret;
2856        }
2857
2858        ret = pci_request_region(pdev, BAR_NUM, "ath");
2859        if (ret) {
2860                ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2861                           ret);
2862                goto err_device;
2863        }
2864
2865        /* Target expects 32 bit DMA. Enforce it. */
2866        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2867        if (ret) {
2868                ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2869                goto err_region;
2870        }
2871
2872        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2873        if (ret) {
2874                ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2875                           ret);
2876                goto err_region;
2877        }
2878
2879        pci_set_master(pdev);
2880
2881        /* Arrange for access to Target SoC registers. */
2882        ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
2883        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2884        if (!ar_pci->mem) {
2885                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2886                ret = -EIO;
2887                goto err_master;
2888        }
2889
2890        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2891        return 0;
2892
2893err_master:
2894        pci_clear_master(pdev);
2895
2896err_region:
2897        pci_release_region(pdev, BAR_NUM);
2898
2899err_device:
2900        pci_disable_device(pdev);
2901
2902        return ret;
2903}
2904
2905static void ath10k_pci_release(struct ath10k *ar)
2906{
2907        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2908        struct pci_dev *pdev = ar_pci->pdev;
2909
2910        pci_iounmap(pdev, ar_pci->mem);
2911        pci_release_region(pdev, BAR_NUM);
2912        pci_clear_master(pdev);
2913        pci_disable_device(pdev);
2914}
2915
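    /* Match the PCI device id and chip revision against the table of
     * supported chips.
     */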
2916static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
2917{
2918        const struct ath10k_pci_supp_chip *supp_chip;
2919        int i;
2920        u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
2921
2922        for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
2923                supp_chip = &ath10k_pci_supp_chips[i];
2924
2925                if (supp_chip->dev_id == dev_id &&
2926                    supp_chip->rev_id == rev_id)
2927                        return true;
2928        }
2929
2930        return false;
2931}
2932
2933int ath10k_pci_setup_resource(struct ath10k *ar)
2934{
2935        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2936        int ret;
2937
2938        spin_lock_init(&ar_pci->ce_lock);
2939        spin_lock_init(&ar_pci->ps_lock);
2940
2941        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2942                    (unsigned long)ar);
2943
2944        if (QCA_REV_6174(ar))
2945                ath10k_pci_override_ce_config(ar);
2946
2947        ret = ath10k_pci_alloc_pipes(ar);
2948        if (ret) {
2949                ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2950                           ret);
2951                return ret;
2952        }
2953
2954        return 0;
2955}
2956
2957void ath10k_pci_release_resource(struct ath10k *ar)
2958{
2959        ath10k_pci_kill_tasklet(ar);
2960        ath10k_pci_ce_deinit(ar);
2961        ath10k_pci_free_pipes(ar);
2962}
2963
2964static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
2965        .read32         = ath10k_bus_pci_read32,
2966        .write32        = ath10k_bus_pci_write32,
2967        .get_num_banks  = ath10k_pci_get_num_banks,
2968};
2969
2970static int ath10k_pci_probe(struct pci_dev *pdev,
2971                            const struct pci_device_id *pci_dev)
2972{
2973        int ret = 0;
2974        struct ath10k *ar;
2975        struct ath10k_pci *ar_pci;
2976        enum ath10k_hw_rev hw_rev;
2977        u32 chip_id;
2978        bool pci_ps;
2979
2980        switch (pci_dev->device) {
2981        case QCA988X_2_0_DEVICE_ID:
2982                hw_rev = ATH10K_HW_QCA988X;
2983                pci_ps = false;
2984                break;
2985        case QCA6164_2_1_DEVICE_ID:
2986        case QCA6174_2_1_DEVICE_ID:
2987                hw_rev = ATH10K_HW_QCA6174;
2988                pci_ps = true;
2989                break;
2990        case QCA99X0_2_0_DEVICE_ID:
2991                hw_rev = ATH10K_HW_QCA99X0;
2992                pci_ps = false;
2993                break;
2994        case QCA9377_1_0_DEVICE_ID:
2995                hw_rev = ATH10K_HW_QCA9377;
2996                pci_ps = true;
2997                break;
2998        default:
2999                WARN_ON(1);
3000                return -ENOTSUPP;
3001        }
3002
3003        ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
3004                                hw_rev, &ath10k_pci_hif_ops);
3005        if (!ar) {
3006                dev_err(&pdev->dev, "failed to allocate core\n");
3007                return -ENOMEM;
3008        }
3009
3010        ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
3011                   pdev->vendor, pdev->device,
3012                   pdev->subsystem_vendor, pdev->subsystem_device);
3013
3014        ar_pci = ath10k_pci_priv(ar);
3015        ar_pci->pdev = pdev;
3016        ar_pci->dev = &pdev->dev;
3017        ar_pci->ar = ar;
3018        ar->dev_id = pci_dev->device;
3019        ar_pci->pci_ps = pci_ps;
3020        ar_pci->bus_ops = &ath10k_pci_bus_ops;
3021
3022        ar->id.vendor = pdev->vendor;
3023        ar->id.device = pdev->device;
3024        ar->id.subsystem_vendor = pdev->subsystem_vendor;
3025        ar->id.subsystem_device = pdev->subsystem_device;
3026
3027        setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
3028                    (unsigned long)ar);
3029
3030        ret = ath10k_pci_setup_resource(ar);
3031        if (ret) {
3032                ath10k_err(ar, "failed to setup resource: %d\n", ret);
3033                goto err_core_destroy;
3034        }
3035
3036        ret = ath10k_pci_claim(ar);
3037        if (ret) {
3038                ath10k_err(ar, "failed to claim device: %d\n", ret);
3039                goto err_free_pipes;
3040        }
3041
3042        ret = ath10k_pci_force_wake(ar);
3043        if (ret) {
3044                ath10k_warn(ar, "failed to wake up device: %d\n", ret);
3045                goto err_sleep;
3046        }
3047
3048        ath10k_pci_ce_deinit(ar);
3049        ath10k_pci_irq_disable(ar);
3050
3051        ret = ath10k_pci_init_irq(ar);
3052        if (ret) {
3053                ath10k_err(ar, "failed to init irqs: %d\n", ret);
3054                goto err_sleep;
3055        }
3056
3057        ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
3058                    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
3059                    ath10k_pci_irq_mode, ath10k_pci_reset_mode);
3060
3061        ret = ath10k_pci_request_irq(ar);
3062        if (ret) {
3063                ath10k_warn(ar, "failed to request irqs: %d\n", ret);
3064                goto err_deinit_irq;
3065        }
3066
3067        ret = ath10k_pci_chip_reset(ar);
3068        if (ret) {
3069                ath10k_err(ar, "failed to reset chip: %d\n", ret);
3070                goto err_free_irq;
3071        }
3072
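            /* Reads from an unreachable PCI device return all ones, so
             * 0xffffffff means the target never came up after reset.
             */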
3073        chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
3074        if (chip_id == 0xffffffff) {
3075                ath10k_err(ar, "failed to get chip id\n");
                    ret = -ENODEV;
3076                goto err_free_irq;
3077        }
3078
3079        if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
3080                ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
3081                           pdev->device, chip_id);
                    ret = -ENODEV;
3082                goto err_free_irq;
3083        }
3084
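            /* Hand the device over to the core, which takes care of firmware
             * loading and mac80211 registration from here on.
             */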
3085        ret = ath10k_core_register(ar, chip_id);
3086        if (ret) {
3087                ath10k_err(ar, "failed to register driver core: %d\n", ret);
3088                goto err_free_irq;
3089        }
3090
3091        return 0;
3092
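    /* Error unwind: each label below releases everything acquired after the
     * point where the corresponding goto is taken, in reverse order.
     */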
3093err_free_irq:
3094        ath10k_pci_free_irq(ar);
3095        ath10k_pci_kill_tasklet(ar);
3096
3097err_deinit_irq:
3098        ath10k_pci_deinit_irq(ar);
3099
3100err_sleep:
3101        ath10k_pci_sleep_sync(ar);
3102        ath10k_pci_release(ar);
3103
3104err_free_pipes:
3105        ath10k_pci_free_pipes(ar);
3106
3107err_core_destroy:
3108        ath10k_core_destroy(ar);
3109
3110        return ret;
3111}
3112
3113static void ath10k_pci_remove(struct pci_dev *pdev)
3114{
3115        struct ath10k *ar = pci_get_drvdata(pdev);
3116
3117        /* pci_get_drvdata() may return NULL; check it before ath10k_dbg()
3118         * dereferences ar. ath10k_pci_priv() returns storage embedded in
3119         * ar, so the old NULL check on it was dead code.
3120         */
3121        if (!ar)
3122                return;
3123
3124        ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
3127
3128        ath10k_core_unregister(ar);
3129        ath10k_pci_free_irq(ar);
3130        ath10k_pci_deinit_irq(ar);
3131        ath10k_pci_release_resource(ar);
3132        ath10k_pci_sleep_sync(ar);
3133        ath10k_pci_release(ar);
3134        ath10k_core_destroy(ar);
3135}
3136
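    /* Expose the PCI ID table for module autoloading via modalias. */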
3137MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
3138
3139static struct pci_driver ath10k_pci_driver = {
3140        .name = "ath10k_pci",
3141        .id_table = ath10k_pci_id_table,
3142        .probe = ath10k_pci_probe,
3143        .remove = ath10k_pci_remove,
3144};
3145
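    /* ath10k_ahb_init() registers the platform driver for AHB-based targets;
     * it is expected to compile to a stub returning 0 when AHB support is
     * disabled.
     */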
3146static int __init ath10k_pci_init(void)
3147{
3148        int ret1, ret2;
3149
3150        ret1 = pci_register_driver(&ath10k_pci_driver);
3151        if (ret1)
3152                printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
3153                       ret1);
3154
3155        ret2 = ath10k_ahb_init();
3156        if (ret2)
3157                printk(KERN_ERR "ahb init failed: %d\n", ret2);
3158
3159        /* succeed if the driver registered on at least one bus; the old
             * code clobbered a PCI failure with the AHB result and failed
             * module init even when PCI registration had succeeded.
             */
            if (ret1 && ret2)
                    return ret1;

            return 0;
3160}
3161module_init(ath10k_pci_init);
3162
3163static void __exit ath10k_pci_exit(void)
3164{
3165        pci_unregister_driver(&ath10k_pci_driver);
3166        ath10k_ahb_exit();
3167}
3168
3169module_exit(ath10k_pci_exit);
3170
3171MODULE_AUTHOR("Qualcomm Atheros");
3172MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
3173MODULE_LICENSE("Dual BSD/GPL");
3174
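    /* MODULE_FIRMWARE() entries let userspace tooling (e.g. initramfs
     * generators) know which firmware files the driver may request.
     */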
3175/* QCA988x 2.0 firmware files */
3176MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
3177MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
3178MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3179MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3180MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
3181MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3182
3183/* QCA6174 2.1 firmware files */
3184MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
3185MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
3186MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
3187MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3188
3189/* QCA6174 3.1 firmware files */
3190MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
3191MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3192MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
3193MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
3194
3195/* QCA9377 1.0 firmware files */
3196MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
3197MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
3198