linux/drivers/mmc/host/cqhci-core.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
        struct mmc_request *mrq;
        unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT  BIT(0)
#define CQHCI_COMPLETED         BIT(1)
#define CQHCI_HOST_CRC          BIT(2)
#define CQHCI_HOST_TIMEOUT      BIT(3)
#define CQHCI_HOST_OTHER        BIT(4)
};

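/*
 * Descriptor addressing helpers: each of the NUM_SLOTS slots holds one
 * task descriptor immediately followed by one link descriptor, while the
 * transfer (ADMA) descriptors live in a separate table that reserves
 * mmc->max_segs entries per tag.
 */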
static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *desc = get_desc(cq_host, tag);

        return desc + cq_host->task_desc_len;
}

static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
{
        return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
        size_t offset = get_trans_desc_offset(cq_host, tag);

        return cq_host->trans_desc_dma_base + offset;
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        size_t offset = get_trans_desc_offset(cq_host, tag);

        return cq_host->trans_desc_base + offset;
}

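/*
 * Initialise the link descriptor for @tag: the DCMD slot gets a plain
 * end-of-chain marker when DCMD is in use; every other slot gets a valid
 * link (ACT=0x6) pointing at that tag's transfer descriptor list.
 */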
static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
        u8 *link_temp;
        dma_addr_t trans_temp;

        link_temp = get_link_desc(cq_host, tag);
        trans_temp = get_trans_desc_dma(cq_host, tag);

        memset(link_temp, 0, cq_host->link_desc_len);
        if (cq_host->link_desc_len > 8)
                *(link_temp + 8) = 0;

        if (tag == DCMD_SLOT && (cq_host->mmc->caps2 & MMC_CAP2_CQE_DCMD)) {
                *link_temp = CQHCI_VALID(0) | CQHCI_ACT(0) | CQHCI_END(1);
                return;
        }

        *link_temp = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);

        if (cq_host->dma64) {
                __le64 *data_addr = (__le64 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le64(trans_temp);
        } else {
                __le32 *data_addr = (__le32 __force *)(link_temp + 4);

                data_addr[0] = cpu_to_le32(trans_temp);
        }
}

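/* Mirror the same mask into both the status (ISTE) and signal (ISGE) enables. */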
static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
        cqhci_writel(cq_host, set, CQHCI_ISTE);
        cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
        pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;

        CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");

        CQHCI_DUMP("Caps:      0x%08x | Version:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CAP),
                   cqhci_readl(cq_host, CQHCI_VER));
        CQHCI_DUMP("Config:    0x%08x | Control:  0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CFG),
                   cqhci_readl(cq_host, CQHCI_CTL));
        CQHCI_DUMP("Int stat:  0x%08x | Int enab: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_IS),
                   cqhci_readl(cq_host, CQHCI_ISTE));
        CQHCI_DUMP("Int sig:   0x%08x | Int Coal: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_ISGE),
                   cqhci_readl(cq_host, CQHCI_IC));
        CQHCI_DUMP("TDL base:  0x%08x | TDL up32: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDLBA),
                   cqhci_readl(cq_host, CQHCI_TDLBAU));
        CQHCI_DUMP("Doorbell:  0x%08x | TCN:      0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TDBR),
                   cqhci_readl(cq_host, CQHCI_TCN));
        CQHCI_DUMP("Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_DQS),
                   cqhci_readl(cq_host, CQHCI_DPT));
        CQHCI_DUMP("Task clr:  0x%08x | SSC1:     0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_TCLR),
                   cqhci_readl(cq_host, CQHCI_SSC1));
        CQHCI_DUMP("SSC2:      0x%08x | DCMD rsp: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_SSC2),
                   cqhci_readl(cq_host, CQHCI_CRDCT));
        CQHCI_DUMP("RED mask:  0x%08x | TERRI:    0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_RMEM),
                   cqhci_readl(cq_host, CQHCI_TERRI));
        CQHCI_DUMP("Resp idx:  0x%08x | Resp arg: 0x%08x\n",
                   cqhci_readl(cq_host, CQHCI_CRI),
                   cqhci_readl(cq_host, CQHCI_CRA));

        if (cq_host->ops->dumpregs)
                cq_host->ops->dumpregs(mmc);
        else
                CQHCI_DUMP(": ===========================================\n");
}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|     |----------|
 *
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{
        int i = 0;

        /* task descriptor can be 64/128 bit irrespective of arch */
        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                cqhci_writel(cq_host, cqhci_readl(cq_host, CQHCI_CFG) |
                               CQHCI_TASK_DESC_SZ, CQHCI_CFG);
                cq_host->task_desc_len = 16;
        } else {
                cq_host->task_desc_len = 8;
        }

        /*
         * 96 bits length of transfer desc instead of 128 bits which means
         * ADMA would expect next valid descriptor at the 96th bit
         * or 128th bit
         */
        if (cq_host->dma64) {
                if (cq_host->quirks & CQHCI_QUIRK_SHORT_TXFR_DESC_SZ)
                        cq_host->trans_desc_len = 12;
                else
                        cq_host->trans_desc_len = 16;
                cq_host->link_desc_len = 16;
        } else {
                cq_host->trans_desc_len = 8;
                cq_host->link_desc_len = 8;
        }

        /* total size of a slot: 1 task & 1 transfer (link) */
        cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;

        cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

        cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->mmc->cqe_qdepth);

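        /*
         * Worked example (illustrative): with 64-bit DMA and 128-bit task
         * descriptors, slot_sz = 16 + 16 = 32 bytes, so desc_size =
         * 32 slots * 32 bytes = 1 KiB; if max_segs were 128 (a
         * hypothetical value), each tag would additionally need
         * 16 * 128 = 2048 bytes of transfer descriptors.
         */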
        pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
                 cq_host->slot_sz);

        /*
         * allocate a dma-mapped chunk of memory for the descriptors
         * allocate a dma-mapped chunk of memory for link descriptors
         * setup each link-desc memory offset per slot-number to
         * the descriptor table.
         */
        cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                                 cq_host->desc_size,
                                                 &cq_host->desc_dma_base,
                                                 GFP_KERNEL);
        if (!cq_host->desc_base)
                return -ENOMEM;

        cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
                                              cq_host->data_size,
                                              &cq_host->trans_desc_dma_base,
                                              GFP_KERNEL);
        if (!cq_host->trans_desc_base) {
                dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
                                   cq_host->desc_base,
                                   cq_host->desc_dma_base);
                cq_host->desc_base = NULL;
                cq_host->desc_dma_base = 0;
                return -ENOMEM;
        }

        pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
                 mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
                (unsigned long long)cq_host->desc_dma_base,
                (unsigned long long)cq_host->trans_desc_dma_base);

        for (; i < (cq_host->num_slots); i++)
                setup_trans_desc(cq_host, i);

        return 0;
}

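/*
 * Program the controller. CFG bits may only change while CQHCI_ENABLE is
 * clear, so it is dropped first; then the DCMD, descriptor-size and
 * crypto bits are set, TDLBA/TDLBAU are pointed at the descriptor list,
 * the card's RCA goes into SSC2, and interrupts are unmasked only after
 * the engine has been re-enabled.
 */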
static void __cqhci_enable(struct cqhci_host *cq_host)
{
        struct mmc_host *mmc = cq_host->mmc;
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);

        /* Configuration must not be changed while enabled */
        if (cqcfg & CQHCI_ENABLE) {
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
        }

        cqcfg &= ~(CQHCI_DCMD | CQHCI_TASK_DESC_SZ);

        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                cqcfg |= CQHCI_DCMD;

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128)
                cqcfg |= CQHCI_TASK_DESC_SZ;

        if (mmc->caps2 & MMC_CAP2_CRYPTO)
                cqcfg |= CQHCI_CRYPTO_GENERAL_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cqhci_writel(cq_host, lower_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBA);
        cqhci_writel(cq_host, upper_32_bits(cq_host->desc_dma_base),
                     CQHCI_TDLBAU);

        cqhci_writel(cq_host, cq_host->rca, CQHCI_SSC2);

        cqhci_set_irqs(cq_host, 0);

        cqcfg |= CQHCI_ENABLE;

        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        mmc->cqe_on = true;

        if (cq_host->ops->enable)
                cq_host->ops->enable(mmc);

        /* Ensure all writes are done before interrupts are enabled */
        wmb();

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        cq_host->activated = true;
}

static void __cqhci_disable(struct cqhci_host *cq_host)
{
        u32 cqcfg;

        cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
        cqcfg &= ~CQHCI_ENABLE;
        cqhci_writel(cq_host, cqcfg, CQHCI_CFG);

        cq_host->mmc->cqe_on = false;

        cq_host->activated = false;
}

int cqhci_deactivate(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (cq_host->enabled && cq_host->activated)
                __cqhci_disable(cq_host);

        return 0;
}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{
        /* Re-enable is done upon first request */
        return 0;
}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int err;

        if (!card->ext_csd.cmdq_en)
                return -EINVAL;

        if (cq_host->enabled)
                return 0;

        cq_host->rca = card->rca;

        err = cqhci_host_alloc_tdl(cq_host);
        if (err) {
                pr_err("%s: Failed to enable CQE, error %d\n",
                       mmc_hostname(mmc), err);
                return err;
        }

        __cqhci_enable(cq_host);

        cq_host->enabled = true;

#ifdef DEBUG
        cqhci_dumpregs(cq_host);
#endif
        return 0;
}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        u32 reg;
        int err;

        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, false);

        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

        err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
                                 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
        if (err < 0)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));

        if (cq_host->ops->post_disable)
                cq_host->ops->post_disable(mmc);

        mmc->cqe_on = false;
}

static void cqhci_disable(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->enabled)
                return;

        cqhci_off(mmc);

        __cqhci_disable(cq_host);

        dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
                           cq_host->trans_desc_base,
                           cq_host->trans_desc_dma_base);

        dmam_free_coherent(mmc_dev(mmc), cq_host->desc_size,
                           cq_host->desc_base,
                           cq_host->desc_dma_base);

        cq_host->trans_desc_base = NULL;
        cq_host->desc_base = NULL;

        cq_host->enabled = false;
}

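/*
 * Pack the 64-bit task descriptor: VALID/END/INT plus ACT=0x5 (a data
 * task), with direction, priority, QBAR, reliable-write and tag-request
 * bits taken from the request flags, and the block count and address
 * from mrq->data. With 128-bit descriptors the upper word carries the
 * crypto parameters.
 */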
static void cqhci_prep_task_desc(struct mmc_request *mrq,
                                 struct cqhci_host *cq_host, int tag)
{
        __le64 *task_desc = (__le64 __force *)get_desc(cq_host, tag);
        u32 req_flags = mrq->data->flags;
        u64 desc0;

        desc0 = CQHCI_VALID(1) |
                CQHCI_END(1) |
                CQHCI_INT(1) |
                CQHCI_ACT(0x5) |
                CQHCI_FORCED_PROG(!!(req_flags & MMC_DATA_FORCED_PRG)) |
                CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
                CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
                CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
                CQHCI_QBAR(!!(req_flags & MMC_DATA_QBR)) |
                CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
                CQHCI_BLK_COUNT(mrq->data->blocks) |
                CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);

        task_desc[0] = cpu_to_le64(desc0);

        if (cq_host->caps & CQHCI_TASK_DESC_SZ_128) {
                u64 desc1 = cqhci_crypto_prep_task_desc(mrq);

                task_desc[1] = cpu_to_le64(desc1);

                pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx%016llx\n",
                         mmc_hostname(mrq->host), mrq->tag, desc1, desc0);
        } else {
                pr_debug("%s: cqhci: tag %d task descriptor 0x%016llx\n",
                         mmc_hostname(mrq->host), mrq->tag, desc0);
        }
}

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{
        int sg_count;
        struct mmc_data *data = mrq->data;

        if (!data)
                return -EINVAL;

        sg_count = dma_map_sg(mmc_dev(host), data->sg,
                              data->sg_len,
                              (data->flags & MMC_DATA_WRITE) ?
                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (!sg_count) {
                pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
                return -ENOMEM;
        }

        return sg_count;
}

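/*
 * Fill one ADMA-style transfer descriptor (ACT=0x4): attribute and
 * length bits in the first 32-bit word, then the DMA address as either
 * 64 or 32 bits depending on the controller's addressing mode.
 */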
static void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
                                bool dma64)
{
        __le32 *attr = (__le32 __force *)desc;

        *attr = (CQHCI_VALID(1) |
                 CQHCI_END(end ? 1 : 0) |
                 CQHCI_INT(0) |
                 CQHCI_ACT(0x4) |
                 CQHCI_DAT_LENGTH(len));

        if (dma64) {
                __le64 *dataddr = (__le64 __force *)(desc + 4);

                dataddr[0] = cpu_to_le64(addr);
        } else {
                __le32 *dataddr = (__le32 __force *)(desc + 4);

                dataddr[0] = cpu_to_le32(addr);
        }
}

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
                               struct cqhci_host *cq_host, int tag)
{
        struct mmc_data *data = mrq->data;
        int i, sg_count, len;
        bool end = false;
        bool dma64 = cq_host->dma64;
        dma_addr_t addr;
        u8 *desc;
        struct scatterlist *sg;

        sg_count = cqhci_dma_map(mrq->host, mrq);
        if (sg_count < 0) {
                pr_err("%s: %s: unable to map sg lists, %d\n",
                                mmc_hostname(mrq->host), __func__, sg_count);
                return sg_count;
        }

        desc = get_trans_desc(cq_host, tag);

        for_each_sg(data->sg, sg, sg_count, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);

                if ((i+1) == sg_count)
                        end = true;
                cqhci_set_tran_desc(desc, addr, len, end, dma64);
                desc += cq_host->trans_desc_len;
        }

        return 0;
}

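/*
 * Build a direct-command (DCMD) descriptor in the dedicated slot.
 * Response type and timing come from the response flags: no response
 * (0x0), R1b-style busy (0x3, timing 0) or a normal 48-bit response
 * (0x2). QBAR is set, making the DCMD a queue barrier.
 */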
static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
                                   struct mmc_request *mrq)
{
        __le64 *task_desc = NULL;
        u64 data = 0;
        u8 resp_type;
        u8 *desc;
        __le64 *dataddr;
        struct cqhci_host *cq_host = mmc->cqe_private;
        u8 timing;

        if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
                resp_type = 0x0;
                timing = 0x1;
        } else {
                if (mrq->cmd->flags & MMC_RSP_R1B) {
                        resp_type = 0x3;
                        timing = 0x0;
                } else {
                        resp_type = 0x2;
                        timing = 0x1;
                }
        }

        task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
        memset(task_desc, 0, cq_host->task_desc_len);
        data |= (CQHCI_VALID(1) |
                 CQHCI_END(1) |
                 CQHCI_INT(1) |
                 CQHCI_QBAR(1) |
                 CQHCI_ACT(0x5) |
                 CQHCI_CMD_INDEX(mrq->cmd->opcode) |
                 CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
        if (cq_host->ops->update_dcmd_desc)
                cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
        *task_desc |= cpu_to_le64(data);
        desc = (u8 *)task_desc;
        pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
                 mmc_hostname(mmc), mrq->cmd->opcode, timing, resp_type);
        dataddr = (__le64 __force *)(desc + 4);
        dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);

}

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;

        if (data) {
                dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
                             (data->flags & MMC_DATA_READ) ?
                             DMA_FROM_DEVICE : DMA_TO_DEVICE);
        }
}

static inline int cqhci_tag(struct mmc_request *mrq)
{
        return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

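/*
 * Issue one request: re-enable the engine on the first request after
 * resume, clear HALT if CQE is currently off, prepare the task and
 * transfer descriptors (or a DCMD descriptor), then ring the doorbell
 * under cq_host->lock, bailing out with -EBUSY while recovery runs.
 */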
static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        int err = 0;
        int tag = cqhci_tag(mrq);
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;

        if (!cq_host->enabled) {
                pr_err("%s: cqhci: not enabled\n", mmc_hostname(mmc));
                return -EINVAL;
        }

        /* First request after resume has to re-enable */
        if (!cq_host->activated)
                __cqhci_enable(cq_host);

        if (!mmc->cqe_on) {
                if (cq_host->ops->pre_enable)
                        cq_host->ops->pre_enable(mmc);

                cqhci_writel(cq_host, 0, CQHCI_CTL);
                mmc->cqe_on = true;
                pr_debug("%s: cqhci: CQE on\n", mmc_hostname(mmc));
                if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
                        pr_err("%s: cqhci: CQE failed to exit halt state\n",
                               mmc_hostname(mmc));
                }
                if (cq_host->ops->enable)
                        cq_host->ops->enable(mmc);
        }

        if (mrq->data) {
                cqhci_prep_task_desc(mrq, cq_host, tag);

                err = cqhci_prep_tran_desc(mrq, cq_host, tag);
                if (err) {
                        pr_err("%s: cqhci: failed to setup tx desc: %d\n",
                               mmc_hostname(mmc), err);
                        return err;
                }
        } else {
                cqhci_prep_dcmd_desc(mmc, mrq);
        }

        spin_lock_irqsave(&cq_host->lock, flags);

        if (cq_host->recovery_halt) {
                err = -EBUSY;
                goto out_unlock;
        }

        cq_host->slot[tag].mrq = mrq;
        cq_host->slot[tag].flags = 0;

        cq_host->qcnt += 1;
        /* Make sure descriptors are ready before ringing the doorbell */
        wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
                         mmc_hostname(mmc), tag);
out_unlock:
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (err)
                cqhci_post_req(mmc, mrq);

        return err;
}

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
                                  bool notify)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        if (!cq_host->recovery_halt) {
                cq_host->recovery_halt = true;
                pr_debug("%s: cqhci: recovery needed\n", mmc_hostname(mmc));
                wake_up(&cq_host->wait_queue);
                if (notify && mrq->recovery_notifier)
                        mrq->recovery_notifier(mrq);
        }
}

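/*
 * Map a Linux error code onto the slot flags recorded for recovery; the
 * first argument takes precedence when both command and data errors are
 * present.
 */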
static unsigned int cqhci_error_flags(int error1, int error2)
{
        int error = error1 ? error1 : error2;

        switch (error) {
        case -EILSEQ:
                return CQHCI_HOST_CRC;
        case -ETIMEDOUT:
                return CQHCI_HOST_TIMEOUT;
        default:
                return CQHCI_HOST_OTHER;
        }
}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
                            int data_error)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot;
        u32 terri;
        u32 tdpe;
        int tag;

        spin_lock(&cq_host->lock);

        terri = cqhci_readl(cq_host, CQHCI_TERRI);

        pr_debug("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                 mmc_hostname(mmc), status, cmd_error, data_error, terri);

        /* Forget about errors when recovery has already been triggered */
        if (cq_host->recovery_halt)
                goto out_unlock;

        if (!cq_host->qcnt) {
                WARN_ONCE(1, "%s: cqhci: error when idle. IRQ status: 0x%08x cmd error %d data error %d TERRI: 0x%08x\n",
                          mmc_hostname(mmc), status, cmd_error, data_error,
                          terri);
                goto out_unlock;
        }

        if (CQHCI_TERRI_C_VALID(terri)) {
                tag = CQHCI_TERRI_C_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(cmd_error, data_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (CQHCI_TERRI_D_VALID(terri)) {
                tag = CQHCI_TERRI_D_TASK(terri);
                slot = &cq_host->slot[tag];
                if (slot->mrq) {
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        /*
         * Handle ICCE ("Invalid Crypto Configuration Error").  This should
         * never happen, since the block layer ensures that all crypto-enabled
         * I/O requests have a valid keyslot before they reach the driver.
         *
         * Note that GCE ("General Crypto Error") is different; it already got
         * handled above by checking TERRI.
         */
        if (status & CQHCI_IS_ICCE) {
                tdpe = cqhci_readl(cq_host, CQHCI_TDPE);
                WARN_ONCE(1,
                          "%s: cqhci: invalid crypto configuration error. IRQ status: 0x%08x TDPE: 0x%08x\n",
                          mmc_hostname(mmc), status, tdpe);
                while (tdpe != 0) {
                        tag = __ffs(tdpe);
                        tdpe &= ~(1 << tag);
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                }
        }

        if (!cq_host->recovery_halt) {
                /*
                 * The only way to guarantee forward progress is to mark at
                 * least one task in error, so if none is indicated, pick one.
                 */
                for (tag = 0; tag < NUM_SLOTS; tag++) {
                        slot = &cq_host->slot[tag];
                        if (!slot->mrq)
                                continue;
                        slot->flags = cqhci_error_flags(data_error, cmd_error);
                        cqhci_recovery_needed(mmc, slot->mrq, true);
                        break;
                }
        }

out_unlock:
        spin_unlock(&cq_host->lock);
}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq) {
                WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
                          mmc_hostname(mmc), tag);
                return;
        }

        /* No completions allowed during recovery */
        if (cq_host->recovery_halt) {
                slot->flags |= CQHCI_COMPLETED;
                return;
        }

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                if (data->error)
                        data->bytes_xfered = 0;
                else
                        data->bytes_xfered = data->blksz * data->blocks;
        }

        mmc_cqe_request_done(mmc, mrq);
}

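/*
 * Main interrupt entry, called from the host driver's own interrupt
 * handler with any command/data errors it has already decoded.  An
 * illustrative glue-driver sketch (not from this file):
 *
 *      if (host->mmc->cqe_on)
 *              return cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 */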
irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
                      int data_error)
{
        u32 status;
        unsigned long tag = 0, comp_status;
        struct cqhci_host *cq_host = mmc->cqe_private;

        status = cqhci_readl(cq_host, CQHCI_IS);
        cqhci_writel(cq_host, status, CQHCI_IS);

        pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);

        if ((status & (CQHCI_IS_RED | CQHCI_IS_GCE | CQHCI_IS_ICCE)) ||
            cmd_error || data_error)
                cqhci_error_irq(mmc, status, cmd_error, data_error);

        if (status & CQHCI_IS_TCC) {
                /* read TCN and complete the request */
                comp_status = cqhci_readl(cq_host, CQHCI_TCN);
                cqhci_writel(cq_host, comp_status, CQHCI_TCN);
                pr_debug("%s: cqhci: TCN: 0x%08lx\n",
                         mmc_hostname(mmc), comp_status);

                spin_lock(&cq_host->lock);

                for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
                        /* complete the corresponding mrq */
                        pr_debug("%s: cqhci: completing tag %lu\n",
                                 mmc_hostname(mmc), tag);
                        cqhci_finish_mrq(mmc, tag);
                }

                if (cq_host->waiting_for_idle && !cq_host->qcnt) {
                        cq_host->waiting_for_idle = false;
                        wake_up(&cq_host->wait_queue);
                }

                spin_unlock(&cq_host->lock);
        }

        if (status & CQHCI_IS_TCL)
                wake_up(&cq_host->wait_queue);

        if (status & CQHCI_IS_HAC)
                wake_up(&cq_host->wait_queue);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL(cqhci_irq);

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{
        unsigned long flags;
        bool is_idle;

        spin_lock_irqsave(&cq_host->lock, flags);
        is_idle = !cq_host->qcnt || cq_host->recovery_halt;
        *ret = cq_host->recovery_halt ? -EBUSY : 0;
        cq_host->waiting_for_idle = !is_idle;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        return is_idle;
}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int ret;

        wait_event(cq_host->wait_queue, cqhci_is_idle(cq_host, &ret));

        return ret;
}

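/*
 * Core's cqe_timeout hook: returns true if the request still owns its
 * slot (a genuine timeout), in which case recovery is requested; false
 * means the request completed in the meantime.
 */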
static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
                          bool *recovery_needed)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        int tag = cqhci_tag(mrq);
        struct cqhci_slot *slot = &cq_host->slot[tag];
        unsigned long flags;
        bool timed_out;

        spin_lock_irqsave(&cq_host->lock, flags);
        timed_out = slot->mrq == mrq;
        if (timed_out) {
                slot->flags |= CQHCI_EXTERNAL_TIMEOUT;
                cqhci_recovery_needed(mmc, mrq, false);
                *recovery_needed = cq_host->recovery_halt;
        }
        spin_unlock_irqrestore(&cq_host->lock, flags);

        if (timed_out) {
                pr_err("%s: cqhci: timeout for tag %d\n",
                       mmc_hostname(mmc), tag);
                cqhci_dumpregs(cq_host);
        }

        return timed_out;
}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
        return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        cqhci_set_irqs(cq_host, CQHCI_IS_TCL);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_CLEAR_ALL_TASKS;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_tasks_cleared(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_tasks_cleared(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to clear tasks\n",
                         mmc_hostname(mmc));

        return ret;
}

static bool cqhci_halted(struct cqhci_host *cq_host)
{
        return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        bool ret;
        u32 ctl;

        if (cqhci_halted(cq_host))
                return true;

        cqhci_set_irqs(cq_host, CQHCI_IS_HAC);

        ctl = cqhci_readl(cq_host, CQHCI_CTL);
        ctl |= CQHCI_HALT;
        cqhci_writel(cq_host, ctl, CQHCI_CTL);

        wait_event_timeout(cq_host->wait_queue, cqhci_halted(cq_host),
                           msecs_to_jiffies(timeout) + 1);

        cqhci_set_irqs(cq_host, 0);

        ret = cqhci_halted(cq_host);

        if (!ret)
                pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));

        return ret;
}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), so we set the timeout based on a
 * generous command timeout.
 */
#define CQHCI_START_HALT_TIMEOUT        5

static void cqhci_recovery_start(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        cqhci_halt(mmc, CQHCI_START_HALT_TIMEOUT);

        if (cq_host->ops->disable)
                cq_host->ops->disable(mmc, true);

        mmc->cqe_on = false;
}

static int cqhci_error_from_flags(unsigned int flags)
{
        if (!flags)
                return 0;

        /* CRC errors might indicate re-tuning so prefer to report that */
        if (flags & CQHCI_HOST_CRC)
                return -EILSEQ;

        if (flags & (CQHCI_EXTERNAL_TIMEOUT | CQHCI_HOST_TIMEOUT))
                return -ETIMEDOUT;

        return -EIO;
}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{
        struct cqhci_slot *slot = &cq_host->slot[tag];
        struct mmc_request *mrq = slot->mrq;
        struct mmc_data *data;

        if (!mrq)
                return;

        slot->mrq = NULL;

        cq_host->qcnt -= 1;

        data = mrq->data;
        if (data) {
                data->bytes_xfered = 0;
                data->error = cqhci_error_from_flags(slot->flags);
        } else {
                mrq->cmd->error = cqhci_error_from_flags(slot->flags);
        }

        mmc_cqe_request_done(cq_host->mmc, mrq);
}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{
        int i;

        for (i = 0; i < cq_host->num_slots; i++)
                cqhci_recover_mrq(cq_host, i);
}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT       20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT             20

static void cqhci_recovery_finish(struct mmc_host *mmc)
{
        struct cqhci_host *cq_host = mmc->cqe_private;
        unsigned long flags;
        u32 cqcfg;
        bool ok;

        pr_debug("%s: cqhci: %s\n", mmc_hostname(mmc), __func__);

        WARN_ON(!cq_host->recovery_halt);

        ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);

        if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                ok = false;

        /*
         * The specification contradicts itself: it says tasks cannot be
         * cleared unless CQHCI halts, but also that if CQHCI fails to halt
         * it should be disabled and re-enabled, while warning not to
         * disable it before the tasks are cleared. Have a go anyway.
         */
        if (!ok) {
                pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
                cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
                cqcfg &= ~CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                cqcfg |= CQHCI_ENABLE;
                cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
                /* Be sure that there are no tasks */
                ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
                if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
                        ok = false;
                WARN_ON(!ok);
        }

        cqhci_recover_mrqs(cq_host);

        WARN_ON(cq_host->qcnt);

        spin_lock_irqsave(&cq_host->lock, flags);
        cq_host->qcnt = 0;
        cq_host->recovery_halt = false;
        mmc->cqe_on = false;
        spin_unlock_irqrestore(&cq_host->lock, flags);

        /* Ensure all writes are done before interrupts are re-enabled */
        wmb();

        cqhci_writel(cq_host, CQHCI_IS_HAC | CQHCI_IS_TCL, CQHCI_IS);

        cqhci_set_irqs(cq_host, CQHCI_IS_MASK);

        pr_debug("%s: cqhci: recovery done\n", mmc_hostname(mmc));
}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
        .cqe_enable = cqhci_enable,
        .cqe_disable = cqhci_disable,
        .cqe_request = cqhci_request,
        .cqe_post_req = cqhci_post_req,
        .cqe_off = cqhci_off,
        .cqe_wait_for_idle = cqhci_wait_for_idle,
        .cqe_timeout = cqhci_timeout,
        .cqe_recovery_start = cqhci_recovery_start,
        .cqe_recovery_finish = cqhci_recovery_finish,
};

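/*
 * Typical glue-driver probe flow (an illustrative sketch; the ops table
 * name is hypothetical):
 *
 *      cq_host = cqhci_pltfm_init(pdev);
 *      if (IS_ERR(cq_host))
 *              return PTR_ERR(cq_host);
 *      cq_host->ops = &my_cqhci_host_ops;
 *      err = cqhci_init(cq_host, mmc, dma64);
 */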
struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{
        struct cqhci_host *cq_host;
        struct resource *cqhci_memres = NULL;

        /* check and setup CMDQ interface */
        cqhci_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                                   "cqhci");
        if (!cqhci_memres) {
                dev_dbg(&pdev->dev, "CMDQ not supported\n");
                return ERR_PTR(-EINVAL);
        }

        cq_host = devm_kzalloc(&pdev->dev, sizeof(*cq_host), GFP_KERNEL);
        if (!cq_host)
                return ERR_PTR(-ENOMEM);
        cq_host->mmio = devm_ioremap(&pdev->dev,
                                     cqhci_memres->start,
                                     resource_size(cqhci_memres));
        if (!cq_host->mmio) {
                dev_err(&pdev->dev, "failed to remap cqhci regs\n");
                return ERR_PTR(-EBUSY);
        }
        dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");

        return cq_host;
}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
        return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
        u32 ver = cqhci_readl(cq_host, CQHCI_VER);

        return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
              bool dma64)
{
        int err;

        cq_host->dma64 = dma64;
        cq_host->mmc = mmc;
        cq_host->mmc->cqe_private = cq_host;

        cq_host->num_slots = NUM_SLOTS;
        cq_host->dcmd_slot = DCMD_SLOT;

        mmc->cqe_ops = &cqhci_cqe_ops;

        mmc->cqe_qdepth = NUM_SLOTS;
        if (mmc->caps2 & MMC_CAP2_CQE_DCMD)
                mmc->cqe_qdepth -= 1;

        cq_host->slot = devm_kcalloc(mmc_dev(mmc), cq_host->num_slots,
                                     sizeof(*cq_host->slot), GFP_KERNEL);
        if (!cq_host->slot) {
                err = -ENOMEM;
                goto out_err;
        }

        err = cqhci_crypto_init(cq_host);
        if (err) {
                pr_err("%s: CQHCI crypto initialization failed\n",
                       mmc_hostname(mmc));
                goto out_err;
        }

        spin_lock_init(&cq_host->lock);

        init_completion(&cq_host->halt_comp);
        init_waitqueue_head(&cq_host->wait_queue);

        pr_info("%s: CQHCI version %u.%02u\n",
                mmc_hostname(mmc), cqhci_ver_major(cq_host),
                cqhci_ver_minor(cq_host));

        return 0;

out_err:
        pr_err("%s: CQHCI version %u.%02u failed to initialize, error %d\n",
               mmc_hostname(mmc), cqhci_ver_major(cq_host),
               cqhci_ver_minor(cq_host), err);
        return err;
}
EXPORT_SYMBOL(cqhci_init);

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");
