linux/drivers/usb/host/xhci-ring.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * xHCI host controller driver
   4 *
   5 * Copyright (C) 2008 Intel Corp.
   6 *
   7 * Author: Sarah Sharp
   8 * Some code borrowed from the Linux EHCI driver.
   9 */
  10
  11/*
  12 * Ring initialization rules:
  13 * 1. Each segment is initialized to zero, except for link TRBs.
  14 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
  15 *    Consumer Cycle State (CCS), depending on ring function.
  16 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  17 *
  18 * Ring behavior rules:
  19 * 1. A ring is empty if enqueue == dequeue.  This means there will always be at
  20 *    least one free TRB in the ring.  This is useful if you want to turn that
  21 *    into a link TRB and expand the ring.
  22 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  23 *    link TRB, then load the pointer with the address in the link TRB.  If the
  24 *    link TRB had its toggle bit set, you may need to update the ring cycle
  25 *    state (see cycle bit rules).  You may have to do this multiple times
  26 *    until you reach a non-link TRB.
  27 * 3. A ring is full if enqueue++ (for the definition of increment above)
  28 *    equals the dequeue pointer.
  29 *
  30 * Cycle bit rules:
  31 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  32 *    in a link TRB, it must toggle the ring cycle state.
  33 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  34 *    in a link TRB, it must toggle the ring cycle state.
  35 *
  36 * Producer rules:
  37 * 1. Check if ring is full before you enqueue.
  38 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  39 *    Update enqueue pointer between each write (which may update the ring
  40 *    cycle state).
  41 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
   42 *    and endpoint rings.  If HC is the producer for the event ring, it
   43 *    generates an interrupt according to interrupt modulation rules.
  44 *
  45 * Consumer rules:
  46 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
  47 *    the TRB is owned by the consumer.
  48 * 2. Update dequeue pointer (which may update the ring cycle state) and
  49 *    continue processing TRBs until you reach a TRB which is not owned by you.
   50 * 3. Notify the producer.  SW is the consumer for the event ring, and it
   51 *    updates the event ring dequeue pointer.  HC is the consumer for the command
   52 *    and endpoint rings; it generates events on the event ring for these.
  53 */
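/*
 * As a rough sketch of the consumer rules above (software consuming the
 * event ring; handle_one_event() and update_erdp() are illustrative names,
 * not the driver functions defined below):
 *
 *	trb = ring->dequeue;
 *	while ((le32_to_cpu(trb->event_cmd.flags) & TRB_CYCLE) ==
 *	       ring->cycle_state) {		(rule 1: TRB is owned by us)
 *		handle_one_event(trb);		(rule 2: process it ...)
 *		inc_deq(xhci, ring);		(... and advance the dequeue pointer)
 *		trb = ring->dequeue;
 *	}
 *	update_erdp(xhci, ring);		(rule 3: notify the producer, the HC)
 */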
  54
  55#include <linux/scatterlist.h>
  56#include <linux/slab.h>
  57#include <linux/dma-mapping.h>
  58#include "xhci.h"
  59#include "xhci-trace.h"
  60
  61static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
  62                         u32 field1, u32 field2,
  63                         u32 field3, u32 field4, bool command_must_succeed);
  64
  65/*
  66 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  67 * address of the TRB.
  68 */
  69dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
  70                union xhci_trb *trb)
  71{
  72        unsigned long segment_offset;
  73
  74        if (!seg || !trb || trb < seg->trbs)
  75                return 0;
  76        /* offset in TRBs */
  77        segment_offset = trb - seg->trbs;
  78        if (segment_offset >= TRBS_PER_SEGMENT)
  79                return 0;
  80        return seg->dma + (segment_offset * sizeof(*trb));
  81}
  82
  83static bool trb_is_noop(union xhci_trb *trb)
  84{
  85        return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
  86}
  87
  88static bool trb_is_link(union xhci_trb *trb)
  89{
  90        return TRB_TYPE_LINK_LE32(trb->link.control);
  91}
  92
  93static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
  94{
  95        return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
  96}
  97
  98static bool last_trb_on_ring(struct xhci_ring *ring,
  99                        struct xhci_segment *seg, union xhci_trb *trb)
 100{
 101        return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
 102}
 103
 104static bool link_trb_toggles_cycle(union xhci_trb *trb)
 105{
 106        return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 107}
 108
 109static bool last_td_in_urb(struct xhci_td *td)
 110{
 111        struct urb_priv *urb_priv = td->urb->hcpriv;
 112
 113        return urb_priv->num_tds_done == urb_priv->num_tds;
 114}
 115
 116static void inc_td_cnt(struct urb *urb)
 117{
 118        struct urb_priv *urb_priv = urb->hcpriv;
 119
 120        urb_priv->num_tds_done++;
 121}
 122
 123static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
 124{
 125        if (trb_is_link(trb)) {
 126                /* unchain chained link TRBs */
 127                trb->link.control &= cpu_to_le32(~TRB_CHAIN);
 128        } else {
 129                trb->generic.field[0] = 0;
 130                trb->generic.field[1] = 0;
 131                trb->generic.field[2] = 0;
 132                /* Preserve only the cycle bit of this TRB */
 133                trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
 134                trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
 135        }
 136}
 137
 138/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 139 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  140 * affect the ring dequeue or enqueue pointers.
 141 */
 142static void next_trb(struct xhci_hcd *xhci,
 143                struct xhci_ring *ring,
 144                struct xhci_segment **seg,
 145                union xhci_trb **trb)
 146{
 147        if (trb_is_link(*trb)) {
 148                *seg = (*seg)->next;
 149                *trb = ((*seg)->trbs);
 150        } else {
 151                (*trb)++;
 152        }
 153}
 154
 155/*
 156 * See Cycle bit rules. SW is the consumer for the event ring only.
 157 */
 158void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 159{
 160        unsigned int link_trb_count = 0;
 161
 162        /* event ring doesn't have link trbs, check for last trb */
 163        if (ring->type == TYPE_EVENT) {
 164                if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
 165                        ring->dequeue++;
 166                        goto out;
 167                }
 168                if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
 169                        ring->cycle_state ^= 1;
 170                ring->deq_seg = ring->deq_seg->next;
 171                ring->dequeue = ring->deq_seg->trbs;
 172                goto out;
 173        }
 174
 175        /* All other rings have link trbs */
 176        if (!trb_is_link(ring->dequeue)) {
 177                if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
 178                        xhci_warn(xhci, "Missing link TRB at end of segment\n");
 179                } else {
 180                        ring->dequeue++;
 181                        ring->num_trbs_free++;
 182                }
 183        }
 184
 185        while (trb_is_link(ring->dequeue)) {
 186                ring->deq_seg = ring->deq_seg->next;
 187                ring->dequeue = ring->deq_seg->trbs;
 188
 189                if (link_trb_count++ > ring->num_segs) {
 190                        xhci_warn(xhci, "Ring is an endless link TRB loop\n");
 191                        break;
 192                }
 193        }
 194out:
 195        trace_xhci_inc_deq(ring);
 196
 197        return;
 198}
 199
 200/*
 201 * See Cycle bit rules. SW is the consumer for the event ring only.
 202 *
 203 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 204 * chain bit is set), then set the chain bit in all the following link TRBs.
 205 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 206 * have their chain bit cleared (so that each Link TRB is a separate TD).
 207 *
 208 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 209 * set, but other sections talk about dealing with the chain bit set.  This was
 210 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 211 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 212 *
 213 * @more_trbs_coming:   Will you enqueue more TRBs before calling
 214 *                      prepare_transfer()?
 215 */
 216static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 217                        bool more_trbs_coming)
 218{
 219        u32 chain;
 220        union xhci_trb *next;
 221        unsigned int link_trb_count = 0;
 222
 223        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
 224        /* If this is not event ring, there is one less usable TRB */
 225        if (!trb_is_link(ring->enqueue))
 226                ring->num_trbs_free--;
 227
 228        if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
 229                xhci_err(xhci, "Tried to move enqueue past ring segment\n");
 230                return;
 231        }
 232
 233        next = ++(ring->enqueue);
 234
  235        /* Update the enqueue pointer further if that was a link TRB */
 236        while (trb_is_link(next)) {
 237
 238                /*
 239                 * If the caller doesn't plan on enqueueing more TDs before
 240                 * ringing the doorbell, then we don't want to give the link TRB
 241                 * to the hardware just yet. We'll give the link TRB back in
 242                 * prepare_ring() just before we enqueue the TD at the top of
 243                 * the ring.
 244                 */
 245                if (!chain && !more_trbs_coming)
 246                        break;
 247
 248                /* If we're not dealing with 0.95 hardware or isoc rings on
 249                 * AMD 0.96 host, carry over the chain bit of the previous TRB
 250                 * (which may mean the chain bit is cleared).
 251                 */
 252                if (!(ring->type == TYPE_ISOC &&
 253                      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
 254                    !xhci_link_trb_quirk(xhci)) {
 255                        next->link.control &= cpu_to_le32(~TRB_CHAIN);
 256                        next->link.control |= cpu_to_le32(chain);
 257                }
 258                /* Give this link TRB to the hardware */
 259                wmb();
 260                next->link.control ^= cpu_to_le32(TRB_CYCLE);
 261
 262                /* Toggle the cycle bit after the last ring segment. */
 263                if (link_trb_toggles_cycle(next))
 264                        ring->cycle_state ^= 1;
 265
 266                ring->enq_seg = ring->enq_seg->next;
 267                ring->enqueue = ring->enq_seg->trbs;
 268                next = ring->enqueue;
 269
 270                if (link_trb_count++ > ring->num_segs) {
 271                        xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
 272                        break;
 273                }
 274        }
 275
 276        trace_xhci_inc_enq(ring);
 277}
 278
 279/*
 280 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 281 * enqueue pointer will not advance into dequeue segment. See rules above.
 282 */
 283static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 284                unsigned int num_trbs)
 285{
 286        int num_trbs_in_deq_seg;
 287
 288        if (ring->num_trbs_free < num_trbs)
 289                return 0;
 290
 291        if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
 292                num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
 293                if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
 294                        return 0;
 295        }
 296
 297        return 1;
 298}
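/*
 * Putting the producer rules from the top of this file together, a stripped
 * down submission sequence looks roughly like this (illustrative sketch only;
 * the real paths below also go through prepare_ring()/queue_trb(), TD
 * bookkeeping and link TRB quirks):
 *
 *	if (!room_on_ring(xhci, ring, num_trbs))
 *		return -ENOMEM;				(rule 1: check for space)
 *	trb->generic.field[3] = cpu_to_le32(TRB_TYPE(type) |
 *					    ring->cycle_state);	(rule 2: cycle bit written last)
 *	inc_enq(xhci, ring, more_trbs_coming);		(may walk over link TRBs)
 *	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);  (rule 3: notify the HC)
 */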
 299
 300/* Ring the host controller doorbell after placing a command on the ring */
 301void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 302{
 303        if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
 304                return;
 305
 306        xhci_dbg(xhci, "// Ding dong!\n");
 307
 308        trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);
 309
 310        writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 311        /* Flush PCI posted writes */
 312        readl(&xhci->dba->doorbell[0]);
 313}
 314
 315static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
 316{
 317        return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
 318}
 319
 320static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
 321{
 322        return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
 323                                        cmd_list);
 324}
 325
 326/*
  327 * Turn all commands on the command ring with status set to "aborted" into no-op TRBs.
  328 * If there are other commands waiting, then restart the ring and kick the timer.
 329 * This must be called with command ring stopped and xhci->lock held.
 330 */
 331static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 332                                         struct xhci_command *cur_cmd)
 333{
 334        struct xhci_command *i_cmd;
 335
 336        /* Turn all aborted commands in list to no-ops, then restart */
 337        list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {
 338
 339                if (i_cmd->status != COMP_COMMAND_ABORTED)
 340                        continue;
 341
 342                i_cmd->status = COMP_COMMAND_RING_STOPPED;
 343
 344                xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
 345                         i_cmd->command_trb);
 346
 347                trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);
 348
 349                /*
  350                 * The caller waiting for completion is notified when a command
  351                 * completion event is received for these no-op commands.
 352                 */
 353        }
 354
 355        xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
 356
 357        /* ring command ring doorbell to restart the command ring */
 358        if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
 359            !(xhci->xhc_state & XHCI_STATE_DYING)) {
 360                xhci->current_cmd = cur_cmd;
 361                xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
 362                xhci_ring_cmd_db(xhci);
 363        }
 364}
 365
  366/* Must be called with xhci->lock held; releases and re-acquires the lock */
 367static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 368{
 369        u64 temp_64;
 370        int ret;
 371
 372        xhci_dbg(xhci, "Abort command ring\n");
 373
 374        reinit_completion(&xhci->cmd_ring_stop_completion);
 375
 376        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
 377        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
 378                        &xhci->op_regs->cmd_ring);
 379
 380        /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
 381         * completion of the Command Abort operation. If CRR is not negated in 5
  382         * seconds then the driver handles it as if the host died (-ENODEV).
 383         * In the future we should distinguish between -ENODEV and -ETIMEDOUT
 384         * and try to recover a -ETIMEDOUT with a host controller reset.
 385         */
 386        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
 387                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
 388        if (ret < 0) {
 389                xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
 390                xhci_halt(xhci);
 391                xhci_hc_died(xhci);
 392                return ret;
 393        }
 394        /*
 395         * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
 396         * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
  397         * but the completion event is never sent. Wait 2 secs (arbitrary
 398         * number) to handle those cases after negation of CMD_RING_RUNNING.
 399         */
 400        spin_unlock_irqrestore(&xhci->lock, flags);
 401        ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
 402                                          msecs_to_jiffies(2000));
 403        spin_lock_irqsave(&xhci->lock, flags);
 404        if (!ret) {
 405                xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
 406                xhci_cleanup_command_queue(xhci);
 407        } else {
 408                xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
 409        }
 410        return 0;
 411}
 412
 413void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 414                unsigned int slot_id,
 415                unsigned int ep_index,
 416                unsigned int stream_id)
 417{
 418        __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
 419        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
 420        unsigned int ep_state = ep->ep_state;
 421
 422        /* Don't ring the doorbell for this endpoint if there are pending
 423         * cancellations because we don't want to interrupt processing.
 424         * We don't want to restart any stream rings if there's a set dequeue
 425         * pointer command pending because the device can choose to start any
 426         * stream once the endpoint is on the HW schedule.
 427         */
 428        if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
 429            (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
 430                return;
 431
 432        trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
 433
 434        writel(DB_VALUE(ep_index, stream_id), db_addr);
 435        /* flush the write */
 436        readl(db_addr);
 437}
 438
 439/* Ring the doorbell for any rings with pending URBs */
 440static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 441                unsigned int slot_id,
 442                unsigned int ep_index)
 443{
 444        unsigned int stream_id;
 445        struct xhci_virt_ep *ep;
 446
 447        ep = &xhci->devs[slot_id]->eps[ep_index];
 448
 449        /* A ring has pending URBs if its TD list is not empty */
 450        if (!(ep->ep_state & EP_HAS_STREAMS)) {
 451                if (ep->ring && !(list_empty(&ep->ring->td_list)))
 452                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
 453                return;
 454        }
 455
 456        for (stream_id = 1; stream_id < ep->stream_info->num_streams;
 457                        stream_id++) {
 458                struct xhci_stream_info *stream_info = ep->stream_info;
 459                if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
 460                        xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
 461                                                stream_id);
 462        }
 463}
 464
 465void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 466                unsigned int slot_id,
 467                unsigned int ep_index)
 468{
 469        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 470}
 471
 472static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
 473                                             unsigned int slot_id,
 474                                             unsigned int ep_index)
 475{
 476        if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
 477                xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
 478                return NULL;
 479        }
 480        if (ep_index >= EP_CTX_PER_DEV) {
 481                xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
 482                return NULL;
 483        }
 484        if (!xhci->devs[slot_id]) {
 485                xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
 486                return NULL;
 487        }
 488
 489        return &xhci->devs[slot_id]->eps[ep_index];
 490}
 491
 492static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
 493                                              struct xhci_virt_ep *ep,
 494                                              unsigned int stream_id)
 495{
 496        /* common case, no streams */
 497        if (!(ep->ep_state & EP_HAS_STREAMS))
 498                return ep->ring;
 499
 500        if (!ep->stream_info)
 501                return NULL;
 502
 503        if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
 504                xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
 505                          stream_id, ep->vdev->slot_id, ep->ep_index);
 506                return NULL;
 507        }
 508
 509        return ep->stream_info->stream_rings[stream_id];
 510}
 511
 512/* Get the right ring for the given slot_id, ep_index and stream_id.
 513 * If the endpoint supports streams, boundary check the URB's stream ID.
 514 * If the endpoint doesn't support streams, return the singular endpoint ring.
 515 */
 516struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 517                unsigned int slot_id, unsigned int ep_index,
 518                unsigned int stream_id)
 519{
 520        struct xhci_virt_ep *ep;
 521
 522        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
 523        if (!ep)
 524                return NULL;
 525
 526        return xhci_virt_ep_to_ring(xhci, ep, stream_id);
 527}
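/*
 * Callers typically derive the triplet from an URB; a minimal sketch of such
 * a lookup (error handling omitted) is:
 *
 *	ring = xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
 *					   xhci_get_endpoint_index(&urb->ep->desc),
 *					   urb->stream_id);
 */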
 528
 529
 530/*
  531 * Get the hw dequeue pointer the xHC stopped on, either directly from the
  532 * endpoint context, or, if streams are in use, from the stream context.
  533 * The lowest four bits of the returned hw_dequeue hold the cycle state and,
  534 * possibly, the stream context type.
 535 */
 536static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
 537                           unsigned int ep_index, unsigned int stream_id)
 538{
 539        struct xhci_ep_ctx *ep_ctx;
 540        struct xhci_stream_ctx *st_ctx;
 541        struct xhci_virt_ep *ep;
 542
 543        ep = &vdev->eps[ep_index];
 544
 545        if (ep->ep_state & EP_HAS_STREAMS) {
 546                st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
 547                return le64_to_cpu(st_ctx->stream_ring);
 548        }
 549        ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
 550        return le64_to_cpu(ep_ctx->deq);
 551}
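/*
 * Callers split the returned value up; as a sketch, matching the masking used
 * in xhci_move_dequeue_past_td() and xhci_invalidate_cancelled_tds() below:
 *
 *	u64 hw_dequeue = xhci_get_hw_deq(xhci, vdev, ep_index, stream_id);
 *	dma_addr_t deq_dma = hw_dequeue & ~0xfULL;	(16-byte aligned TRB address)
 *	int cycle = hw_dequeue & 0x1;			(consumer cycle state, CCS)
 */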
 552
 553static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
 554                                unsigned int slot_id, unsigned int ep_index,
 555                                unsigned int stream_id, struct xhci_td *td)
 556{
 557        struct xhci_virt_device *dev = xhci->devs[slot_id];
 558        struct xhci_virt_ep *ep = &dev->eps[ep_index];
 559        struct xhci_ring *ep_ring;
 560        struct xhci_command *cmd;
 561        struct xhci_segment *new_seg;
 562        union xhci_trb *new_deq;
 563        int new_cycle;
 564        dma_addr_t addr;
 565        u64 hw_dequeue;
 566        bool cycle_found = false;
 567        bool td_last_trb_found = false;
 568        u32 trb_sct = 0;
 569        int ret;
 570
 571        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
 572                        ep_index, stream_id);
 573        if (!ep_ring) {
 574                xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
 575                          stream_id);
 576                return -ENODEV;
 577        }
 578        /*
  579         * A cancelled TD can complete with a stall if HW cached the TRB.
  580         * In this case the driver can't find the TD, but if the ring is empty we
  581         * can move the dequeue pointer to the current enqueue position.
  582         * We shouldn't hit this anymore as cached cancelled TRBs are given back
  583         * after clearing the cache, but be on the safe side and keep it anyway.
 584         */
 585        if (!td) {
 586                if (list_empty(&ep_ring->td_list)) {
 587                        new_seg = ep_ring->enq_seg;
 588                        new_deq = ep_ring->enqueue;
 589                        new_cycle = ep_ring->cycle_state;
 590                        xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
 591                        goto deq_found;
 592                } else {
 593                        xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
 594                        return -EINVAL;
 595                }
 596        }
 597
 598        hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
 599        new_seg = ep_ring->deq_seg;
 600        new_deq = ep_ring->dequeue;
 601        new_cycle = hw_dequeue & 0x1;
 602
 603        /*
 604         * We want to find the pointer, segment and cycle state of the new trb
 605         * (the one after current TD's last_trb). We know the cycle state at
 606         * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
 607         * found.
 608         */
 609        do {
 610                if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
 611                    == (dma_addr_t)(hw_dequeue & ~0xf)) {
 612                        cycle_found = true;
 613                        if (td_last_trb_found)
 614                                break;
 615                }
 616                if (new_deq == td->last_trb)
 617                        td_last_trb_found = true;
 618
 619                if (cycle_found && trb_is_link(new_deq) &&
 620                    link_trb_toggles_cycle(new_deq))
 621                        new_cycle ^= 0x1;
 622
 623                next_trb(xhci, ep_ring, &new_seg, &new_deq);
 624
 625                /* Search wrapped around, bail out */
 626                if (new_deq == ep->ring->dequeue) {
 627                        xhci_err(xhci, "Error: Failed finding new dequeue state\n");
 628                        return -EINVAL;
 629                }
 630
 631        } while (!cycle_found || !td_last_trb_found);
 632
 633deq_found:
 634
 635        /* Don't update the ring cycle state for the producer (us). */
 636        addr = xhci_trb_virt_to_dma(new_seg, new_deq);
 637        if (addr == 0) {
 638                xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
 639                xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
 640                return -EINVAL;
 641        }
 642
 643        if ((ep->ep_state & SET_DEQ_PENDING)) {
 644                xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
 645                          &addr);
 646                return -EBUSY;
 647        }
 648
 649        /* This function gets called from contexts where it cannot sleep */
 650        cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
 651        if (!cmd) {
 652                xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
 653                return -ENOMEM;
 654        }
 655
 656        if (stream_id)
 657                trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
 658        ret = queue_command(xhci, cmd,
 659                lower_32_bits(addr) | trb_sct | new_cycle,
 660                upper_32_bits(addr),
 661                STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
 662                EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
 663        if (ret < 0) {
 664                xhci_free_command(xhci, cmd);
 665                return ret;
 666        }
 667        ep->queued_deq_seg = new_seg;
 668        ep->queued_deq_ptr = new_deq;
 669
 670        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 671                       "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
 672
 673        /* Stop the TD queueing code from ringing the doorbell until
 674         * this command completes.  The HC won't set the dequeue pointer
 675         * if the ring is running, and ringing the doorbell starts the
 676         * ring running.
 677         */
 678        ep->ep_state |= SET_DEQ_PENDING;
 679        xhci_ring_cmd_db(xhci);
 680        return 0;
 681}
 682
 683/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 684 * (The last TRB actually points to the ring enqueue pointer, which is not part
 685 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 686 */
 687static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 688                       struct xhci_td *td, bool flip_cycle)
 689{
 690        struct xhci_segment *seg        = td->start_seg;
 691        union xhci_trb *trb             = td->first_trb;
 692
 693        while (1) {
 694                trb_to_noop(trb, TRB_TR_NOOP);
 695
 696                /* flip cycle if asked to */
 697                if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
 698                        trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
 699
 700                if (trb == td->last_trb)
 701                        break;
 702
 703                next_trb(xhci, ep_ring, &seg, &trb);
 704        }
 705}
 706
 707static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 708                struct xhci_virt_ep *ep)
 709{
 710        ep->ep_state &= ~EP_STOP_CMD_PENDING;
 711        /* Can't del_timer_sync in interrupt */
 712        del_timer(&ep->stop_cmd_timer);
 713}
 714
 715/*
 716 * Must be called with xhci->lock held in interrupt context,
 717 * releases and re-acquires xhci->lock
 718 */
 719static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 720                                     struct xhci_td *cur_td, int status)
 721{
 722        struct urb      *urb            = cur_td->urb;
 723        struct urb_priv *urb_priv       = urb->hcpriv;
 724        struct usb_hcd  *hcd            = bus_to_hcd(urb->dev->bus);
 725
 726        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
 727                xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 728                if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
 729                        if (xhci->quirks & XHCI_AMD_PLL_FIX)
 730                                usb_amd_quirk_pll_enable();
 731                }
 732        }
 733        xhci_urb_free_priv(urb_priv);
 734        usb_hcd_unlink_urb_from_ep(hcd, urb);
 735        trace_xhci_urb_giveback(urb);
 736        usb_hcd_giveback_urb(hcd, urb, status);
 737}
 738
 739static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
 740                struct xhci_ring *ring, struct xhci_td *td)
 741{
 742        struct device *dev = xhci_to_hcd(xhci)->self.controller;
 743        struct xhci_segment *seg = td->bounce_seg;
 744        struct urb *urb = td->urb;
 745        size_t len;
 746
 747        if (!ring || !seg || !urb)
 748                return;
 749
 750        if (usb_urb_dir_out(urb)) {
 751                dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
 752                                 DMA_TO_DEVICE);
 753                return;
 754        }
 755
 756        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
 757                         DMA_FROM_DEVICE);
  758        /* for IN transfers we need to copy the data from bounce buffer to sg */
 759        if (urb->num_sgs) {
 760                len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
 761                                           seg->bounce_len, seg->bounce_offs);
 762                if (len != seg->bounce_len)
 763                        xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
 764                                  len, seg->bounce_len);
 765        } else {
 766                memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
 767                       seg->bounce_len);
 768        }
 769        seg->bounce_len = 0;
 770        seg->bounce_offs = 0;
 771}
 772
 773static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
 774                           struct xhci_ring *ep_ring, int status)
 775{
 776        struct urb *urb = NULL;
 777
 778        /* Clean up the endpoint's TD list */
 779        urb = td->urb;
 780
 781        /* if a bounce buffer was used to align this td then unmap it */
 782        xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
 783
 784        /* Do one last check of the actual transfer length.
 785         * If the host controller said we transferred more data than the buffer
 786         * length, urb->actual_length will be a very big number (since it's
 787         * unsigned).  Play it safe and say we didn't transfer anything.
 788         */
 789        if (urb->actual_length > urb->transfer_buffer_length) {
 790                xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
 791                          urb->transfer_buffer_length, urb->actual_length);
 792                urb->actual_length = 0;
 793                status = 0;
 794        }
 795        /* TD might be removed from td_list if we are giving back a cancelled URB */
 796        if (!list_empty(&td->td_list))
 797                list_del_init(&td->td_list);
  798        /* Giving back a cancelled URB, or a TD slated for cancellation that completed anyway */
 799        if (!list_empty(&td->cancelled_td_list))
 800                list_del_init(&td->cancelled_td_list);
 801
 802        inc_td_cnt(urb);
 803        /* Giveback the urb when all the tds are completed */
 804        if (last_td_in_urb(td)) {
 805                if ((urb->actual_length != urb->transfer_buffer_length &&
 806                     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
 807                    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
 808                        xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
 809                                 urb, urb->actual_length,
 810                                 urb->transfer_buffer_length, status);
 811
 812                /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
 813                if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
 814                        status = 0;
 815                xhci_giveback_urb_in_irq(xhci, td, status);
 816        }
 817
 818        return 0;
 819}
 820
 821
 822/* Complete the cancelled URBs we unlinked from td_list. */
 823static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
 824{
 825        struct xhci_ring *ring;
 826        struct xhci_td *td, *tmp_td;
 827
 828        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
 829                                 cancelled_td_list) {
 830
 831                ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
 832
 833                if (td->cancel_status == TD_CLEARED)
 834                        xhci_td_cleanup(ep->xhci, td, ring, td->status);
 835
 836                if (ep->xhci->xhc_state & XHCI_STATE_DYING)
 837                        return;
 838        }
 839}
 840
 841static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
 842                                unsigned int ep_index, enum xhci_ep_reset_type reset_type)
 843{
 844        struct xhci_command *command;
 845        int ret = 0;
 846
 847        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
 848        if (!command) {
 849                ret = -ENOMEM;
 850                goto done;
 851        }
 852
 853        ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
 854done:
 855        if (ret)
 856                xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
 857                         slot_id, ep_index, ret);
 858        return ret;
 859}
 860
 861static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
 862                                struct xhci_virt_ep *ep, unsigned int stream_id,
 863                                struct xhci_td *td,
 864                                enum xhci_ep_reset_type reset_type)
 865{
 866        unsigned int slot_id = ep->vdev->slot_id;
 867        int err;
 868
 869        /*
  870         * Avoid resetting endpoint if link is inactive; it can hang the host.
  871         * Device will be reset soon to recover the link, so don't do anything.
 872         */
 873        if (ep->vdev->flags & VDEV_PORT_ERROR)
 874                return -ENODEV;
 875
 876        /* add td to cancelled list and let reset ep handler take care of it */
 877        if (reset_type == EP_HARD_RESET) {
 878                ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
 879                if (td && list_empty(&td->cancelled_td_list)) {
 880                        list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
 881                        td->cancel_status = TD_HALTED;
 882                }
 883        }
 884
 885        if (ep->ep_state & EP_HALTED) {
 886                xhci_dbg(xhci, "Reset ep command already pending\n");
 887                return 0;
 888        }
 889
 890        err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
 891        if (err)
 892                return err;
 893
 894        ep->ep_state |= EP_HALTED;
 895
 896        xhci_ring_cmd_db(xhci);
 897
 898        return 0;
 899}
 900
 901/*
 902 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 903 * We have the xHCI lock, so nothing can modify this list until we drop it.
 904 * We're also in the event handler, so we can't get re-interrupted if another
 905 * Stop Endpoint command completes.
 906 *
  907 * Only call this when the ring is not in a running state.
 908 */
 909
 910static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
 911{
 912        struct xhci_hcd         *xhci;
 913        struct xhci_td          *td = NULL;
 914        struct xhci_td          *tmp_td = NULL;
 915        struct xhci_td          *cached_td = NULL;
 916        struct xhci_ring        *ring;
 917        u64                     hw_deq;
 918        unsigned int            slot_id = ep->vdev->slot_id;
 919        int                     err;
 920
 921        xhci = ep->xhci;
 922
 923        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
 924                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
 925                                "Removing canceled TD starting at 0x%llx (dma).",
 926                                (unsigned long long)xhci_trb_virt_to_dma(
 927                                        td->start_seg, td->first_trb));
 928                list_del_init(&td->td_list);
 929                ring = xhci_urb_to_transfer_ring(xhci, td->urb);
 930                if (!ring) {
 931                        xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
 932                                  td->urb, td->urb->stream_id);
 933                        continue;
 934                }
 935                /*
  936                 * If a ring stopped on the TD we need to cancel, then we have to
 937                 * move the xHC endpoint ring dequeue pointer past this TD.
 938                 * Rings halted due to STALL may show hw_deq is past the stalled
 939                 * TD, but still require a set TR Deq command to flush xHC cache.
 940                 */
 941                hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
 942                                         td->urb->stream_id);
 943                hw_deq &= ~0xf;
 944
 945                if (td->cancel_status == TD_HALTED) {
 946                        cached_td = td;
 947                } else if (trb_in_td(xhci, td->start_seg, td->first_trb,
 948                              td->last_trb, hw_deq, false)) {
 949                        switch (td->cancel_status) {
 950                        case TD_CLEARED: /* TD is already no-op */
 951                        case TD_CLEARING_CACHE: /* set TR deq command already queued */
 952                                break;
 953                        case TD_DIRTY: /* TD is cached, clear it */
 954                        case TD_HALTED:
 955                                /* FIXME  stream case, several stopped rings */
 956                                cached_td = td;
 957                                break;
 958                        }
 959                } else {
 960                        td_to_noop(xhci, ring, td, false);
 961                        td->cancel_status = TD_CLEARED;
 962                }
 963        }
 964        if (cached_td) {
 965                cached_td->cancel_status = TD_CLEARING_CACHE;
 966
 967                err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
 968                                                cached_td->urb->stream_id,
 969                                                cached_td);
 970                /* Failed to move past cached td, try just setting it noop */
 971                if (err) {
 972                        td_to_noop(xhci, ring, cached_td, false);
 973                        cached_td->cancel_status = TD_CLEARED;
 974                }
 975                cached_td = NULL;
 976        }
 977        return 0;
 978}
 979
 980/*
 981 * Returns the TD the endpoint ring halted on.
 982 * Only call for non-running rings without streams.
 983 */
 984static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
 985{
 986        struct xhci_td  *td;
 987        u64             hw_deq;
 988
 989        if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
 990                hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
 991                hw_deq &= ~0xf;
 992                td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
 993                if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
 994                                td->last_trb, hw_deq, false))
 995                        return td;
 996        }
 997        return NULL;
 998}
 999
1000/*
1001 * When we get a command completion for a Stop Endpoint Command, we need to
1002 * unlink any cancelled TDs from the ring.  There are two ways to do that:
1003 *
1004 *  1. If the HW was in the middle of processing the TD that needs to be
1005 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
1006 *     in the TD with a Set Dequeue Pointer Command.
1007 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
1008 *     bit cleared) so that the HW will skip over them.
1009 */
1010static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
1011                                    union xhci_trb *trb, u32 comp_code)
1012{
1013        unsigned int ep_index;
1014        struct xhci_virt_ep *ep;
1015        struct xhci_ep_ctx *ep_ctx;
1016        struct xhci_td *td = NULL;
1017        enum xhci_ep_reset_type reset_type;
1018        struct xhci_command *command;
1019        int err;
1020
1021        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
1022                if (!xhci->devs[slot_id])
1023                        xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
1024                                  slot_id);
1025                return;
1026        }
1027
1028        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1029        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1030        if (!ep)
1031                return;
1032
1033        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1034
1035        trace_xhci_handle_cmd_stop_ep(ep_ctx);
1036
1037        if (comp_code == COMP_CONTEXT_STATE_ERROR) {
1038        /*
 1039         * If the stop endpoint command raced with a halting endpoint, we need to
 1040         * reset the host side endpoint first.
 1041         * If the TD we halted on isn't cancelled, the TD should be given back
 1042         * with a proper error code, and the ring dequeue moved past the TD.
 1043         * In the streams case we can't find hw_deq or the TD we halted on, so do
 1044         * a soft reset instead.
 1045         *
 1046         * The proper error code is unknown here; it would be -EPIPE if the device
 1047         * side of the endpoint halted (aka STALL), and -EPROTO if not (transaction
 1048         * error). We use -EPROTO: if the device is stalled it should return a
 1049         * stall error on the next transfer, which then returns -EPIPE, and the
 1050         * device side stall is noted and cleared by the class driver.
1051         */
1052                switch (GET_EP_CTX_STATE(ep_ctx)) {
1053                case EP_STATE_HALTED:
1054                        xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
1055                        if (ep->ep_state & EP_HAS_STREAMS) {
1056                                reset_type = EP_SOFT_RESET;
1057                        } else {
1058                                reset_type = EP_HARD_RESET;
1059                                td = find_halted_td(ep);
1060                                if (td)
1061                                        td->status = -EPROTO;
1062                        }
1063                        /* reset ep, reset handler cleans up cancelled tds */
1064                        err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
1065                                                          reset_type);
1066                        if (err)
1067                                break;
1068                        xhci_stop_watchdog_timer_in_irq(xhci, ep);
1069                        return;
1070                case EP_STATE_RUNNING:
1071                        /* Race, HW handled stop ep cmd before ep was running */
 1072                        command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
 1073                        if (!command) {
 1074                                xhci_stop_watchdog_timer_in_irq(xhci, ep);
 1075                                return;
 1076                        }
 1077                        mod_timer(&ep->stop_cmd_timer, jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
1078                        xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
1079                        xhci_ring_cmd_db(xhci);
1080
1081                        return;
1082                default:
1083                        break;
1084                }
1085        }
1086        /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
1087        xhci_invalidate_cancelled_tds(ep);
1088        xhci_stop_watchdog_timer_in_irq(xhci, ep);
1089
1090        /* Otherwise ring the doorbell(s) to restart queued transfers */
1091        xhci_giveback_invalidated_tds(ep);
1092        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1093}
1094
1095static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
1096{
1097        struct xhci_td *cur_td;
1098        struct xhci_td *tmp;
1099
1100        list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
1101                list_del_init(&cur_td->td_list);
1102
1103                if (!list_empty(&cur_td->cancelled_td_list))
1104                        list_del_init(&cur_td->cancelled_td_list);
1105
1106                xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
1107
1108                inc_td_cnt(cur_td->urb);
1109                if (last_td_in_urb(cur_td))
1110                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1111        }
1112}
1113
1114static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
1115                int slot_id, int ep_index)
1116{
1117        struct xhci_td *cur_td;
1118        struct xhci_td *tmp;
1119        struct xhci_virt_ep *ep;
1120        struct xhci_ring *ring;
1121
1122        ep = &xhci->devs[slot_id]->eps[ep_index];
1123        if ((ep->ep_state & EP_HAS_STREAMS) ||
1124                        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
1125                int stream_id;
1126
1127                for (stream_id = 1; stream_id < ep->stream_info->num_streams;
1128                                stream_id++) {
1129                        ring = ep->stream_info->stream_rings[stream_id];
1130                        if (!ring)
1131                                continue;
1132
1133                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1134                                        "Killing URBs for slot ID %u, ep index %u, stream %u",
1135                                        slot_id, ep_index, stream_id);
1136                        xhci_kill_ring_urbs(xhci, ring);
1137                }
1138        } else {
1139                ring = ep->ring;
1140                if (!ring)
1141                        return;
1142                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1143                                "Killing URBs for slot ID %u, ep index %u",
1144                                slot_id, ep_index);
1145                xhci_kill_ring_urbs(xhci, ring);
1146        }
1147
1148        list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
1149                        cancelled_td_list) {
1150                list_del_init(&cur_td->cancelled_td_list);
1151                inc_td_cnt(cur_td->urb);
1152
1153                if (last_td_in_urb(cur_td))
1154                        xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
1155        }
1156}
1157
1158/*
 1159 * Host controller died; register reads return 0xffffffff.
1160 * Complete pending commands, mark them ABORTED.
1161 * URBs need to be given back as usb core might be waiting with device locks
1162 * held for the URBs to finish during device disconnect, blocking host remove.
1163 *
1164 * Call with xhci->lock held.
 1165 * The lock is released and re-acquired while giving back URBs.
1166 */
1167void xhci_hc_died(struct xhci_hcd *xhci)
1168{
1169        int i, j;
1170
1171        if (xhci->xhc_state & XHCI_STATE_DYING)
1172                return;
1173
1174        xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
1175        xhci->xhc_state |= XHCI_STATE_DYING;
1176
1177        xhci_cleanup_command_queue(xhci);
1178
1179        /* return any pending urbs, remove may be waiting for them */
1180        for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
1181                if (!xhci->devs[i])
1182                        continue;
1183                for (j = 0; j < 31; j++)
1184                        xhci_kill_endpoint_urbs(xhci, i, j);
1185        }
1186
1187        /* inform usb core hc died if PCI remove isn't already handling it */
1188        if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
1189                usb_hc_died(xhci_to_hcd(xhci));
1190}
1191
1192/* Watchdog timer function for when a stop endpoint command fails to complete.
1193 * In this case, we assume the host controller is broken or dying or dead.  The
1194 * host may still be completing some other events, so we have to be careful to
1195 * let the event ring handler and the URB dequeueing/enqueueing functions know
1196 * through xhci->state.
1197 *
1198 * The timer may also fire if the host takes a very long time to respond to the
1199 * command, and the stop endpoint command completion handler cannot delete the
1200 * timer before the timer function is called.  Another endpoint cancellation may
1201 * sneak in before the timer function can grab the lock, and that may queue
1202 * another stop endpoint command and add the timer back.  So we cannot use a
1203 * simple flag to say whether there is a pending stop endpoint command for a
1204 * particular endpoint.
1205 *
1206 * Instead we use a combination of that flag and checking if a new timer is
1207 * pending.
1208 */
1209void xhci_stop_endpoint_command_watchdog(struct timer_list *t)
1210{
1211        struct xhci_virt_ep *ep = from_timer(ep, t, stop_cmd_timer);
1212        struct xhci_hcd *xhci = ep->xhci;
1213        unsigned long flags;
1214        u32 usbsts;
1215
1216        spin_lock_irqsave(&xhci->lock, flags);
1217
 1218        /* bail out if cmd completed but raced with stop ep watchdog timer. */
1219        if (!(ep->ep_state & EP_STOP_CMD_PENDING) ||
1220            timer_pending(&ep->stop_cmd_timer)) {
1221                spin_unlock_irqrestore(&xhci->lock, flags);
1222                xhci_dbg(xhci, "Stop EP timer raced with cmd completion, exit");
1223                return;
1224        }
1225        usbsts = readl(&xhci->op_regs->status);
1226
1227        xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
1228        xhci_warn(xhci, "USBSTS:%s\n", xhci_decode_usbsts(usbsts));
1229
1230        ep->ep_state &= ~EP_STOP_CMD_PENDING;
1231
1232        xhci_halt(xhci);
1233
1234        /*
1235         * handle a stop endpoint cmd timeout as if host died (-ENODEV).
1236         * In the future we could distinguish between -ENODEV and -ETIMEDOUT
1237         * and try to recover a -ETIMEDOUT with a host controller reset
1238         */
1239        xhci_hc_died(xhci);
1240
1241        spin_unlock_irqrestore(&xhci->lock, flags);
1242        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1243                        "xHCI host controller is dead.");
1244}
1245
1246static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
1247                struct xhci_virt_device *dev,
1248                struct xhci_ring *ep_ring,
1249                unsigned int ep_index)
1250{
1251        union xhci_trb *dequeue_temp;
1252        int num_trbs_free_temp;
1253        bool revert = false;
1254
1255        num_trbs_free_temp = ep_ring->num_trbs_free;
1256        dequeue_temp = ep_ring->dequeue;
1257
1258        /* If we get two back-to-back stalls, and the first stalled transfer
1259         * ends just before a link TRB, the dequeue pointer will be left on
1260         * the link TRB by the code in the while loop.  So we have to update
1261         * the dequeue pointer one segment further, or we'll jump off
1262         * the segment into la-la-land.
1263         */
1264        if (trb_is_link(ep_ring->dequeue)) {
1265                ep_ring->deq_seg = ep_ring->deq_seg->next;
1266                ep_ring->dequeue = ep_ring->deq_seg->trbs;
1267        }
1268
1269        while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
1270                /* We have more usable TRBs */
1271                ep_ring->num_trbs_free++;
1272                ep_ring->dequeue++;
1273                if (trb_is_link(ep_ring->dequeue)) {
1274                        if (ep_ring->dequeue ==
1275                                        dev->eps[ep_index].queued_deq_ptr)
1276                                break;
1277                        ep_ring->deq_seg = ep_ring->deq_seg->next;
1278                        ep_ring->dequeue = ep_ring->deq_seg->trbs;
1279                }
1280                if (ep_ring->dequeue == dequeue_temp) {
1281                        revert = true;
1282                        break;
1283                }
1284        }
1285
1286        if (revert) {
1287                xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
1288                ep_ring->num_trbs_free = num_trbs_free_temp;
1289        }
1290}
1291
1292/*
1293 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
1294 * we need to clear the set deq pending flag in the endpoint ring state, so that
1295 * the TD queueing code can ring the doorbell again.  We also need to ring the
1296 * endpoint doorbell to restart the ring, but only if there aren't more
1297 * cancellations pending.
1298 */
1299static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
1300                union xhci_trb *trb, u32 cmd_comp_code)
1301{
1302        unsigned int ep_index;
1303        unsigned int stream_id;
1304        struct xhci_ring *ep_ring;
1305        struct xhci_virt_ep *ep;
1306        struct xhci_ep_ctx *ep_ctx;
1307        struct xhci_slot_ctx *slot_ctx;
1308        struct xhci_td *td, *tmp_td;
1309
1310        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1311        stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
1312        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1313        if (!ep)
1314                return;
1315
1316        ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
1317        if (!ep_ring) {
1318                xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
1319                                stream_id);
1320                /* XXX: Harmless??? */
1321                goto cleanup;
1322        }
1323
1324        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1325        slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
1326        trace_xhci_handle_cmd_set_deq(slot_ctx);
1327        trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
1328
1329        if (cmd_comp_code != COMP_SUCCESS) {
1330                unsigned int ep_state;
1331                unsigned int slot_state;
1332
1333                switch (cmd_comp_code) {
1334                case COMP_TRB_ERROR:
1335                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
1336                        break;
1337                case COMP_CONTEXT_STATE_ERROR:
1338                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
1339                        ep_state = GET_EP_CTX_STATE(ep_ctx);
1340                        slot_state = le32_to_cpu(slot_ctx->dev_state);
1341                        slot_state = GET_SLOT_STATE(slot_state);
1342                        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1343                                        "Slot state = %u, EP state = %u",
1344                                        slot_state, ep_state);
1345                        break;
1346                case COMP_SLOT_NOT_ENABLED_ERROR:
1347                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
1348                                        slot_id);
1349                        break;
1350                default:
1351                        xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
1352                                        cmd_comp_code);
1353                        break;
1354                }
1355                /* OK, what do we do now?  The endpoint state is hosed, and we
1356                 * should never get to this point if the synchronization between
1357                 * queueing and endpoint state is correct.  This might happen
1358                 * if the device gets disconnected after we've finished
1359                 * cancelling URBs, which might not be an error...
1360                 */
1361        } else {
1362                u64 deq;
1363                /* 4.6.10 deq ptr is written to the stream ctx for streams */
1364                if (ep->ep_state & EP_HAS_STREAMS) {
1365                        struct xhci_stream_ctx *ctx =
1366                                &ep->stream_info->stream_ctx_array[stream_id];
1367                        deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
1368                } else {
1369                        deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
1370                }
1371                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1372                        "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
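                /*
                 * Only advance our copy of the dequeue pointer if the xHC
                 * actually moved to the pointer we asked for.
                 */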
1373                if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
1374                                         ep->queued_deq_ptr) == deq) {
1375                        /* Update the ring's dequeue segment and dequeue pointer
1376                         * to reflect the new position.
1377                         */
1378                        update_ring_for_set_deq_completion(xhci, ep->vdev,
1379                                ep_ring, ep_index);
1380                } else {
1381                        xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
1382                        xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
1383                                  ep->queued_deq_seg, ep->queued_deq_ptr);
1384                }
1385        }
1386        /* TDs cleared from the xHC's internal cache, give them back */
1387        list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
1388                                 cancelled_td_list) {
1389                ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
1390                if (td->cancel_status == TD_CLEARING_CACHE) {
1391                        td->cancel_status = TD_CLEARED;
1392                        xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
1393                }
1394        }
1395cleanup:
1396        ep->ep_state &= ~SET_DEQ_PENDING;
1397        ep->queued_deq_seg = NULL;
1398        ep->queued_deq_ptr = NULL;
1399        /* Restart any rings with pending URBs */
1400        ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1401}
1402
1403static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1404                union xhci_trb *trb, u32 cmd_comp_code)
1405{
1406        struct xhci_virt_ep *ep;
1407        struct xhci_ep_ctx *ep_ctx;
1408        unsigned int ep_index;
1409
1410        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
1411        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
1412        if (!ep)
1413                return;
1414
1415        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
1416        trace_xhci_handle_cmd_reset_ep(ep_ctx);
1417
1418        /* This command will only fail if the endpoint wasn't halted,
1419         * but we don't care.
1420         */
1421        xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
1422                "Ignoring reset ep completion code of %u", cmd_comp_code);
1423
1424        /* Clean up cancelled TDs as the ep is stopped. May queue a Set TR Deq cmd */
1425        xhci_invalidate_cancelled_tds(ep);
1426
1427        if (xhci->quirks & XHCI_RESET_EP_QUIRK)
1428                xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
1429        /* Clear our internal halted state */
1430        ep->ep_state &= ~EP_HALTED;
1431
1432        xhci_giveback_invalidated_tds(ep);
1433
1434        /* if this was a soft reset, then restart */
1435        if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
1436                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1437}
1438
1439static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
1440                struct xhci_command *command, u32 cmd_comp_code)
1441{
1442        if (cmd_comp_code == COMP_SUCCESS)
1443                command->slot_id = slot_id;
1444        else
1445                command->slot_id = 0;
1446}
1447
1448static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
1449{
1450        struct xhci_virt_device *virt_dev;
1451        struct xhci_slot_ctx *slot_ctx;
1452
1453        virt_dev = xhci->devs[slot_id];
1454        if (!virt_dev)
1455                return;
1456
1457        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1458        trace_xhci_handle_cmd_disable_slot(slot_ctx);
1459
1460        if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
1461                /* Delete default control endpoint resources */
1462                xhci_free_device_endpoint_resources(xhci, virt_dev, true);
1463        xhci_free_virt_device(xhci, slot_id);
1464}
1465
1466static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
1467                u32 cmd_comp_code)
1468{
1469        struct xhci_virt_device *virt_dev;
1470        struct xhci_input_control_ctx *ctrl_ctx;
1471        struct xhci_ep_ctx *ep_ctx;
1472        unsigned int ep_index;
1473        unsigned int ep_state;
1474        u32 add_flags, drop_flags;
1475
1476        /*
1477         * Configure endpoint commands can come from the USB core
1478         * configuration or alt setting changes, or because the HW
1479         * needed an extra configure endpoint command after a reset
1480         * endpoint command or streams were being configured.
1481         * If the command was for a halted endpoint, the xHCI driver
1482         * is not waiting on the configure endpoint command.
1483         */
1484        virt_dev = xhci->devs[slot_id];
1485        if (!virt_dev)
1486                return;
1487        ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
1488        if (!ctrl_ctx) {
1489                xhci_warn(xhci, "Could not get input context, bad type.\n");
1490                return;
1491        }
1492
1493        add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1494        drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1495        /* Input ctx add_flags are the endpoint index plus one */
1496        ep_index = xhci_last_valid_endpoint(add_flags) - 1;
1497
1498        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
1499        trace_xhci_handle_cmd_config_ep(ep_ctx);
1500
1501        /* A usb_set_interface() call directly after clearing a halted
1502         * condition may race on this quirky hardware.  Not worth
1503         * worrying about, since this is prototype hardware.  Not sure
1504         * if this will work for streams, but streams support was
1505         * untested on this prototype.
1506         */
1507        if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
1508                        ep_index != (unsigned int) -1 &&
1509                        add_flags - SLOT_FLAG == drop_flags) {
1510                ep_state = virt_dev->eps[ep_index].ep_state;
1511                if (!(ep_state & EP_HALTED))
1512                        return;
1513                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1514                                "Completed config ep cmd - "
1515                                "last ep index = %d, state = %d",
1516                                ep_index, ep_state);
1517                /* Clear internal halted state and restart ring(s) */
1518                virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
1519                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1520                return;
1521        }
1522        return;
1523}
1524
1525static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
1526{
1527        struct xhci_virt_device *vdev;
1528        struct xhci_slot_ctx *slot_ctx;
1529
1530        vdev = xhci->devs[slot_id];
1531        if (!vdev)
1532                return;
1533        slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1534        trace_xhci_handle_cmd_addr_dev(slot_ctx);
1535}
1536
1537static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
1538{
1539        struct xhci_virt_device *vdev;
1540        struct xhci_slot_ctx *slot_ctx;
1541
1542        vdev = xhci->devs[slot_id];
1543        if (!vdev) {
1544                xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
1545                          slot_id);
1546                return;
1547        }
1548        slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
1549        trace_xhci_handle_cmd_reset_dev(slot_ctx);
1550
1551        xhci_dbg(xhci, "Completed reset device command.\n");
1552}
1553
1554static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
1555                struct xhci_event_cmd *event)
1556{
1557        if (!(xhci->quirks & XHCI_NEC_HOST)) {
1558                xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
1559                return;
1560        }
1561        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
1562                        "NEC firmware version %2x.%02x",
1563                        NEC_FW_MAJOR(le32_to_cpu(event->status)),
1564                        NEC_FW_MINOR(le32_to_cpu(event->status)));
1565}
1566
1567static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
1568{
1569        list_del(&cmd->cmd_list);
1570
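        /*
         * If a submitter is waiting on this command, signal its completion
         * and let the waiter free the xhci_command; otherwise free it here.
         */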
1571        if (cmd->completion) {
1572                cmd->status = status;
1573                complete(cmd->completion);
1574        } else {
1575                kfree(cmd);
1576        }
1577}
1578
1579void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
1580{
1581        struct xhci_command *cur_cmd, *tmp_cmd;
1582        xhci->current_cmd = NULL;
1583        list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
1584                xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
1585}
1586
1587void xhci_handle_command_timeout(struct work_struct *work)
1588{
1589        struct xhci_hcd *xhci;
1590        unsigned long flags;
1591        u64 hw_ring_state;
1592
1593        xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);
1594
1595        spin_lock_irqsave(&xhci->lock, flags);
1596
1597        /*
1598         * If timeout work is pending, or current_cmd is NULL, it means we
1599         * raced with command completion. Command is handled so just return.
1600         */
1601        if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
1602                spin_unlock_irqrestore(&xhci->lock, flags);
1603                return;
1604        }
1605        /* mark this command to be cancelled */
1606        xhci->current_cmd->status = COMP_COMMAND_ABORTED;
1607
1608        /* Make sure command ring is running before aborting it */
1609        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
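        /* An all-ones read means the register space is gone, i.e. the host died */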
1610        if (hw_ring_state == ~(u64)0) {
1611                xhci_hc_died(xhci);
1612                goto time_out_completed;
1613        }
1614
1615        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
1616            (hw_ring_state & CMD_RING_RUNNING))  {
1617                /* Prevent new doorbell, and start command abort */
1618                xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
1619                xhci_dbg(xhci, "Command timeout\n");
1620                xhci_abort_cmd_ring(xhci, flags);
1621                goto time_out_completed;
1622        }
1623
1624        /* host removed. Bail out */
1625        if (xhci->xhc_state & XHCI_STATE_REMOVING) {
1626                xhci_dbg(xhci, "host removed, ring start fail?\n");
1627                xhci_cleanup_command_queue(xhci);
1628
1629                goto time_out_completed;
1630        }
1631
1632        /* command timeout on stopped ring, ring can't be aborted */
1633        xhci_dbg(xhci, "Command timeout on stopped ring\n");
1634        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
1635
1636time_out_completed:
1637        spin_unlock_irqrestore(&xhci->lock, flags);
1638        return;
1639}
1640
1641static void handle_cmd_completion(struct xhci_hcd *xhci,
1642                struct xhci_event_cmd *event)
1643{
1644        unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
1645        u64 cmd_dma;
1646        dma_addr_t cmd_dequeue_dma;
1647        u32 cmd_comp_code;
1648        union xhci_trb *cmd_trb;
1649        struct xhci_command *cmd;
1650        u32 cmd_type;
1651
1652        if (slot_id >= MAX_HC_SLOTS) {
1653                xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
1654                return;
1655        }
1656
1657        cmd_dma = le64_to_cpu(event->cmd_trb);
1658        cmd_trb = xhci->cmd_ring->dequeue;
1659
1660        trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);
1661
1662        cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
1663                        cmd_trb);
1664        /*
1665         * Check whether the completion event is for our internally kept
1666         * command.
1667         */
1668        if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
1669                xhci_warn(xhci,
1670                          "ERROR mismatched command completion event\n");
1671                return;
1672        }
1673
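        /*
         * Commands are completed in order, so the first entry on cmd_list is
         * the command whose TRB is at the ring's dequeue pointer.
         */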
1674        cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);
1675
1676        cancel_delayed_work(&xhci->cmd_timer);
1677
1678        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
1679
1680        /* If CMD ring stopped we own the trbs between enqueue and dequeue */
1681        if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
1682                complete_all(&xhci->cmd_ring_stop_completion);
1683                return;
1684        }
1685
1686        if (cmd->command_trb != xhci->cmd_ring->dequeue) {
1687                xhci_err(xhci,
1688                         "Command completion event does not match command\n");
1689                return;
1690        }
1691
1692        /*
1693         * Host aborted the command ring, check if the current command was
1694         * supposed to be aborted, otherwise continue normally.
1695         * The command ring is stopped now, but the xHC will issue a Command
1696         * Ring Stopped event which will cause us to restart it.
1697         */
1698        if (cmd_comp_code == COMP_COMMAND_ABORTED) {
1699                xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
1700                if (cmd->status == COMP_COMMAND_ABORTED) {
1701                        if (xhci->current_cmd == cmd)
1702                                xhci->current_cmd = NULL;
1703                        goto event_handled;
1704                }
1705        }
1706
1707        cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
1708        switch (cmd_type) {
1709        case TRB_ENABLE_SLOT:
1710                xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
1711                break;
1712        case TRB_DISABLE_SLOT:
1713                xhci_handle_cmd_disable_slot(xhci, slot_id);
1714                break;
1715        case TRB_CONFIG_EP:
1716                if (!cmd->completion)
1717                        xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
1718                break;
1719        case TRB_EVAL_CONTEXT:
1720                break;
1721        case TRB_ADDR_DEV:
1722                xhci_handle_cmd_addr_dev(xhci, slot_id);
1723                break;
1724        case TRB_STOP_RING:
1725                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1726                                le32_to_cpu(cmd_trb->generic.field[3])));
1727                if (!cmd->completion)
1728                        xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
1729                                                cmd_comp_code);
1730                break;
1731        case TRB_SET_DEQ:
1732                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1733                                le32_to_cpu(cmd_trb->generic.field[3])));
1734                xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
1735                break;
1736        case TRB_CMD_NOOP:
1737                /* Is this an aborted command turned to NO-OP? */
1738                if (cmd->status == COMP_COMMAND_RING_STOPPED)
1739                        cmd_comp_code = COMP_COMMAND_RING_STOPPED;
1740                break;
1741        case TRB_RESET_EP:
1742                WARN_ON(slot_id != TRB_TO_SLOT_ID(
1743                                le32_to_cpu(cmd_trb->generic.field[3])));
1744                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1745                break;
1746        case TRB_RESET_DEV:
1747                /* SLOT_ID field in reset device cmd completion event TRB is 0.
1748                 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1749                 */
1750                slot_id = TRB_TO_SLOT_ID(
1751                                le32_to_cpu(cmd_trb->generic.field[3]));
1752                xhci_handle_cmd_reset_dev(xhci, slot_id);
1753                break;
1754        case TRB_NEC_GET_FW:
1755                xhci_handle_cmd_nec_get_fw(xhci, event);
1756                break;
1757        default:
1758                /* Skip over unknown commands on the event ring */
1759                xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
1760                break;
1761        }
1762
1763        /* restart timer if this wasn't the last command */
1764        if (!list_is_singular(&xhci->cmd_list)) {
1765                xhci->current_cmd = list_first_entry(&cmd->cmd_list,
1766                                                struct xhci_command, cmd_list);
1767                xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
1768        } else if (xhci->current_cmd == cmd) {
1769                xhci->current_cmd = NULL;
1770        }
1771
1772event_handled:
1773        xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
1774
1775        inc_deq(xhci, xhci->cmd_ring);
1776}
1777
1778static void handle_vendor_event(struct xhci_hcd *xhci,
1779                                union xhci_trb *event, u32 trb_type)
1780{
1781        xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
1782        if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
1783                handle_cmd_completion(xhci, &event->event_cmd);
1784}
1785
1786static void handle_device_notification(struct xhci_hcd *xhci,
1787                union xhci_trb *event)
1788{
1789        u32 slot_id;
1790        struct usb_device *udev;
1791
1792        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
1793        if (!xhci->devs[slot_id]) {
1794                xhci_warn(xhci, "Device Notification event for "
1795                                "unused slot %u\n", slot_id);
1796                return;
1797        }
1798
1799        xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
1800                        slot_id);
1801        udev = xhci->devs[slot_id]->udev;
1802        if (udev && udev->parent)
1803                usb_wakeup_notification(udev->parent, udev->portnum);
1804}
1805
1806/*
1807 * Quirk handler for errata seen on the Cavium ThunderX2 processor xHCI
1808 * controller.
1809 * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1
1810 * if a connection to a USB 1 device is followed by another connection
1811 * to a USB 2 device.
1812 *
1813 * Reset the PHY after the USB device is disconnected if the device speed
1814 * is less than HCD_USB3.
1815 * Retry the reset sequence a maximum of 4 times, checking the PLL lock
1816 * status each time.
1817 */
1818static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1819{
1820        struct usb_hcd *hcd = xhci_to_hcd(xhci);
1821        u32 pll_lock_check;
1822        u32 retry_count = 4;
1823
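        /*
         * Offsets 0x1048 and 0x1070 below are vendor-specific PHY control and
         * PLL status registers on the ThunderX2 xHCI controller.
         */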
1824        do {
1825                /* Assert PHY reset */
1826                writel(0x6F, hcd->regs + 0x1048);
1827                udelay(10);
1828                /* De-assert the PHY reset */
1829                writel(0x7F, hcd->regs + 0x1048);
1830                udelay(200);
1831                pll_lock_check = readl(hcd->regs + 0x1070);
1832        } while (!(pll_lock_check & 0x1) && --retry_count);
1833}
1834
1835static void handle_port_status(struct xhci_hcd *xhci,
1836                union xhci_trb *event)
1837{
1838        struct usb_hcd *hcd;
1839        u32 port_id;
1840        u32 portsc, cmd_reg;
1841        int max_ports;
1842        int slot_id;
1843        unsigned int hcd_portnum;
1844        struct xhci_bus_state *bus_state;
1845        bool bogus_port_status = false;
1846        struct xhci_port *port;
1847
1848        /* Port status change events always have a successful completion code */
1849        if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
1850                xhci_warn(xhci,
1851                          "WARN: xHC returned failed port status event\n");
1852
1853        port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1854        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1855
1856        if ((port_id <= 0) || (port_id > max_ports)) {
1857                xhci_warn(xhci, "Port change event with invalid port ID %d\n",
1858                          port_id);
1859                inc_deq(xhci, xhci->event_ring);
1860                return;
1861        }
1862
1863        port = &xhci->hw_ports[port_id - 1];
1864        if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
1865                xhci_warn(xhci, "Port change event, no port for port ID %u\n",
1866                          port_id);
1867                bogus_port_status = true;
1868                goto cleanup;
1869        }
1870
1871        /* We might get interrupts after shared_hcd is removed */
1872        if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
1873                xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
1874                bogus_port_status = true;
1875                goto cleanup;
1876        }
1877
1878        hcd = port->rhub->hcd;
1879        bus_state = &port->rhub->bus_state;
1880        hcd_portnum = port->hcd_portnum;
1881        portsc = readl(port->addr);
1882
1883        xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
1884                 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
1885
1886        trace_xhci_handle_port_status(hcd_portnum, portsc);
1887
1888        if (hcd->state == HC_STATE_SUSPENDED) {
1889                xhci_dbg(xhci, "resume root hub\n");
1890                usb_hcd_resume_root_hub(hcd);
1891        }
1892
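        /* A USB3 port in the SS.Inactive link state hit an error; flag its device */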
1893        if (hcd->speed >= HCD_USB3 &&
1894            (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
1895                slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1896                if (slot_id && xhci->devs[slot_id])
1897                        xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
1898        }
1899
1900        if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
1901                xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1902
1903                cmd_reg = readl(&xhci->op_regs->command);
1904                if (!(cmd_reg & CMD_RUN)) {
1905                        xhci_warn(xhci, "xHC is not running.\n");
1906                        goto cleanup;
1907                }
1908
1909                if (DEV_SUPERSPEED_ANY(portsc)) {
1910                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1911                        /* Set a flag to say the port signaled remote wakeup,
1912                         * so we can tell the difference between the end of a
1913                         * device-initiated and a host-initiated resume.
1914                         */
1915                        bus_state->port_remote_wakeup |= 1 << hcd_portnum;
1916                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1917                        usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1918                        xhci_set_link_state(xhci, port, XDEV_U0);
1919                        /* Need to wait until the next link state change
1920                         * indicates the device is actually in U0.
1921                         */
1922                        bogus_port_status = true;
1923                        goto cleanup;
1924                } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
1925                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
1926                        bus_state->resume_done[hcd_portnum] = jiffies +
1927                                msecs_to_jiffies(USB_RESUME_TIMEOUT);
1928                        set_bit(hcd_portnum, &bus_state->resuming_ports);
1929                        /* Do the rest in GetPortStatus after the resume time delay.
1930                         * Avoid polling roothub status before that so that the
1931                         * USB device's auto-resume latency stays around ~40ms.
1932                         */
1933                        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1934                        mod_timer(&hcd->rh_timer,
1935                                  bus_state->resume_done[hcd_portnum]);
1936                        usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1937                        bogus_port_status = true;
1938                }
1939        }
1940
1941        if ((portsc & PORT_PLC) &&
1942            DEV_SUPERSPEED_ANY(portsc) &&
1943            ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
1944             (portsc & PORT_PLS_MASK) == XDEV_U1 ||
1945             (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1946                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1947                complete(&bus_state->u3exit_done[hcd_portnum]);
1948                /* We've just brought the device into U0/1/2 through either the
1949                 * Resume state after a device remote wakeup, or through the
1950                 * U3Exit state after a host-initiated resume.  If it's a device
1951                 * initiated remote wake, don't pass up the link state change,
1952                 * so the roothub behavior is consistent with external
1953                 * USB 3.0 hub behavior.
1954                 */
1955                slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1956                if (slot_id && xhci->devs[slot_id])
1957                        xhci_ring_device(xhci, slot_id);
1958                if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
1959                        xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1960                        usb_wakeup_notification(hcd->self.root_hub,
1961                                        hcd_portnum + 1);
1962                        bogus_port_status = true;
1963                        goto cleanup;
1964                }
1965        }
1966
1967        /*
1968         * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1969         * RExit to a disconnect state).  If so, let the driver know it's
1970         * out of the RExit state.
1971         */
1972        if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
1973                        test_and_clear_bit(hcd_portnum,
1974                                &bus_state->rexit_ports)) {
1975                complete(&bus_state->rexit_done[hcd_portnum]);
1976                bogus_port_status = true;
1977                goto cleanup;
1978        }
1979
1980        if (hcd->speed < HCD_USB3) {
1981                xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1982                if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
1983                    (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
1984                        xhci_cavium_reset_phy_quirk(xhci);
1985        }
1986
1987cleanup:
1988        /* Update event ring dequeue pointer before dropping the lock */
1989        inc_deq(xhci, xhci->event_ring);
1990
1991        /* Don't make the USB core poll the roothub if we got a bad port status
1992         * change event.  Besides, at that point we can't tell which roothub
1993         * (USB 2.0 or USB 3.0) to kick.
1994         */
1995        if (bogus_port_status)
1996                return;
1997
1998        /*
1999         * xHCI port-status-change events occur when the "or" of all the
2000         * status-change bits in the portsc register changes from 0 to 1.
2001         * New status changes won't cause an event if any other change
2002         * bits are still set.  When an event occurs, switch over to
2003         * polling to avoid losing status changes.
2004         */
2005        xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
2006        set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2007        spin_unlock(&xhci->lock);
2008        /* Pass this up to the core */
2009        usb_hcd_poll_rh_status(hcd);
2010        spin_lock(&xhci->lock);
2011}
2012
2013/*
2014 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
2015 * at end_trb, which may be in another segment.  If the suspect DMA address is a
2016 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
2017 * returns NULL.
2018 */
2019struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
2020                struct xhci_segment *start_seg,
2021                union xhci_trb  *start_trb,
2022                union xhci_trb  *end_trb,
2023                dma_addr_t      suspect_dma,
2024                bool            debug)
2025{
2026        dma_addr_t start_dma;
2027        dma_addr_t end_seg_dma;
2028        dma_addr_t end_trb_dma;
2029        struct xhci_segment *cur_seg;
2030
2031        start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
2032        cur_seg = start_seg;
2033
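        /*
         * Walk the ring one segment at a time, starting at start_seg, and
         * check whether suspect_dma lies between the TD's first TRB and its
         * last TRB.  A TD may wrap around the end of a single-segment ring,
         * which is handled below.
         */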
2034        do {
2035                if (start_dma == 0)
2036                        return NULL;
2037                /* We may get an event for a Link TRB in the middle of a TD */
2038                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2039                                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
2040                /* If the end TRB isn't in this segment, this is set to 0 */
2041                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
2042
2043                if (debug)
2044                        xhci_warn(xhci,
2045                                "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
2046                                (unsigned long long)suspect_dma,
2047                                (unsigned long long)start_dma,
2048                                (unsigned long long)end_trb_dma,
2049                                (unsigned long long)cur_seg->dma,
2050                                (unsigned long long)end_seg_dma);
2051
2052                if (end_trb_dma > 0) {
2053                        /* The end TRB is in this segment, so suspect should be here */
2054                        if (start_dma <= end_trb_dma) {
2055                                if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
2056                                        return cur_seg;
2057                        } else {
2058                                /* Case for one segment with
2059                                 * a TD wrapped around to the top
2060                                 */
2061                                if ((suspect_dma >= start_dma &&
2062                                                        suspect_dma <= end_seg_dma) ||
2063                                                (suspect_dma >= cur_seg->dma &&
2064                                                 suspect_dma <= end_trb_dma))
2065                                        return cur_seg;
2066                        }
2067                        return NULL;
2068                } else {
2069                        /* Might still be somewhere in this segment */
2070                        if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
2071                                return cur_seg;
2072                }
2073                cur_seg = cur_seg->next;
2074                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2075        } while (cur_seg != start_seg);
2076
2077        return NULL;
2078}
2079
2080static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
2081                struct xhci_virt_ep *ep)
2082{
2083        /*
2084         * As part of low/full-speed endpoint-halt processing
2085         * we must clear the TT buffer (USB 2.0 specification 11.17.5).
2086         */
2087        if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
2088            (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
2089            !(ep->ep_state & EP_CLEARING_TT)) {
2090                ep->ep_state |= EP_CLEARING_TT;
2091                td->urb->ep->hcpriv = td->urb->dev;
2092                if (usb_hub_clear_tt_buffer(td->urb))
2093                        ep->ep_state &= ~EP_CLEARING_TT;
2094        }
2095}
2096
2097/* Check if an error has halted the endpoint ring.  The class driver will
2098 * cleanup the halt for a non-default control endpoint if we indicate a stall.
2099 * However, a babble and other errors also halt the endpoint ring, and the class
2100 * driver won't clear the halt in that case, so we need to issue a Set Transfer
2101 * Ring Dequeue Pointer command manually.
2102 */
2103static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
2104                struct xhci_ep_ctx *ep_ctx,
2105                unsigned int trb_comp_code)
2106{
2107        /* TRB completion codes that may require a manual halt cleanup */
2108        if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
2109                        trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
2110                        trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
2111                /* The 0.95 spec says a babbling control endpoint
2112                 * is not halted. The 0.96 spec says it is.  Some HW
2113                 * claims to be 0.95 compliant, but it halts the control
2114                 * endpoint anyway.  Check if a babble halted the
2115                 * endpoint.
2116                 */
2117                if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
2118                        return 1;
2119
2120        return 0;
2121}
2122
2123int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
2124{
2125        if (trb_comp_code >= 224 && trb_comp_code <= 255) {
2126                /* Vendor defined "informational" completion code,
2127                 * treat as not-an-error.
2128                 */
2129                xhci_dbg(xhci, "Vendor defined info completion code %u\n",
2130                                trb_comp_code);
2131                xhci_dbg(xhci, "Treating code as success.\n");
2132                return 1;
2133        }
2134        return 0;
2135}
2136
2137static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2138                     struct xhci_ring *ep_ring, struct xhci_td *td,
2139                     u32 trb_comp_code)
2140{
2141        struct xhci_ep_ctx *ep_ctx;
2142
2143        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2144
2145        switch (trb_comp_code) {
2146        case COMP_STOPPED_LENGTH_INVALID:
2147        case COMP_STOPPED_SHORT_PACKET:
2148        case COMP_STOPPED:
2149                /*
2150                 * The "Stop Endpoint" completion will take care of any
2151                 * stopped TDs. A stopped TD may be restarted, so don't update
2152                 * the ring dequeue pointer or take this TD off any lists yet.
2153                 */
2154                return 0;
2155        case COMP_USB_TRANSACTION_ERROR:
2156        case COMP_BABBLE_DETECTED_ERROR:
2157        case COMP_SPLIT_TRANSACTION_ERROR:
2158                /*
2159                 * If the endpoint context state is not halted we might be
2160                 * racing with a reset endpoint command issued by an unsuccessful
2161                 * stop endpoint completion (context error). In that case the
2162                 * td should be on the cancelled list, and the EP_HALTED flag set.
2163                 *
2164                 * Alternatively, the endpoint is not halted because the 0.95 spec
2165                 * states that a babbling control endpoint should not halt, while
2166                 * the 0.96 spec says it should.  Some HW claims to be 0.95
2167                 * compliant, but it halts the control endpoint anyway.
2168                 */
2169                if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
2170                        /*
2171                         * If EP_HALTED is set and TD is on the cancelled list
2172                         * the TD and dequeue pointer will be handled by reset
2173                         * ep command completion
2174                         */
2175                        if ((ep->ep_state & EP_HALTED) &&
2176                            !list_empty(&td->cancelled_td_list)) {
2177                                xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
2178                                         (unsigned long long)xhci_trb_virt_to_dma(
2179                                                 td->start_seg, td->first_trb));
2180                                return 0;
2181                        }
2182                        /* endpoint not halted, don't reset it */
2183                        break;
2184                }
2185                /* Almost same procedure as for STALL_ERROR below */
2186                xhci_clear_hub_tt_buffer(xhci, td, ep);
2187                xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2188                                            EP_HARD_RESET);
2189                return 0;
2190        case COMP_STALL_ERROR:
2191                /*
2192                 * xhci internal endpoint state will go to a "halt" state for
2193                 * any stall, including default control pipe protocol stall.
2194                 * To clear the host side halt we need to issue a reset endpoint
2195                 * command, followed by a set dequeue command to move past the
2196                 * TD.
2197                 * Class drivers clear the device side halt from a functional
2198                 * stall later. Hub TT buffer should only be cleared for FS/LS
2199                 * devices behind HS hubs for functional stalls.
2200                 */
2201                if (ep->ep_index != 0)
2202                        xhci_clear_hub_tt_buffer(xhci, td, ep);
2203
2204                xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2205                                            EP_HARD_RESET);
2206
2207                return 0; /* xhci_handle_halted_endpoint marked td cancelled */
2208        default:
2209                break;
2210        }
2211
2212        /* Update ring dequeue pointer */
2213        ep_ring->dequeue = td->last_trb;
2214        ep_ring->deq_seg = td->last_trb_seg;
2215        ep_ring->num_trbs_free += td->num_trbs - 1;
2216        inc_deq(xhci, ep_ring);
2217
2218        return xhci_td_cleanup(xhci, td, ep_ring, td->status);
2219}
2220
2221/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
2222static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
2223                           union xhci_trb *stop_trb)
2224{
2225        u32 sum;
2226        union xhci_trb *trb = ring->dequeue;
2227        struct xhci_segment *seg = ring->deq_seg;
2228
2229        for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2230                if (!trb_is_noop(trb) && !trb_is_link(trb))
2231                        sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
2232        }
2233        return sum;
2234}
2235
2236/*
2237 * Process control tds, update urb status and actual_length.
2238 */
2239static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2240                struct xhci_ring *ep_ring,  struct xhci_td *td,
2241                           union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2242{
2243        struct xhci_ep_ctx *ep_ctx;
2244        u32 trb_comp_code;
2245        u32 remaining, requested;
2246        u32 trb_type;
2247
2248        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
2249        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2250        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2251        requested = td->urb->transfer_buffer_length;
2252        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2253
2254        switch (trb_comp_code) {
2255        case COMP_SUCCESS:
2256                if (trb_type != TRB_STATUS) {
2257                        xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
2258                                  (trb_type == TRB_DATA) ? "data" : "setup");
2259                        td->status = -ESHUTDOWN;
2260                        break;
2261                }
2262                td->status = 0;
2263                break;
2264        case COMP_SHORT_PACKET:
2265                td->status = 0;
2266                break;
2267        case COMP_STOPPED_SHORT_PACKET:
2268                if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2269                        td->urb->actual_length = remaining;
2270                else
2271                        xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
2272                goto finish_td;
2273        case COMP_STOPPED:
2274                switch (trb_type) {
2275                case TRB_SETUP:
2276                        td->urb->actual_length = 0;
2277                        goto finish_td;
2278                case TRB_DATA:
2279                case TRB_NORMAL:
2280                        td->urb->actual_length = requested - remaining;
2281                        goto finish_td;
2282                case TRB_STATUS:
2283                        td->urb->actual_length = requested;
2284                        goto finish_td;
2285                default:
2286                        xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
2287                                  trb_type);
2288                        goto finish_td;
2289                }
2290        case COMP_STOPPED_LENGTH_INVALID:
2291                goto finish_td;
2292        default:
2293                if (!xhci_requires_manual_halt_cleanup(xhci,
2294                                                       ep_ctx, trb_comp_code))
2295                        break;
2296                xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
2297                         trb_comp_code, ep->ep_index);
2298                fallthrough;
2299        case COMP_STALL_ERROR:
2300                /* Did we transfer part of the data (middle) phase? */
2301                if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2302                        td->urb->actual_length = requested - remaining;
2303                else if (!td->urb_length_set)
2304                        td->urb->actual_length = 0;
2305                goto finish_td;
2306        }
2307
2308        /* stopped at setup stage, no data transferred */
2309        if (trb_type == TRB_SETUP)
2310                goto finish_td;
2311
2312        /*
2313         * if on data stage then update the actual_length of the URB and flag it
2314         * as set, so it won't be overwritten in the event for the last TRB.
2315         */
2316        if (trb_type == TRB_DATA ||
2317                trb_type == TRB_NORMAL) {
2318                td->urb_length_set = true;
2319                td->urb->actual_length = requested - remaining;
2320                xhci_dbg(xhci, "Waiting for status stage event\n");
2321                return 0;
2322        }
2323
2324        /* at status stage */
2325        if (!td->urb_length_set)
2326                td->urb->actual_length = requested;
2327
2328finish_td:
2329        return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2330}
2331
2332/*
2333 * Process isochronous tds, update urb packet status and actual_length.
2334 */
2335static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2336                struct xhci_ring *ep_ring, struct xhci_td *td,
2337                union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2338{
2339        struct urb_priv *urb_priv;
2340        int idx;
2341        struct usb_iso_packet_descriptor *frame;
2342        u32 trb_comp_code;
2343        bool sum_trbs_for_length = false;
2344        u32 remaining, requested, ep_trb_len;
2345        int short_framestatus;
2346
2347        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2348        urb_priv = td->urb->hcpriv;
2349        idx = urb_priv->num_tds_done;
2350        frame = &td->urb->iso_frame_desc[idx];
2351        requested = frame->length;
2352        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2353        ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2354        short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2355                -EREMOTEIO : 0;
2356
2357        /* handle completion code */
2358        switch (trb_comp_code) {
2359        case COMP_SUCCESS:
2360                if (remaining) {
2361                        frame->status = short_framestatus;
2362                        if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2363                                sum_trbs_for_length = true;
2364                        break;
2365                }
2366                frame->status = 0;
2367                break;
2368        case COMP_SHORT_PACKET:
2369                frame->status = short_framestatus;
2370                sum_trbs_for_length = true;
2371                break;
2372        case COMP_BANDWIDTH_OVERRUN_ERROR:
2373                frame->status = -ECOMM;
2374                break;
2375        case COMP_ISOCH_BUFFER_OVERRUN:
2376        case COMP_BABBLE_DETECTED_ERROR:
2377                frame->status = -EOVERFLOW;
2378                break;
2379        case COMP_INCOMPATIBLE_DEVICE_ERROR:
2380        case COMP_STALL_ERROR:
2381                frame->status = -EPROTO;
2382                break;
2383        case COMP_USB_TRANSACTION_ERROR:
2384                frame->status = -EPROTO;
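                /* Don't give the TD back until the event for its last TRB arrives */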
2385                if (ep_trb != td->last_trb)
2386                        return 0;
2387                break;
2388        case COMP_STOPPED:
2389                sum_trbs_for_length = true;
2390                break;
2391        case COMP_STOPPED_SHORT_PACKET:
2392                /* field normally containing residue now contains transferred */
2393                frame->status = short_framestatus;
2394                requested = remaining;
2395                break;
2396        case COMP_STOPPED_LENGTH_INVALID:
2397                requested = 0;
2398                remaining = 0;
2399                break;
2400        default:
2401                sum_trbs_for_length = true;
2402                frame->status = -1;
2403                break;
2404        }
2405
2406        if (sum_trbs_for_length)
2407                frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
2408                        ep_trb_len - remaining;
2409        else
2410                frame->actual_length = requested;
2411
2412        td->urb->actual_length += frame->actual_length;
2413
2414        return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2415}
2416
2417static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2418                        struct xhci_virt_ep *ep, int status)
2419{
2420        struct urb_priv *urb_priv;
2421        struct usb_iso_packet_descriptor *frame;
2422        int idx;
2423
2424        urb_priv = td->urb->hcpriv;
2425        idx = urb_priv->num_tds_done;
2426        frame = &td->urb->iso_frame_desc[idx];
2427
2428        /* The transfer is partly done. */
2429        frame->status = -EXDEV;
2430
2431        /* calc actual length */
2432        frame->actual_length = 0;
2433
2434        /* Update ring dequeue pointer */
2435        ep->ring->dequeue = td->last_trb;
2436        ep->ring->deq_seg = td->last_trb_seg;
2437        ep->ring->num_trbs_free += td->num_trbs - 1;
2438        inc_deq(xhci, ep->ring);
2439
2440        return xhci_td_cleanup(xhci, td, ep->ring, status);
2441}
2442
2443/*
2444 * Process bulk and interrupt tds, update urb status and actual_length.
2445 */
2446static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2447                struct xhci_ring *ep_ring, struct xhci_td *td,
2448                union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2449{
2450        struct xhci_slot_ctx *slot_ctx;
2451        u32 trb_comp_code;
2452        u32 remaining, requested, ep_trb_len;
2453
2454        slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2455        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2456        remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2457        ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2458        requested = td->urb->transfer_buffer_length;
2459
2460        switch (trb_comp_code) {
2461        case COMP_SUCCESS:
2462                ep_ring->err_count = 0;
2463                /* handle success with untransferred data as short packet */
2464                if (ep_trb != td->last_trb || remaining) {
2465                        xhci_warn(xhci, "WARN Successful completion on short TX\n");
2466                        xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2467                                 td->urb->ep->desc.bEndpointAddress,
2468                                 requested, remaining);
2469                }
2470                td->status = 0;
2471                break;
2472        case COMP_SHORT_PACKET:
2473                xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2474                         td->urb->ep->desc.bEndpointAddress,
2475                         requested, remaining);
2476                td->status = 0;
2477                break;
2478        case COMP_STOPPED_SHORT_PACKET:
2479                td->urb->actual_length = remaining;
2480                goto finish_td;
2481        case COMP_STOPPED_LENGTH_INVALID:
2482                /* stopped on ep trb with invalid length, exclude it */
2483                ep_trb_len      = 0;
2484                remaining       = 0;
2485                break;
2486        case COMP_USB_TRANSACTION_ERROR:
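                /* Try a soft retry unless it is disabled by quirk, the retry
                 * budget is exhausted, or the device sits behind a TT hub */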
2487                if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
2488                    (ep_ring->err_count++ > MAX_SOFT_RETRY) ||
2489                    le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
2490                        break;
2491
2492                td->status = 0;
2493
2494                xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
2495                                            EP_SOFT_RESET);
2496                return 0;
2497        default:
2498                /* do nothing */
2499                break;
2500        }
2501
2502        if (ep_trb == td->last_trb)
2503                td->urb->actual_length = requested - remaining;
2504        else
2505                td->urb->actual_length =
2506                        sum_trb_lengths(xhci, ep_ring, ep_trb) +
2507                        ep_trb_len - remaining;
2508finish_td:
2509        if (remaining > requested) {
2510                xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2511                          remaining);
2512                td->urb->actual_length = 0;
2513        }
2514
2515        return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2516}
2517
2518/*
2519 * If this function returns an error condition, it means it got a Transfer
2520 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2521 * At this point, the host controller is probably hosed and should be reset.
2522 */
2523static int handle_tx_event(struct xhci_hcd *xhci,
2524                struct xhci_transfer_event *event)
2525{
2526        struct xhci_virt_ep *ep;
2527        struct xhci_ring *ep_ring;
2528        unsigned int slot_id;
2529        int ep_index;
2530        struct xhci_td *td = NULL;
2531        dma_addr_t ep_trb_dma;
2532        struct xhci_segment *ep_seg;
2533        union xhci_trb *ep_trb;
2534        int status = -EINPROGRESS;
2535        struct xhci_ep_ctx *ep_ctx;
2536        struct list_head *tmp;
2537        u32 trb_comp_code;
2538        int td_num = 0;
2539        bool handling_skipped_tds = false;
2540
2541        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2542        ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2543        trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2544        ep_trb_dma = le64_to_cpu(event->buffer);
2545
2546        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
2547        if (!ep) {
2548                xhci_err(xhci, "ERROR Invalid Transfer event\n");
2549                goto err_out;
2550        }
2551
2552        ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
2553        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2554
2555        if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
2556                xhci_err(xhci,
2557                         "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
2558                          slot_id, ep_index);
2559                goto err_out;
2560        }
2561
2562        /* Some transfer events don't point to a TRB, see xhci 4.17.4 */
2563        if (!ep_ring) {
2564                switch (trb_comp_code) {
2565                case COMP_STALL_ERROR:
2566                case COMP_USB_TRANSACTION_ERROR:
2567                case COMP_INVALID_STREAM_TYPE_ERROR:
2568                case COMP_INVALID_STREAM_ID_ERROR:
2569                        xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
2570                                                    EP_SOFT_RESET);
2571                        goto cleanup;
2572                case COMP_RING_UNDERRUN:
2573                case COMP_RING_OVERRUN:
2574                case COMP_STOPPED_LENGTH_INVALID:
2575                        goto cleanup;
2576                default:
2577                        xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
2578                                 slot_id, ep_index);
2579                        goto err_out;
2580                }
2581        }
2582
2583        /* Count current td numbers if ep->skip is set */
2584        if (ep->skip) {
2585                list_for_each(tmp, &ep_ring->td_list)
2586                        td_num++;
2587        }
2588
2589        /* Look for common error cases */
2590        switch (trb_comp_code) {
2591        /* Skip codes that require special handling depending on
2592         * transfer type
2593         */
2594        case COMP_SUCCESS:
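                /* If there are untransferred bytes despite the success code,
                 * treat the completion as a short packet when the reported
                 * length can be trusted; otherwise warn about the broken HC. */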
2595                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2596                        break;
2597                if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
2598                    ep_ring->last_td_was_short)
2599                        trb_comp_code = COMP_SHORT_PACKET;
2600                else
2601                        xhci_warn_ratelimited(xhci,
2602                                              "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
2603                                              slot_id, ep_index);
2604                break;
2605        case COMP_SHORT_PACKET:
2606                break;
2607        /* Completion codes for endpoint stopped state */
2608        case COMP_STOPPED:
2609                xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2610                         slot_id, ep_index);
2611                break;
2612        case COMP_STOPPED_LENGTH_INVALID:
2613                xhci_dbg(xhci,
2614                         "Stopped on No-op or Link TRB for slot %u ep %u\n",
2615                         slot_id, ep_index);
2616                break;
2617        case COMP_STOPPED_SHORT_PACKET:
2618                xhci_dbg(xhci,
2619                         "Stopped with short packet transfer detected for slot %u ep %u\n",
2620                         slot_id, ep_index);
2621                break;
2622        /* Completion codes for endpoint halted state */
2623        case COMP_STALL_ERROR:
2624                xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2625                         ep_index);
2626                status = -EPIPE;
2627                break;
2628        case COMP_SPLIT_TRANSACTION_ERROR:
2629                xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
2630                         slot_id, ep_index);
2631                status = -EPROTO;
2632                break;
2633        case COMP_USB_TRANSACTION_ERROR:
2634                xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
2635                         slot_id, ep_index);
2636                status = -EPROTO;
2637                break;
2638        case COMP_BABBLE_DETECTED_ERROR:
2639                xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2640                         slot_id, ep_index);
2641                status = -EOVERFLOW;
2642                break;
2643        /* Completion codes for endpoint error state */
2644        case COMP_TRB_ERROR:
2645                xhci_warn(xhci,
2646                          "WARN: TRB error for slot %u ep %u on endpoint\n",
2647                          slot_id, ep_index);
2648                status = -EILSEQ;
2649                break;
2650        /* Completion codes not indicating an endpoint state change */
2651        case COMP_DATA_BUFFER_ERROR:
2652                xhci_warn(xhci,
2653                          "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2654                          slot_id, ep_index);
2655                status = -ENOSR;
2656                break;
2657        case COMP_BANDWIDTH_OVERRUN_ERROR:
2658                xhci_warn(xhci,
2659                          "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2660                          slot_id, ep_index);
2661                break;
2662        case COMP_ISOCH_BUFFER_OVERRUN:
2663                xhci_warn(xhci,
2664                          "WARN: buffer overrun event for slot %u ep %u on endpoint",
2665                          slot_id, ep_index);
2666                break;
2667        case COMP_RING_UNDERRUN:
2668                /*
2669                 * When the Isoch ring is empty, the xHC will generate
2670                 * a Ring Overrun Event for IN Isoch endpoint or Ring
2671                 * Underrun Event for OUT Isoch endpoint.
2672                 */
2673                xhci_dbg(xhci, "underrun event on endpoint\n");
2674                if (!list_empty(&ep_ring->td_list))
2675                        xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2676                                        "still with TDs queued?\n",
2677                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2678                                 ep_index);
2679                goto cleanup;
2680        case COMP_RING_OVERRUN:
2681                xhci_dbg(xhci, "overrun event on endpoint\n");
2682                if (!list_empty(&ep_ring->td_list))
2683                        xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2684                                        "still with TDs queued?\n",
2685                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2686                                 ep_index);
2687                goto cleanup;
2688        case COMP_MISSED_SERVICE_ERROR:
2689                /*
2690                 * When a Missed Service Error is encountered, the xHC may have
2691                 * skipped one or more isoc TDs.
2692                 * Set the skip flag of the endpoint; complete the missed TDs as
2693                 * short transfers the next time the ring is processed.
2694                 */
2695                ep->skip = true;
2696                xhci_dbg(xhci,
2697                         "Miss service interval error for slot %u ep %u, set skip flag\n",
2698                         slot_id, ep_index);
2699                goto cleanup;
2700        case COMP_NO_PING_RESPONSE_ERROR:
2701                ep->skip = true;
2702                xhci_dbg(xhci,
2703                         "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2704                         slot_id, ep_index);
2705                goto cleanup;
2706
2707        case COMP_INCOMPATIBLE_DEVICE_ERROR:
2708                /* needs disable slot command to recover */
2709                xhci_warn(xhci,
2710                          "WARN: detected an incompatible device for slot %u ep %u",
2711                          slot_id, ep_index);
2712                status = -EPROTO;
2713                break;
2714        default:
2715                if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2716                        status = 0;
2717                        break;
2718                }
2719                xhci_warn(xhci,
2720                          "ERROR Unknown event condition %u for slot %u ep %u, HC probably busted\n",
2721                          trb_comp_code, slot_id, ep_index);
2722                goto cleanup;
2723        }
2724
2725        do {
2726                /* This TRB should be in the TD at the head of this ring's
2727                 * TD list.
2728                 */
2729                if (list_empty(&ep_ring->td_list)) {
2730                        /*
2731                         * Don't print warnings if this is due to a stopped endpoint
2732                         * generating an extra completion event because the device
2733                         * was suspended, or an event for the last TRB of a
2734                         * short TD we already got a short event for.
2735                         * The short TD is already removed from the TD list.
2736                         */
2737
2738                        if (!(trb_comp_code == COMP_STOPPED ||
2739                              trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2740                              ep_ring->last_td_was_short)) {
2741                                xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2742                                                TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2743                                                ep_index);
2744                        }
2745                        if (ep->skip) {
2746                                ep->skip = false;
2747                                xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2748                                         slot_id, ep_index);
2749                        }
2750                        if (trb_comp_code == COMP_STALL_ERROR ||
2751                            xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2752                                                              trb_comp_code)) {
2753                                xhci_handle_halted_endpoint(xhci, ep,
2754                                                            ep_ring->stream_id,
2755                                                            NULL,
2756                                                            EP_HARD_RESET);
2757                        }
2758                        goto cleanup;
2759                }
2760
2761                /* We've skipped all the TDs on the ep ring when ep->skip is set */
2762                if (ep->skip && td_num == 0) {
2763                        ep->skip = false;
2764                        xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2765                                 slot_id, ep_index);
2766                        goto cleanup;
2767                }
2768
2769                td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2770                                      td_list);
2771                if (ep->skip)
2772                        td_num--;
2773
2774                /* Is this a TRB in the currently executing TD? */
2775                ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2776                                td->last_trb, ep_trb_dma, false);
2777
2778                /*
2779                 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
2780                 * is not in the current TD pointed to by ep_ring->dequeue because
2781                 * the hardware dequeue pointer is still at the previous TRB
2782                 * of the current TD. The previous TRB may be a Link TRB or the
2783                 * last TRB of the previous TD. The command completion handler
2784                 * will take care of the rest.
2785                 */
2786                if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2787                           trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
2788                        goto cleanup;
2789                }
2790
2791                if (!ep_seg) {
2792                        if (!ep->skip ||
2793                            !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2794                                /* Some host controllers give a spurious
2795                                 * successful event after a short transfer.
2796                                 * Ignore it.
2797                                 */
2798                                if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2799                                                ep_ring->last_td_was_short) {
2800                                        ep_ring->last_td_was_short = false;
2801                                        goto cleanup;
2802                                }
2803                                /* HC is busted, give up! */
2804                                xhci_err(xhci,
2805                                        "ERROR Transfer event TRB DMA ptr not "
2806                                        "part of current TD ep_index %d "
2807                                        "comp_code %u\n", ep_index,
2808                                        trb_comp_code);
2809                                trb_in_td(xhci, ep_ring->deq_seg,
2810                                          ep_ring->dequeue, td->last_trb,
2811                                          ep_trb_dma, true);
2812                                return -ESHUTDOWN;
2813                        }
2814
2815                        skip_isoc_td(xhci, td, ep, status);
2816                        goto cleanup;
2817                }
2818                if (trb_comp_code == COMP_SHORT_PACKET)
2819                        ep_ring->last_td_was_short = true;
2820                else
2821                        ep_ring->last_td_was_short = false;
2822
2823                if (ep->skip) {
2824                        xhci_dbg(xhci,
2825                                 "Found td. Clear skip flag for slot %u ep %u.\n",
2826                                 slot_id, ep_index);
2827                        ep->skip = false;
2828                }
2829
2830                ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2831                                                sizeof(*ep_trb)];
2832
2833                trace_xhci_handle_transfer(ep_ring,
2834                                (struct xhci_generic_trb *) ep_trb);
2835
2836                /*
2837                 * A no-op TRB could trigger an interrupt in a case where
2838                 * a URB was killed and a STALL_ERROR happens right
2839                 * after the endpoint ring stopped. Reset the halted
2840                 * endpoint. Otherwise, the endpoint remains stalled
2841                 * indefinitely.
2842                 */
2843
2844                if (trb_is_noop(ep_trb)) {
2845                        if (trb_comp_code == COMP_STALL_ERROR ||
2846                            xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2847                                                              trb_comp_code))
2848                                xhci_handle_halted_endpoint(xhci, ep,
2849                                                            ep_ring->stream_id,
2850                                                            td, EP_HARD_RESET);
2851                        goto cleanup;
2852                }
2853
2854                td->status = status;
2855
2856                /* update the urb's actual_length and give back to the core */
2857                if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2858                        process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
2859                else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2860                        process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
2861                else
2862                        process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
2863cleanup:
2864                handling_skipped_tds = ep->skip &&
2865                        trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
2866                        trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
2867
2868                /*
2869                 * Do not update event ring dequeue pointer if we're in a loop
2870                 * processing missed tds.
2871                 */
2872                if (!handling_skipped_tds)
2873                        inc_deq(xhci, xhci->event_ring);
2874
2875        /*
2876         * If ep->skip is set, it means there are missed TDs on the
2877         * endpoint ring that need to be taken care of.
2878         * Process them as short transfers until we reach the TD pointed to
2879         * by the event.
2880         */
2881        } while (handling_skipped_tds);
2882
2883        return 0;
2884
2885err_out:
2886        xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2887                 (unsigned long long) xhci_trb_virt_to_dma(
2888                         xhci->event_ring->deq_seg,
2889                         xhci->event_ring->dequeue),
2890                 lower_32_bits(le64_to_cpu(event->buffer)),
2891                 upper_32_bits(le64_to_cpu(event->buffer)),
2892                 le32_to_cpu(event->transfer_len),
2893                 le32_to_cpu(event->flags));
2894        return -ENODEV;
2895}
2896
2897/*
2898 * This function handles all OS-owned events on the event ring.  It may drop
2899 * xhci->lock between event processing (e.g. to pass up port status changes).
2900 * Returns >0 for "possibly more events to process" (caller should call again),
2901 * otherwise 0 if done.  In future, <0 returns should indicate error code.
2902 */
2903static int xhci_handle_event(struct xhci_hcd *xhci)
2904{
2905        union xhci_trb *event;
2906        int update_ptrs = 1;
2907        u32 trb_type;
2908        int ret;
2909
2910        /* Event ring hasn't been allocated yet. */
2911        if (!xhci->event_ring || !xhci->event_ring->dequeue) {
2912                xhci_err(xhci, "ERROR event ring not ready\n");
2913                return -ENOMEM;
2914        }
2915
2916        event = xhci->event_ring->dequeue;
2917        /* Does the HC or OS own the TRB? */
2918        if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2919            xhci->event_ring->cycle_state)
2920                return 0;
2921
2922        trace_xhci_handle_event(xhci->event_ring, &event->generic);
2923
2924        /*
2925         * Barrier between reading the TRB_CYCLE (valid) flag above and any
2926         * speculative reads of the event's flags/data below.
2927         */
2928        rmb();
2929        trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
2930        /* FIXME: Handle more event types. */
2931
2932        switch (trb_type) {
2933        case TRB_COMPLETION:
2934                handle_cmd_completion(xhci, &event->event_cmd);
2935                break;
2936        case TRB_PORT_STATUS:
2937                handle_port_status(xhci, event);
2938                update_ptrs = 0;
2939                break;
2940        case TRB_TRANSFER:
2941                ret = handle_tx_event(xhci, &event->trans_event);
2942                if (ret >= 0)
2943                        update_ptrs = 0;
2944                break;
2945        case TRB_DEV_NOTE:
2946                handle_device_notification(xhci, event);
2947                break;
2948        default:
2949                if (trb_type >= TRB_VENDOR_DEFINED_LOW)
2950                        handle_vendor_event(xhci, event, trb_type);
2951                else
2952                        xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
2953        }
2954        /* Any of the above functions may drop and re-acquire the lock, so check
2955         * to make sure a watchdog timer didn't mark the host as non-responsive.
2956         */
2957        if (xhci->xhc_state & XHCI_STATE_DYING) {
2958                xhci_dbg(xhci, "xHCI host dying, returning from "
2959                                "event handler.\n");
2960                return 0;
2961        }
2962
2963        if (update_ptrs)
2964                /* Update SW event ring dequeue pointer */
2965                inc_deq(xhci, xhci->event_ring);
2966
2967        /* Are there more items on the event ring?  Caller will call us again to
2968         * check.
2969         */
2970        return 1;
2971}
2972
2973/*
2974 * Update Event Ring Dequeue Pointer:
2975 * - When all events have finished
2976 * - To avoid "Event Ring Full Error" condition
2977 */
2978static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
2979                union xhci_trb *event_ring_deq)
2980{
2981        u64 temp_64;
2982        dma_addr_t deq;
2983
2984        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
2985        /* If necessary, update the HW's version of the event ring deq ptr. */
2986        if (event_ring_deq != xhci->event_ring->dequeue) {
2987                deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
2988                                xhci->event_ring->dequeue);
2989                if (deq == 0)
2990                        xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
2991                /*
2992                 * Per 4.9.4, Software writes to the ERDP register shall
2993                 * always advance the Event Ring Dequeue Pointer value.
2994                 */
2995                if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
2996                                ((u64) deq & (u64) ~ERST_PTR_MASK))
2997                        return;
2998
2999                /* Update HC event ring dequeue pointer */
3000                temp_64 &= ERST_PTR_MASK;
3001                temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
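                /*
                 * ERDP keeps the dequeue pointer in bits 63:4; bits 2:0 hold
                 * the ERST segment index and bit 3 is the RW1C Event Handler
                 * Busy flag, which is why only the low bits survive the
                 * ERST_PTR_MASK masking above (xHCI spec 5.5.2.3.3).
                 */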
3002        }
3003
3004        /* Clear the event handler busy flag (RW1C) */
3005        temp_64 |= ERST_EHB;
3006        xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
3007}
3008
3009/*
3010 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3011 * we might get bad data out of the event ring.  Section 4.10.2.7 has a list of
3012 * indicators of an event TRB error, but we check the status *first* to be safe.
3013 */
3014irqreturn_t xhci_irq(struct usb_hcd *hcd)
3015{
3016        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3017        union xhci_trb *event_ring_deq;
3018        irqreturn_t ret = IRQ_NONE;
3019        u64 temp_64;
3020        u32 status;
3021        int event_loop = 0;
3022
3023        spin_lock(&xhci->lock);
3024        /* Check if the xHC generated the interrupt, or the irq is shared */
3025        status = readl(&xhci->op_regs->status);
3026        if (status == ~(u32)0) {
3027                xhci_hc_died(xhci);
3028                ret = IRQ_HANDLED;
3029                goto out;
3030        }
3031
3032        if (!(status & STS_EINT))
3033                goto out;
3034
3035        if (status & STS_FATAL) {
3036                xhci_warn(xhci, "WARNING: Host System Error\n");
3037                xhci_halt(xhci);
3038                ret = IRQ_HANDLED;
3039                goto out;
3040        }
3041
3042        /*
3043         * Clear the op reg interrupt status first,
3044         * so we can receive interrupts from other MSI-X interrupters.
3045         * Write 1 to clear the interrupt status.
3046         */
3047        status |= STS_EINT;
3048        writel(status, &xhci->op_regs->status);
3049
3050        if (!hcd->msi_enabled) {
3051                u32 irq_pending;
3052                irq_pending = readl(&xhci->ir_set->irq_pending);
3053                irq_pending |= IMAN_IP;
3054                writel(irq_pending, &xhci->ir_set->irq_pending);
3055        }
3056
3057        if (xhci->xhc_state & XHCI_STATE_DYING ||
3058            xhci->xhc_state & XHCI_STATE_HALTED) {
3059                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
3060                                "Shouldn't IRQs be disabled?\n");
3061                /* Clear the event handler busy flag (RW1C);
3062                 * the event ring should be empty.
3063                 */
3064                temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
3065                xhci_write_64(xhci, temp_64 | ERST_EHB,
3066                                &xhci->ir_set->erst_dequeue);
3067                ret = IRQ_HANDLED;
3068                goto out;
3069        }
3070
3071        event_ring_deq = xhci->event_ring->dequeue;
3072        /* FIXME this should be a delayed service routine
3073         * that clears the EHB.
3074         */
3075        while (xhci_handle_event(xhci) > 0) {
3076                if (event_loop++ < TRBS_PER_SEGMENT / 2)
3077                        continue;
3078                xhci_update_erst_dequeue(xhci, event_ring_deq);
3079
3080                /* ring is half-full, force isoc trbs to interrupt more often */
3081                if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
3082                        xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
3083
3084                event_loop = 0;
3085        }
3086
3087        xhci_update_erst_dequeue(xhci, event_ring_deq);
3088        ret = IRQ_HANDLED;
3089
3090out:
3091        spin_unlock(&xhci->lock);
3092
3093        return ret;
3094}
3095
3096irqreturn_t xhci_msi_irq(int irq, void *hcd)
3097{
3098        return xhci_irq(hcd);
3099}
3100
3101/****           Endpoint Ring Operations        ****/
3102
3103/*
3104 * Generic function for queueing a TRB on a ring.
3105 * The caller must have checked to make sure there's room on the ring.
3106 *
3107 * @more_trbs_coming:   Will you enqueue more TRBs before calling
3108 *                      prepare_transfer()?
3109 */
3110static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3111                bool more_trbs_coming,
3112                u32 field1, u32 field2, u32 field3, u32 field4)
3113{
3114        struct xhci_generic_trb *trb;
3115
3116        trb = &ring->enqueue->generic;
3117        trb->field[0] = cpu_to_le32(field1);
3118        trb->field[1] = cpu_to_le32(field2);
3119        trb->field[2] = cpu_to_le32(field3);
3120        /* make sure TRB is fully written before giving it to the controller */
3121        wmb();
3122        trb->field[3] = cpu_to_le32(field4);
3123
3124        trace_xhci_queue_trb(ring, trb);
3125
3126        inc_enq(xhci, ring, more_trbs_coming);
3127}
3128
3129/*
3130 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
3131 * FIXME allocate segments if the ring is full.
3132 */
3133static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3134                u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
3135{
3136        unsigned int num_trbs_needed;
3137        unsigned int link_trb_count = 0;
3138
3139        /* Make sure the endpoint has been added to xHC schedule */
3140        switch (ep_state) {
3141        case EP_STATE_DISABLED:
3142                /*
3143                 * USB core changed config/interfaces without notifying us,
3144                 * or hardware is reporting the wrong state.
3145                 */
3146                xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
3147                return -ENOENT;
3148        case EP_STATE_ERROR:
3149                xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
3150                /* FIXME event handling code for error needs to clear it */
3151                /* XXX not sure if this should be -ENOENT or not */
3152                return -EINVAL;
3153        case EP_STATE_HALTED:
3154                xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
3155                break;
3156        case EP_STATE_STOPPED:
3157        case EP_STATE_RUNNING:
3158                break;
3159        default:
3160                xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
3161                /*
3162                 * FIXME issue Configure Endpoint command to try to get the HC
3163                 * back into a known state.
3164                 */
3165                return -EINVAL;
3166        }
3167
3168        while (1) {
3169                if (room_on_ring(xhci, ep_ring, num_trbs))
3170                        break;
3171
3172                if (ep_ring == xhci->cmd_ring) {
3173                        xhci_err(xhci, "Command ring expansion is not supported\n");
3174                        return -ENOMEM;
3175                }
3176
3177                xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
3178                                "ERROR no room on ep ring, try ring expansion");
3179                num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
3180                if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
3181                                        mem_flags)) {
3182                        xhci_err(xhci, "Ring expansion failed\n");
3183                        return -ENOMEM;
3184                }
3185        }
3186
3187        while (trb_is_link(ep_ring->enqueue)) {
3188                /* If we're not dealing with 0.95 hardware or isoc rings
3189                 * on AMD 0.96 host, clear the chain bit.
3190                 */
3191                if (!xhci_link_trb_quirk(xhci) &&
3192                    !(ep_ring->type == TYPE_ISOC &&
3193                      (xhci->quirks & XHCI_AMD_0x96_HOST)))
3194                        ep_ring->enqueue->link.control &=
3195                                cpu_to_le32(~TRB_CHAIN);
3196                else
3197                        ep_ring->enqueue->link.control |=
3198                                cpu_to_le32(TRB_CHAIN);
3199
3200                wmb();
3201                ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
3202
3203                /* Toggle the cycle bit after the last ring segment. */
3204                if (link_trb_toggles_cycle(ep_ring->enqueue))
3205                        ep_ring->cycle_state ^= 1;
3206
3207                ep_ring->enq_seg = ep_ring->enq_seg->next;
3208                ep_ring->enqueue = ep_ring->enq_seg->trbs;
3209
3210                /* prevent infinite loop if all first trbs are link trbs */
3211                if (link_trb_count++ > ep_ring->num_segs) {
3212                        xhci_warn(xhci, "Ring is an endless link TRB loop\n");
3213                        return -EINVAL;
3214                }
3215        }
3216
3217        if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
3218                xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
3219                return -EINVAL;
3220        }
3221
3222        return 0;
3223}
3224
3225static int prepare_transfer(struct xhci_hcd *xhci,
3226                struct xhci_virt_device *xdev,
3227                unsigned int ep_index,
3228                unsigned int stream_id,
3229                unsigned int num_trbs,
3230                struct urb *urb,
3231                unsigned int td_index,
3232                gfp_t mem_flags)
3233{
3234        int ret;
3235        struct urb_priv *urb_priv;
3236        struct xhci_td  *td;
3237        struct xhci_ring *ep_ring;
3238        struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3239
3240        ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3241                                              stream_id);
3242        if (!ep_ring) {
3243                xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3244                                stream_id);
3245                return -EINVAL;
3246        }
3247
3248        ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3249                           num_trbs, mem_flags);
3250        if (ret)
3251                return ret;
3252
3253        urb_priv = urb->hcpriv;
3254        td = &urb_priv->td[td_index];
3255
3256        INIT_LIST_HEAD(&td->td_list);
3257        INIT_LIST_HEAD(&td->cancelled_td_list);
3258
3259        if (td_index == 0) {
3260                ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3261                if (unlikely(ret))
3262                        return ret;
3263        }
3264
3265        td->urb = urb;
3266        /* Add this TD to the tail of the endpoint ring's TD list */
3267        list_add_tail(&td->td_list, &ep_ring->td_list);
3268        td->start_seg = ep_ring->enq_seg;
3269        td->first_trb = ep_ring->enqueue;
3270
3271        return 0;
3272}
3273
3274unsigned int count_trbs(u64 addr, u64 len)
3275{
3276        unsigned int num_trbs;
3277
3278        num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3279                        TRB_MAX_BUFF_SIZE);
3280        if (num_trbs == 0)
3281                num_trbs++;
3282
3283        return num_trbs;
3284}
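/*
 * For example, TRB_MAX_BUFF_SIZE is 64KB and a TRB's buffer must not cross a
 * 64KB boundary: a 0x2000-byte buffer starting 0xF000 bytes into a 64KB
 * region crosses one boundary, so count_trbs() returns
 * DIV_ROUND_UP(0x2000 + 0xF000, 0x10000) = 2.  A zero-length transfer still
 * needs one TRB, hence the num_trbs++ above.
 */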
3285
3286static inline unsigned int count_trbs_needed(struct urb *urb)
3287{
3288        return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3289}
3290
3291static unsigned int count_sg_trbs_needed(struct urb *urb)
3292{
3293        struct scatterlist *sg;
3294        unsigned int i, len, full_len, num_trbs = 0;
3295
3296        full_len = urb->transfer_buffer_length;
3297
3298        for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3299                len = sg_dma_len(sg);
3300                num_trbs += count_trbs(sg_dma_address(sg), len);
3301                len = min_t(unsigned int, len, full_len);
3302                full_len -= len;
3303                if (full_len == 0)
3304                        break;
3305        }
3306
3307        return num_trbs;
3308}
3309
3310static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3311{
3312        u64 addr, len;
3313
3314        addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3315        len = urb->iso_frame_desc[i].length;
3316
3317        return count_trbs(addr, len);
3318}
3319
3320static void check_trb_math(struct urb *urb, int running_total)
3321{
3322        if (unlikely(running_total != urb->transfer_buffer_length))
3323                dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3324                                "queued %#x (%d), asked for %#x (%d)\n",
3325                                __func__,
3326                                urb->ep->desc.bEndpointAddress,
3327                                running_total, running_total,
3328                                urb->transfer_buffer_length,
3329                                urb->transfer_buffer_length);
3330}
3331
3332static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3333                unsigned int ep_index, unsigned int stream_id, int start_cycle,
3334                struct xhci_generic_trb *start_trb)
3335{
3336        /*
3337         * Pass all the TRBs to the hardware at once and make sure this write
3338         * isn't reordered.
3339         */
3340        wmb();
3341        if (start_cycle)
3342                start_trb->field[3] |= cpu_to_le32(start_cycle);
3343        else
3344                start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3345        xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3346}
3347
3348static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3349                                                struct xhci_ep_ctx *ep_ctx)
3350{
3351        int xhci_interval;
3352        int ep_interval;
3353
3354        xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3355        ep_interval = urb->interval;
3356
3357        /* Convert to microframes */
3358        if (urb->dev->speed == USB_SPEED_LOW ||
3359                        urb->dev->speed == USB_SPEED_FULL)
3360                ep_interval *= 8;
3361
3362        /* FIXME change this to a warning and a suggestion to use the new API
3363         * to set the polling interval (once the API is added).
3364         */
3365        if (xhci_interval != ep_interval) {
3366                dev_dbg_ratelimited(&urb->dev->dev,
3367                                "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3368                                ep_interval, ep_interval == 1 ? "" : "s",
3369                                xhci_interval, xhci_interval == 1 ? "" : "s");
3370                urb->interval = xhci_interval;
3371                /* Convert back to frames for LS/FS devices */
3372                if (urb->dev->speed == USB_SPEED_LOW ||
3373                                urb->dev->speed == USB_SPEED_FULL)
3374                        urb->interval /= 8;
3375        }
3376}
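/*
 * For example, assume a full-speed device whose URB asks for interval 4
 * (frames): ep_interval becomes 4 * 8 = 32 microframes.  If the endpoint
 * context decodes to 8 microframes, the mismatch is logged, urb->interval is
 * overwritten with 8, and then converted back to 1 frame for the
 * low-/full-speed case.
 */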
3377
3378/*
3379 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
3380 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
3381 * (comprised of sg list entries) can take several service intervals to
3382 * transmit.
3383 */
3384int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3385                struct urb *urb, int slot_id, unsigned int ep_index)
3386{
3387        struct xhci_ep_ctx *ep_ctx;
3388
3389        ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3390        check_interval(xhci, urb, ep_ctx);
3391
3392        return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3393}
3394
3395/*
3396 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3397 * packets remaining in the TD (*not* including this TRB).
3398 *
3399 * Total TD packet count = total_packet_count =
3400 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3401 *
3402 * Packets transferred up to and including this TRB = packets_transferred =
3403 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3404 *
3405 * TD size = total_packet_count - packets_transferred
3406 *
3407 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3408 * including this TRB, right shifted by 10
3409 *
3410 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3411 * This is taken care of in the TRB_TD_SIZE() macro
3412 *
3413 * The last TRB in a TD must have the TD size set to zero.
3414 */
3415static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3416                              int trb_buff_len, unsigned int td_total_len,
3417                              struct urb *urb, bool more_trbs_coming)
3418{
3419        u32 maxp, total_packet_count;
3420
3421        /* MTK xHCI 0.96 contains some features from 1.0 */
3422        if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3423                return ((td_total_len - transferred) >> 10);
3424
3425        /* One TRB with a zero-length data packet. */
3426        if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3427            trb_buff_len == td_total_len)
3428                return 0;
3429
3430        /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3431        if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3432                trb_buff_len = 0;
3433
3434        maxp = usb_endpoint_maxp(&urb->ep->desc);
3435        total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3436
3437        /* Queueing functions don't count the current TRB into transferred */
3438        return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3439}
3440
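/*
 * Worked example for an xHCI 1.0 host: td_total_len = 3000 and maxp = 512
 * give total_packet_count = DIV_ROUND_UP(3000, 512) = 6.  For a first TRB
 * carrying 1024 bytes with more TRBs coming, transferred = 0 and the TD size
 * field is 6 - (0 + 1024) / 512 = 4.  The last TRB of the TD always reports 0.
 */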
3441
3442static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
3443                         u32 *trb_buff_len, struct xhci_segment *seg)
3444{
3445        struct device *dev = xhci_to_hcd(xhci)->self.controller;
3446        unsigned int unalign;
3447        unsigned int max_pkt;
3448        u32 new_buff_len;
3449        size_t len;
3450
3451        max_pkt = usb_endpoint_maxp(&urb->ep->desc);
3452        unalign = (enqd_len + *trb_buff_len) % max_pkt;
3453
3454        /* we got lucky, last normal TRB data on segment is packet aligned */
3455        if (unalign == 0)
3456                return 0;
3457
3458        xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
3459                 unalign, *trb_buff_len);
3460
3461        /* is the last normal TRB alignable by splitting it? */
3462        if (*trb_buff_len > unalign) {
3463                *trb_buff_len -= unalign;
3464                xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
3465                return 0;
3466        }
3467
3468        /*
3469         * We want enqd_len + trb_buff_len to sum up to a number which is
3470         * divisible by the endpoint's wMaxPacketSize. IOW:
3471         * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
3472         */
3473        new_buff_len = max_pkt - (enqd_len % max_pkt);
3474
3475        if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
3476                new_buff_len = (urb->transfer_buffer_length - enqd_len);
3477
3478        /* create a max_pkt sized bounce buffer pointed to by the last trb */
3479        if (usb_urb_dir_out(urb)) {
3480                if (urb->num_sgs) {
3481                        len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
3482                                                 seg->bounce_buf, new_buff_len, enqd_len);
3483                        if (len != new_buff_len)
3484                                xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
3485                                          len, new_buff_len);
3486                } else {
3487                        memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
3488                }
3489
3490                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3491                                                 max_pkt, DMA_TO_DEVICE);
3492        } else {
3493                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
3494                                                 max_pkt, DMA_FROM_DEVICE);
3495        }
3496
3497        if (dma_mapping_error(dev, seg->bounce_dma)) {
3498                /* try without aligning. Some host controllers survive */
3499                xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
3500                return 0;
3501        }
3502        *trb_buff_len = new_buff_len;
3503        seg->bounce_len = new_buff_len;
3504        seg->bounce_offs = enqd_len;
3505
3506        xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
3507
3508        return 1;
3509}
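/*
 * For example, with max_pkt = 512, enqd_len = 700 already queued on this
 * segment and a 100-byte TRB left before the Link TRB: unalign is
 * (700 + 100) % 512 = 288, which cannot be trimmed off the 100-byte TRB, so
 * new_buff_len = 512 - (700 % 512) = 324 bytes (capped by the bytes remaining
 * in the URB) are copied, for an OUT transfer, into the segment's bounce
 * buffer, making 700 + 324 = 1024 a wMaxPacketSize multiple.
 */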
3510
3511/* This is very similar to what ehci-q.c qtd_fill() does */
3512int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3513                struct urb *urb, int slot_id, unsigned int ep_index)
3514{
3515        struct xhci_ring *ring;
3516        struct urb_priv *urb_priv;
3517        struct xhci_td *td;
3518        struct xhci_generic_trb *start_trb;
3519        struct scatterlist *sg = NULL;
3520        bool more_trbs_coming = true;
3521        bool need_zero_pkt = false;
3522        bool first_trb = true;
3523        unsigned int num_trbs;
3524        unsigned int start_cycle, num_sgs = 0;
3525        unsigned int enqd_len, block_len, trb_buff_len, full_len;
3526        int sent_len, ret;
3527        u32 field, length_field, remainder;
3528        u64 addr, send_addr;
3529
3530        ring = xhci_urb_to_transfer_ring(xhci, urb);
3531        if (!ring)
3532                return -EINVAL;
3533
3534        full_len = urb->transfer_buffer_length;
3535        /* If we have a scatter/gather list, use it. */
3536        if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
3537                num_sgs = urb->num_mapped_sgs;
3538                sg = urb->sg;
3539                addr = (u64) sg_dma_address(sg);
3540                block_len = sg_dma_len(sg);
3541                num_trbs = count_sg_trbs_needed(urb);
3542        } else {
3543                num_trbs = count_trbs_needed(urb);
3544                addr = (u64) urb->transfer_dma;
3545                block_len = full_len;
3546        }
3547        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3548                        ep_index, urb->stream_id,
3549                        num_trbs, urb, 0, mem_flags);
3550        if (unlikely(ret < 0))
3551                return ret;
3552
3553        urb_priv = urb->hcpriv;
3554
3555        /* Deal with URB_ZERO_PACKET - need one more td/trb */
3556        if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
3557                need_zero_pkt = true;
3558
3559        td = &urb_priv->td[0];
3560
3561        /*
3562         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3563         * until we've finished creating all the other TRBs.  The ring's cycle
3564         * state may change as we enqueue the other TRBs, so save it too.
3565         */
3566        start_trb = &ring->enqueue->generic;
3567        start_cycle = ring->cycle_state;
3568        send_addr = addr;
3569
3570        /* Queue the TRBs, even if they are zero-length */
3571        for (enqd_len = 0; first_trb || enqd_len < full_len;
3572                        enqd_len += trb_buff_len) {
3573                field = TRB_TYPE(TRB_NORMAL);
3574
3575                /* TRB buffer should not cross 64KB boundaries */
3576                trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
3577                trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
3578
3579                if (enqd_len + trb_buff_len > full_len)
3580                        trb_buff_len = full_len - enqd_len;
3581
3582                /* Don't change the cycle bit of the first TRB until later */
3583                if (first_trb) {
3584                        first_trb = false;
3585                        if (start_cycle == 0)
3586                                field |= TRB_CYCLE;
3587                } else
3588                        field |= ring->cycle_state;
3589
3590                /* Chain all the TRBs together; clear the chain bit in the last
3591                 * TRB to indicate it's the last TRB in the chain.
3592                 */
3593                if (enqd_len + trb_buff_len < full_len) {
3594                        field |= TRB_CHAIN;
3595                        if (trb_is_link(ring->enqueue + 1)) {
3596                                if (xhci_align_td(xhci, urb, enqd_len,
3597                                                  &trb_buff_len,
3598                                                  ring->enq_seg)) {
3599                                        send_addr = ring->enq_seg->bounce_dma;
3600                                        /* assuming TD won't span 2 segs */
3601                                        td->bounce_seg = ring->enq_seg;
3602                                }
3603                        }
3604                }
3605                if (enqd_len + trb_buff_len >= full_len) {
3606                        field &= ~TRB_CHAIN;
3607                        field |= TRB_IOC;
3608                        more_trbs_coming = false;
3609                        td->last_trb = ring->enqueue;
3610                        td->last_trb_seg = ring->enq_seg;
3611                        if (xhci_urb_suitable_for_idt(urb)) {
3612                                memcpy(&send_addr, urb->transfer_buffer,
3613                                       trb_buff_len);
3614                                le64_to_cpus(&send_addr);
3615                                field |= TRB_IDT;
3616                        }
3617                }
3618
3619                /* Only set interrupt on short packet for IN endpoints */
3620                if (usb_urb_dir_in(urb))
3621                        field |= TRB_ISP;
3622
3623                /* Set the TRB length, TD size, and interrupter fields. */
3624                remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
3625                                              full_len, urb, more_trbs_coming);
3626
3627                length_field = TRB_LEN(trb_buff_len) |
3628                        TRB_TD_SIZE(remainder) |
3629                        TRB_INTR_TARGET(0);
3630
3631                queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
3632                                lower_32_bits(send_addr),
3633                                upper_32_bits(send_addr),
3634                                length_field,
3635                                field);
3636                td->num_trbs++;
3637                addr += trb_buff_len;
3638                sent_len = trb_buff_len;
3639
3640                while (sg && sent_len >= block_len) {
3641                        /* New sg entry */
3642                        --num_sgs;
3643                        sent_len -= block_len;
3644                        sg = sg_next(sg);
3645                        if (num_sgs != 0 && sg) {
3646                                block_len = sg_dma_len(sg);
3647                                addr = (u64) sg_dma_address(sg);
3648                                addr += sent_len;
3649                        }
3650                }
3651                block_len -= sent_len;
3652                send_addr = addr;
3653        }
3654
3655        if (need_zero_pkt) {
3656                ret = prepare_transfer(xhci, xhci->devs[slot_id],
3657                                       ep_index, urb->stream_id,
3658                                       1, urb, 1, mem_flags);
3659                urb_priv->td[1].last_trb = ring->enqueue;
3660                urb_priv->td[1].last_trb_seg = ring->enq_seg;
3661                field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
3662                queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
3663                urb_priv->td[1].num_trbs++;
3664        }
3665
3666        check_trb_math(urb, enqd_len);
3667        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
3668                        start_cycle, start_trb);
3669        return 0;
3670}
3671
3672/* Caller must have locked xhci->lock */
3673int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3674                struct urb *urb, int slot_id, unsigned int ep_index)
3675{
3676        struct xhci_ring *ep_ring;
3677        int num_trbs;
3678        int ret;
3679        struct usb_ctrlrequest *setup;
3680        struct xhci_generic_trb *start_trb;
3681        int start_cycle;
3682        u32 field;
3683        struct urb_priv *urb_priv;
3684        struct xhci_td *td;
3685
3686        ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
3687        if (!ep_ring)
3688                return -EINVAL;
3689
3690        /*
3691         * Need to copy setup packet into setup TRB, so we can't use the setup
3692         * DMA address.
3693         */
3694        if (!urb->setup_packet)
3695                return -EINVAL;
3696
3697        /* 1 TRB for setup, 1 for status */
3698        num_trbs = 2;
3699        /*
3700         * Don't need to check if we need additional event data and normal TRBs,
3701         * since data in control transfers will never get bigger than 16MB
3702         * XXX: can we get a buffer that crosses 64KB boundaries?
3703         */
3704        if (urb->transfer_buffer_length > 0)
3705                num_trbs++;
3706        ret = prepare_transfer(xhci, xhci->devs[slot_id],
3707                        ep_index, urb->stream_id,
3708                        num_trbs, urb, 0, mem_flags);
3709        if (ret < 0)
3710                return ret;
3711
3712        urb_priv = urb->hcpriv;
3713        td = &urb_priv->td[0];
3714        td->num_trbs = num_trbs;
3715
3716        /*
3717         * Don't give the first TRB to the hardware (by toggling the cycle bit)
3718         * until we've finished creating all the other TRBs.  The ring's cycle
3719         * state may change as we enqueue the other TRBs, so save it too.
3720         */
3721        start_trb = &ep_ring->enqueue->generic;
3722        start_cycle = ep_ring->cycle_state;
3723
3724        /* Queue setup TRB - see section 6.4.1.2.1 */
3725        /* FIXME better way to translate setup_packet into two u32 fields? */
3726        setup = (struct usb_ctrlrequest *) urb->setup_packet;
3727        field = 0;
3728        field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
3729        if (start_cycle == 0)
3730                field |= 0x1;
3731
3732        /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
3733        if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
3734                if (urb->transfer_buffer_length > 0) {
3735                        if (setup->bRequestType & USB_DIR_IN)
3736                                field |= TRB_TX_TYPE(TRB_DATA_IN);
3737                        else
3738                                field |= TRB_TX_TYPE(TRB_DATA_OUT);
3739                }
3740        }
3741
3742        queue_trb(xhci, ep_ring, true,
3743                  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
3744                  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
3745                  TRB_LEN(8) | TRB_INTR_TARGET(0),
3746                  /* Immediate data in pointer */
3747                  field);
3748
3749        /* If there's data, queue data TRBs */
3750        /* Only set interrupt on short packet for IN endpoints */
3751        if (usb_urb_dir_in(urb))
3752                field = TRB_ISP | TRB_TYPE(TRB_DATA);
3753        else
3754                field = TRB_TYPE(TRB_DATA);
3755
3756        if (urb->transfer_buffer_length > 0) {
3757                u32 length_field, remainder;
3758                u64 addr;
3759
3760                if (xhci_urb_suitable_for_idt(urb)) {
3761                        memcpy(&addr, urb->transfer_buffer,
3762                               urb->transfer_buffer_length);
3763                        le64_to_cpus(&addr);
3764                        field |= TRB_IDT;
3765                } else {
3766                        addr = (u64) urb->transfer_dma;
3767                }
3768
3769                remainder = xhci_td_remainder(xhci, 0,
3770                                urb->transfer_buffer_length,
3771                                urb->transfer_buffer_length,
3772                                urb, 1);
3773                length_field = TRB_LEN(urb->transfer_buffer_length) |
3774                                TRB_TD_SIZE(remainder) |
3775                                TRB_INTR_TARGET(0);
3776                if (setup->bRequestType & USB_DIR_IN)
3777                        field |= TRB_DIR_IN;
3778                queue_trb(xhci, ep_ring, true,
3779                                lower_32_bits(addr),
3780                                upper_32_bits(addr),
3781                                length_field,
3782                                field | ep_ring->cycle_state);
3783        }
3784
3785        /* Save the DMA address of the last TRB in the TD */
3786        td->last_trb = ep_ring->enqueue;
3787        td->last_trb_seg = ep_ring->enq_seg;
3788
3789        /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
3790        /* If the device sent data, the status stage is an OUT transfer */
3791        if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
3792                field = 0;
3793        else
3794                field = TRB_DIR_IN;
3795        queue_trb(xhci, ep_ring, false,
3796                        0,
3797                        0,
3798                        TRB_INTR_TARGET(0),
3799                        /* Event on completion */
3800                        field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
3801
3802        giveback_first_trb(xhci, slot_id, ep_index, 0,
3803                        start_cycle, start_trb);
3804        return 0;
3805}
3806
3807/*
3808 * The transfer burst count field of the isochronous TRB defines the number of
3809 * bursts that are required to move all packets in this TD.  Only SuperSpeed
3810 * devices can burst, moving up to (bMaxBurst + 1) packets per burst.
3811 * This field is zero based, meaning a value of zero in the field means one
3812 * burst.  Basically, for everything but SuperSpeed devices, this field will be
3813 * zero.  Only xHCI 1.0 host controllers support this field.
3814 */
3815static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3816                struct urb *urb, unsigned int total_packet_count)
3817{
3818        unsigned int max_burst;
3819
3820        if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
3821                return 0;
3822
3823        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3824        return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3825}
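/*
 * For example, a SuperSpeed isoc endpoint with bMaxBurst = 2 (bursts of up to
 * 3 packets) moving a 7-packet TD needs DIV_ROUND_UP(7, 3) = 3 bursts, so the
 * zero-based TBC field is 2.  Pre-1.0 hosts and sub-SuperSpeed devices get 0.
 */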
3826
3827/*
3828 * Returns the number of packets in the last "burst" of packets.  This field is
3829 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
3830 * the last burst packet count is equal to the total number of packets in the
3831 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
3832 * must contain (bMaxBurst + 1) packets, but the last burst can
3833 * contain 1 to (bMaxBurst + 1) packets.
3834 */
3835static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
3836                struct urb *urb, unsigned int total_packet_count)
3837{
3838        unsigned int max_burst;
3839        unsigned int residue;
3840
3841        if (xhci->hci_version < 0x100)
3842                return 0;
3843
3844        if (urb->dev->speed >= USB_SPEED_SUPER) {
3845                /* bMaxBurst is zero based: 0 means 1 packet per burst */
3846                max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3847                residue = total_packet_count % (max_burst + 1);
3848                /* If residue is zero, the last burst contains (max_burst + 1)
3849                 * number of packets, but the TLBPC field is zero-based.
3850                 */
3851                if (residue == 0)
3852                        return max_burst;
3853                return residue - 1;
3854        }
3855        if (total_packet_count == 0)
3856                return 0;
3857        return total_packet_count - 1;
3858}
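/*
 * Continuing the example above (bMaxBurst = 2, 7 packets): the residue is
 * 7 % 3 = 1, so the last burst carries one packet and TLBPC = 0.  With 6
 * packets the residue is 0, the last burst is full (3 packets), and TLBPC is
 * returned as max_burst = 2.
 */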
3859
3860/*
3861 * Calculates the Frame ID field of the isochronous TRB, which identifies the
3862 * target frame that the Interval associated with this Isochronous
3863 * Transfer Descriptor will start on. Refer to section 4.11.2.5 in the xHCI 1.1 spec.
3864 *
3865 * Returns actual frame id on success, negative value on error.
3866 */
3867static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
3868                struct urb *urb, int index)
3869{
3870        int start_frame, ist, ret = 0;
3871        int start_frame_id, end_frame_id, current_frame_id;
3872
3873        if (urb->dev->speed == USB_SPEED_LOW ||
3874                        urb->dev->speed == USB_SPEED_FULL)
3875                start_frame = urb->start_frame + index * urb->interval;
3876        else
3877                start_frame = (urb->start_frame + index * urb->interval) >> 3;
3878
3879        /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
3880         *
3881         * If bit [3] of IST is cleared to '0', software can add a TRB no
3882         * later than IST[2:0] Microframes before that TRB is scheduled to
3883         * be executed.
3884         * If bit [3] of IST is set to '1', software can add a TRB no later
3885         * than IST[2:0] Frames before that TRB is scheduled to be executed.
3886         */
3887        ist = HCS_IST(xhci->hcs_params2) & 0x7;
3888        if (HCS_IST(xhci->hcs_params2) & (1 << 3))
3889                ist <<= 3;
3890
3891        /* Software shall not schedule an Isoch TD with a Frame ID value that
3892         * is less than the Start Frame ID or greater than the End Frame ID,
3893         * where:
3894         *
3895         * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
3896         * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
3897         *
3898         * Both the End Frame ID and Start Frame ID values are calculated
3899 * in microframes. When software determines the valid Frame ID value,
3900 * the End Frame ID value should be rounded down to the nearest Frame
3901         * boundary, and the Start Frame ID value should be rounded up to the
3902         * nearest Frame boundary.
3903         */
3904        current_frame_id = readl(&xhci->run_regs->microframe_index);
3905        start_frame_id = roundup(current_frame_id + ist + 1, 8);
3906        end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
3907
3908        start_frame &= 0x7ff;
3909        start_frame_id = (start_frame_id >> 3) & 0x7ff;
3910        end_frame_id = (end_frame_id >> 3) & 0x7ff;
3911
3912        xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
3913                 __func__, index, readl(&xhci->run_regs->microframe_index),
3914                 start_frame_id, end_frame_id, start_frame);
3915
3916        if (start_frame_id < end_frame_id) {
3917                if (start_frame > end_frame_id ||
3918                                start_frame < start_frame_id)
3919                        ret = -EINVAL;
3920        } else if (start_frame_id > end_frame_id) {
3921                if (start_frame > end_frame_id &&
3922                                start_frame < start_frame_id)
3923                        ret = -EINVAL;
3924        } else {
3925                ret = -EINVAL;
3926        }
3927
3928        if (index == 0) {
3929                if (ret == -EINVAL || start_frame == start_frame_id) {
3930                        start_frame = start_frame_id + 1;
3931                        if (urb->dev->speed == USB_SPEED_LOW ||
3932                                        urb->dev->speed == USB_SPEED_FULL)
3933                                urb->start_frame = start_frame;
3934                        else
3935                                urb->start_frame = start_frame << 3;
3936                        ret = 0;
3937                }
3938        }
3939
3940        if (ret) {
3941                xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
3942                                start_frame, current_frame_id, index,
3943                                start_frame_id, end_frame_id);
3944                xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
3945                return ret;
3946        }
3947
3948        return start_frame;
3949}
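
/*
 * Worked example (illustrative register values): if MFINDEX reads 1280
 * microframes and the IST is 1 microframe, then
 *   start_frame_id = roundup(1280 + 1 + 1, 8)     = 1288 -> frame 161
 *   end_frame_id   = rounddown(1280 + 895 * 8, 8) = 8440 -> frame 1055
 * so a requested start_frame is accepted only if it lies in the frame
 * window [161, 1055]; otherwise the SIA bit is used instead.
 */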
3950
3951/* Check if we should block the event interrupt (set BEI) for a TD in an isoc URB */
3952static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
3953{
3954        if (xhci->hci_version < 0x100)
3955                return false;
3956        /* always generate an event interrupt for the last TD */
3957        if (i == num_tds - 1)
3958                return false;
3959        /*
3960         * If AVOID_BEI is set the host handles full event rings poorly, so
3961         * generate an event at least every isoc_bei_interval TDs to clear the event ring
3962         */
3963        if (i && xhci->quirks & XHCI_AVOID_BEI)
3964                return !!(i % xhci->isoc_bei_interval);
3965
3966        return true;
3967}
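
/*
 * Worked example (assuming the XHCI_AVOID_BEI quirk is set and
 * isoc_bei_interval is 8): for an URB with 20 TDs, completion events are
 * generated for TDs 8, 16 and the final TD 19; every other TD gets
 * TRB_BEI so the event ring is serviced regularly without raising one
 * interrupt per TD.
 */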
3968
3969/* Queue the TRBs for an isochronous transfer, one TD per urb->iso_frame_desc entry */
3970static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3971                struct urb *urb, int slot_id, unsigned int ep_index)
3972{
3973        struct xhci_ring *ep_ring;
3974        struct urb_priv *urb_priv;
3975        struct xhci_td *td;
3976        int num_tds, trbs_per_td;
3977        struct xhci_generic_trb *start_trb;
3978        bool first_trb;
3979        int start_cycle;
3980        u32 field, length_field;
3981        int running_total, trb_buff_len, td_len, td_remain_len, ret;
3982        u64 start_addr, addr;
3983        int i, j;
3984        bool more_trbs_coming;
3985        struct xhci_virt_ep *xep;
3986        int frame_id;
3987
3988        xep = &xhci->devs[slot_id]->eps[ep_index];
3989        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
3990
3991        num_tds = urb->number_of_packets;
3992        if (num_tds < 1) {
3993                xhci_dbg(xhci, "Isoc URB with zero packets?\n");
3994                return -EINVAL;
3995        }
3996        start_addr = (u64) urb->transfer_dma;
3997        start_trb = &ep_ring->enqueue->generic;
3998        start_cycle = ep_ring->cycle_state;
3999
4000        urb_priv = urb->hcpriv;
4001        /* Queue the TRBs for each TD, even if they are zero-length */
4002        for (i = 0; i < num_tds; i++) {
4003                unsigned int total_pkt_count, max_pkt;
4004                unsigned int burst_count, last_burst_pkt_count;
4005                u32 sia_frame_id;
4006
4007                first_trb = true;
4008                running_total = 0;
4009                addr = start_addr + urb->iso_frame_desc[i].offset;
4010                td_len = urb->iso_frame_desc[i].length;
4011                td_remain_len = td_len;
4012                max_pkt = usb_endpoint_maxp(&urb->ep->desc);
4013                total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
4014
4015                /* A zero-length transfer still involves at least one packet. */
4016                if (total_pkt_count == 0)
4017                        total_pkt_count++;
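                /*
                 * e.g. a zero length iso_frame_desc still moves one zero
                 * length packet, so the burst fields below are computed
                 * for one packet rather than zero.
                 */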
4018                burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
4019                last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
4020                                                        urb, total_pkt_count);
4021
4022                trbs_per_td = count_isoc_trbs_needed(urb, i);
4023
4024                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
4025                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
4026                if (ret < 0) {
4027                        if (i == 0)
4028                                return ret;
4029                        goto cleanup;
4030                }
4031                td = &urb_priv->td[i];
4032                td->num_trbs = trbs_per_td;
4033                /* Use SIA by default; overwrite it if a valid frame ID is available */
4034                sia_frame_id = TRB_SIA;
4035                if (!(urb->transfer_flags & URB_ISO_ASAP) &&
4036                    HCC_CFC(xhci->hcc_params)) {
4037                        frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
4038                        if (frame_id >= 0)
4039                                sia_frame_id = TRB_FRAME_ID(frame_id);
4040                }
4041                /*
4042                 * Set isoc specific data for the first TRB in a TD.
4043                 * Prevent HW from getting the TRBs by keeping the cycle state
4044                 * inverted in the first TD's isoc TRB.
4045                 */
4046                field = TRB_TYPE(TRB_ISOC) |
4047                        TRB_TLBPC(last_burst_pkt_count) |
4048                        sia_frame_id |
4049                        (i ? ep_ring->cycle_state : !start_cycle);
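                /*
                 * Example of the cycle trick above: if start_cycle is 1,
                 * the first TRB of TD 0 is written with its cycle bit 0 so
                 * the HC treats it as not yet owned; giveback_first_trb()
                 * later flips it to 1 and rings the doorbell, handing the
                 * whole queued chain to the controller in one step.
                 */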
4050
4051                /* xhci 1.1 with ETE keeps TBC in the TD_Size field; the old TBC bits here are then RsvdZ */
4052                if (!xep->use_extended_tbc)
4053                        field |= TRB_TBC(burst_count);
4054
4055