linux/drivers/usb/gadget/udc/gr_udc.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * https://www.gaisler.com/products/grlib/grip.pdf
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints, each
 * individually configurable to any of the four USB transfer types. This
 * driver only supports cores in DMA mode.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME     "gr_udc"
#define DRIVER_DESC     "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;

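/* Register access helpers - the GRUSBDC core presents big-endian registers */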
#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))

/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
        ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl)                                        \
        ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
         GR_EPCTRL_BUFSZ_SCALER)

/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
        static const char *const names[] = {
                [GR_EP0_DISCONNECT] = "disconnect",
                [GR_EP0_SETUP] = "setup",
                [GR_EP0_IDATA] = "idata",
                [GR_EP0_ODATA] = "odata",
                [GR_EP0_ISTATUS] = "istatus",
                [GR_EP0_OSTATUS] = "ostatus",
                [GR_EP0_STALL] = "stall",
                [GR_EP0_SUSPEND] = "suspend",
        };

        if (state < 0 || state >= ARRAY_SIZE(names))
                return "UNKNOWN";

        return names[state];
}

#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req)
{
        int buflen = ep->is_in ? req->req.length : req->req.actual;
        int rowlen = 32;
        int plen = min(rowlen, buflen);

        dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
                (buflen > plen ? " (truncated)" : ""));
        print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
                             rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length)
{
        dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
                 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
                                struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */

/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
        u32 epctrl = gr_read32(&ep->regs->epctrl);
        u32 epstat = gr_read32(&ep->regs->epstat);
        int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
        struct gr_request *req;

        seq_printf(seq, "%s:\n", ep->ep.name);
        seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
        seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
        seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
        seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
        seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
        seq_printf(seq, "  stopped = %d\n", ep->stopped);
        seq_printf(seq, "  wedged = %d\n", ep->wedged);
        seq_printf(seq, "  callback = %d\n", ep->callback);
        seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
        seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
        seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
        if (mode == 1 || mode == 3)
                seq_printf(seq, "  nt = %d\n",
                           (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

        seq_printf(seq, "  Buffer 0: %s %s%d\n",
                   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? " " : "selected ",
                   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
        seq_printf(seq, "  Buffer 1: %s %s%d\n",
                   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
                   epstat & GR_EPSTAT_BS ? "selected " : " ",
                   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

        if (list_empty(&ep->queue)) {
                seq_puts(seq, "  Queue: empty\n\n");
                return;
        }

        seq_puts(seq, "  Queue:\n");
        list_for_each_entry(req, &ep->queue, queue) {
                struct gr_dma_desc *desc;
                struct gr_dma_desc *next;

                seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
                           req->req.buf, req->req.actual, req->req.length);

                next = req->first_desc;
                do {
                        desc = next;
                        next = desc->next_desc;
                        seq_printf(seq, "    %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
                                   desc == req->curr_desc ? 'c' : ' ',
                                   desc, desc->paddr, desc->ctrl, desc->data);
                } while (desc != req->last_desc);
        }
        seq_puts(seq, "\n");
}

static int gr_dfs_show(struct seq_file *seq, void *v)
{
        struct gr_udc *dev = seq->private;
        u32 control = gr_read32(&dev->regs->control);
        u32 status = gr_read32(&dev->regs->status);
        struct gr_ep *ep;

        seq_printf(seq, "usb state = %s\n",
                   usb_state_string(dev->gadget.state));
        seq_printf(seq, "address = %d\n",
                   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
        seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
        seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
        seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
        seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
        seq_printf(seq, "test_mode = %d\n", dev->test_mode);
        seq_puts(seq, "\n");

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_seq_ep_show(seq, ep);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(gr_dfs);

static void gr_dfs_create(struct gr_udc *dev)
{
        const char *name = "gr_udc_state";
        struct dentry *root;

        root = debugfs_create_dir(dev_name(dev->dev), usb_debug_root);
        debugfs_create_file(name, 0444, root, dev, &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
        debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */

/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
        dma_addr_t paddr;
        struct gr_dma_desc *dma_desc;

        dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
        if (!dma_desc) {
                dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
                return NULL;
        }

        dma_desc->paddr = paddr;

        return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
                                    struct gr_dma_desc *desc)
{
        dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
        struct gr_dma_desc *desc;
        struct gr_dma_desc *next;

        next = req->first_desc;
        if (!next)
                return;

        do {
                desc = next;
                next = desc->next_desc;
                gr_free_dma_desc(dev, desc);
        } while (desc != req->last_desc);

        req->first_desc = NULL;
        req->curr_desc = NULL;
        req->last_desc = NULL;
}

static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

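/* Returns a descriptor previously allocated by gr_alloc_dma_desc to the pool */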
/*
 * Frees allocated resources and calls the appropriate completion function or
 * setup packet handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
                              int status)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        struct gr_udc *dev;

        list_del_init(&req->queue);

        if (likely(req->req.status == -EINPROGRESS))
                req->req.status = status;
        else
                status = req->req.status;

        dev = ep->dev;
        usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
        gr_free_dma_desc_chain(dev, req);

        if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
                req->req.actual = req->req.length;
        } else if (req->oddlen && req->req.actual > req->evenlen) {
                /*
                 * Copy to user buffer in this case where length was not evenly
                 * divisible by ep->ep.maxpacket and the last descriptor was
                 * actually used.
                 */
                char *buftail = ((char *)req->req.buf + req->evenlen);

                memcpy(buftail, ep->tailbuf, req->oddlen);

                if (req->req.actual > req->req.length) {
                        /* We got more data than was requested */
                        dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
                                ep->ep.name);
                        gr_dbgprint_request("OVFL", ep, req);
                        req->req.status = -EOVERFLOW;
                }
        }

        if (!status) {
                if (ep->is_in)
                        gr_dbgprint_request("SENT", ep, req);
                else
                        gr_dbgprint_request("RECV", ep, req);
        }

        /* Prevent changes to ep->queue during callback */
        ep->callback = 1;
        if (req == dev->ep0reqo && !status) {
                if (req->setup)
                        gr_ep0_setup(dev, req);
                else
                        dev_err(dev->dev,
                                "Unexpected non-setup packet on ep0out\n");
        } else if (req->req.complete) {
                spin_unlock(&dev->lock);

                usb_gadget_giveback_request(&ep->ep, &req->req);

                spin_lock(&dev->lock);
        }
        ep->callback = 0;
}

static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
        struct gr_request *req;

        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
                return NULL;

        INIT_LIST_HEAD(&req->queue);

        return &req->req;
}
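/* Allocates a request object; the counterpart of gr_free_request below */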

/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
        struct gr_request *req;
        u32 dmactrl;

        if (list_empty(&ep->queue)) {
                ep->dma_start = 0;
                return;
        }

        req = list_first_entry(&ep->queue, struct gr_request, queue);

        /* A descriptor should already have been allocated */
        BUG_ON(!req->curr_desc);

        /*
         * The DMA controller cannot handle OUT buffers smaller than
         * ep->ep.maxpacket, since that could lead to buffer overruns if an
         * unexpectedly long packet is received. Therefore, an internal bounce
         * buffer is used for such requests.
         */
        if (!ep->is_in && req->oddlen)
                req->last_desc->data = ep->tailbuf_paddr;

        wmb(); /* Make sure all is settled before handing it over to DMA */

        /* Set the descriptor pointer in the hardware */
        gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

        /* Announce available descriptors */
        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

        ep->dma_start = 1;
}

/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        gr_finish_request(ep, req, status);
        gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
        u32 dmactrl;

        dmactrl = gr_read32(&ep->regs->dmactrl);
        gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}

/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * a buffer smaller than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
                           dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
        struct gr_dma_desc *desc;

        desc = gr_alloc_dma_desc(ep, gfp_flags);
        if (!desc)
                return -ENOMEM;

        desc->data = data;
        if (ep->is_in)
                desc->ctrl =
                        (GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
        else
                desc->ctrl = GR_DESC_OUT_CTRL_IE;

        if (!req->first_desc) {
                req->first_desc = desc;
                req->curr_desc = desc;
        } else {
                req->last_desc->next_desc = desc;
                req->last_desc->next = desc->paddr;
                req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
        }
        req->last_desc = desc;

        return 0;
}

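                /*
                 * Link the new descriptor into both the CPU chain (next_desc)
                 * and the hardware chain (next = descriptor DMA address).
                 */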
/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
                                  gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left to provide descriptors for */
        u16 bytes_used; /* Bytes accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        while (bytes_left > 0) {
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                if (size < ep->bytes_per_buffer) {
                        /* Prepare using bounce buffer */
                        req->evenlen = req->req.length - bytes_left;
                        req->oddlen = size;
                }

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        }

        req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}

/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
                                 gfp_t gfp_flags)
{
        u16 bytes_left; /* Bytes left in req to provide descriptors for */
        u16 bytes_used; /* Bytes in req accommodated for */
        int ret = 0;

        req->first_desc = NULL; /* Signals that no allocation is done yet */
        bytes_left = req->req.length;
        bytes_used = 0;
        do { /* Allow for zero length packets */
                dma_addr_t start = req->req.dma + bytes_used;
                u16 size = min(bytes_left, ep->bytes_per_buffer);

                ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
                if (ret)
                        goto alloc_err;

                bytes_left -= size;
                bytes_used += size;
        } while (bytes_left > 0);

        /*
         * Send an extra zero length packet to indicate that no more data is
         * available when req->req.zero is set and the data length is an even
         * multiple of ep->ep.maxpacket.
         */
        if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
                ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
                if (ret)
                        goto alloc_err;
        }

        /*
         * For IN packets we only want to know when the last packet has been
         * transmitted (not just put into internal buffers).
         */
        req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

        return 0;

alloc_err:
        gr_free_dma_desc_chain(ep->dev, req);

        return ret;
}

/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
        struct gr_udc *dev = ep->dev;
        int ret;

        if (unlikely(!ep->ep.desc && ep->num != 0)) {
                dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
                return -EINVAL;
        }

        if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
                dev_err(dev->dev,
                        "Invalid request for %s: buf=%p list_empty=%d\n",
                        ep->ep.name, req->req.buf, list_empty(&req->queue));
                return -EINVAL;
        }

        if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
                dev_err(dev->dev, "-ESHUTDOWN\n");
                return -ESHUTDOWN;
        }

        /* Can't touch registers when suspended */
        if (dev->ep0state == GR_EP0_SUSPEND) {
                dev_err(dev->dev, "-EBUSY\n");
                return -EBUSY;
        }

        /* Set up DMA mapping in case the caller didn't */
        ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
        if (ret) {
                dev_err(dev->dev, "usb_gadget_map_request failed: %d\n", ret);
                return ret;
        }

        if (ep->is_in)
                ret = gr_setup_in_desc_list(ep, req, gfp_flags);
        else
                ret = gr_setup_out_desc_list(ep, req, gfp_flags);
        if (ret)
                return ret;

        req->req.status = -EINPROGRESS;
        req->req.actual = 0;
        list_add_tail(&req->queue, &ep->queue);

        /* Start DMA if not started, otherwise interrupt handler handles it */
        if (!ep->dma_start && likely(!ep->stopped))
                gr_start_dma(ep);

        return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
                               gfp_t gfp_flags)
{
        if (ep->is_in)
                gr_dbgprint_request("RESP", ep, req);

        return gr_queue(ep, req, gfp_flags);
}

/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
        struct gr_request *req;

        ep->stopped = 1;
        ep->dma_start = 0;
        gr_abort_dma(ep);

        while (!list_empty(&ep->queue)) {
                req = list_first_entry(&ep->queue, struct gr_request, queue);
                gr_finish_request(ep, req, -ESHUTDOWN);
        }
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
        gr_write32(&ep->regs->epctrl, 0);
        gr_write32(&ep->regs->dmactrl, 0);

        ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
        ep->ep.desc = NULL;
        ep->stopped = 1;
        ep->dma_start = 0;
}

/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
        u32 epctrl;

        epctrl = gr_read32(&dev->epo[0].regs->epctrl);
        gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
        epctrl = gr_read32(&dev->epi[0].regs->epctrl);
        gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

        dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
        u32 epctrl;
        int retval = 0;

        if (ep->num && !ep->ep.desc)
                return -EINVAL;

        if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
                return -EOPNOTSUPP;

        /* Never actually halt ep0, and therefore never clear halt for ep0 */
        if (!ep->num) {
                if (halt && !fromhost) {
                        /* ep0 halt from gadget - generate protocol stall */
                        gr_control_stall(ep->dev);
                        dev_dbg(ep->dev->dev, "EP: stall ep0\n");
                        return 0;
                }
                return -EINVAL;
        }

        dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
                (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

        epctrl = gr_read32(&ep->regs->epctrl);
        if (halt) {
                /* Set HALT */
                gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
                ep->stopped = 1;
                if (wedge)
                        ep->wedged = 1;
        } else {
                gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
                ep->stopped = 0;
                ep->wedged = 0;

                /* Things might have been queued up in the meantime */
                if (!ep->dma_start)
                        gr_start_dma(ep);
        }

        return retval;
}
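 * fromhost indicates a host-initiated request (SET/CLEAR_FEATURE), as opposed
 * to a call from the gadget driver.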

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
        if (dev->ep0state != value)
                dev_vdbg(dev->dev, "STATE:  ep0state=%s\n",
                         gr_ep0state_string(value));
        dev->ep0state = value;
}

/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
        gr_write32(&dev->regs->control, 0);
        wmb(); /* Make sure that we do not deny one of our interrupts */
        dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
        struct gr_ep *ep;

        list_for_each_entry(ep, &dev->ep_list, ep_list)
                gr_ep_nuke(ep);

        gr_disable_interrupts_and_pullup(dev);

        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
        usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}

/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
                                     struct usb_request *_req)
{
        struct gr_ep *ep;
        struct gr_udc *dev;
        u32 control;

        ep = container_of(_ep, struct gr_ep, ep);
        dev = ep->dev;

        spin_lock(&dev->lock);

        control = gr_read32(&dev->regs->control);
        control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
        gr_write32(&dev->regs->control, control);

        spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
        /* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
                          void (*complete)(struct usb_ep *ep,
                                           struct usb_request *req))
{
        u8 *reqbuf = dev->ep0reqi->req.buf;
        int status;
        int i;

        for (i = 0; i < length; i++)
                reqbuf[i] = buf[i];
        dev->ep0reqi->req.length = length;
        dev->ep0reqi->req.complete = complete;

        status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
        if (status < 0)
                dev_err(dev->dev,
                        "Could not queue ep0in setup response: %d\n", status);

        return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
        __le16 le_response = cpu_to_le16(response);

        return gr_ep0_respond(dev, (u8 *)&le_response, 2,
                              gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
        return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

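/*
 * Completion callback for the ep0in response to a SET_FEATURE(TEST_MODE)
 * request: the requested test mode is entered only after the status stage
 * has completed, by setting the TM and TS fields of the control register.
 */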
/*
 * This is run when a SET_ADDRESS request is received. It first writes the new
 * address to the control register, which is taken into use internally when
 * the next IN packet is ACKed.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
        u32 control;

        control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
        control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
        control |= GR_CONTROL_SU;
        gr_write32(&dev->regs->control, control);
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
                             u16 value, u16 index)
{
        u16 response;
        u8 test;

        switch (request) {
        case USB_REQ_SET_ADDRESS:
                dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
                gr_set_address(dev, value & 0xff);
                if (value)
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                else
                        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
                return gr_ep0_respond_empty(dev);

        case USB_REQ_GET_STATUS:
                /* Self powered | remote wakeup */
                response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
                return gr_ep0_respond_u16(dev, response);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Allow remote wakeup */
                        dev->remote_wakeup = 1;
                        return gr_ep0_respond_empty(dev);

                case USB_DEVICE_TEST_MODE:
                        /* The hardware does not support USB_TEST_FORCE_ENABLE */
                        test = index >> 8;
                        if (test >= USB_TEST_J && test <= USB_TEST_PACKET) {
                                dev->test_mode = test;
                                return gr_ep0_respond(dev, NULL, 0,
                                                      gr_ep0_testmode_complete);
                        }
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_DEVICE_REMOTE_WAKEUP:
                        /* Disallow remote wakeup */
                        dev->remote_wakeup = 0;
                        return gr_ep0_respond_empty(dev);
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
                                u16 value, u16 index)
{
        if (dev->gadget.state != USB_STATE_CONFIGURED)
                return -1;

        /*
         * Should return STALL for invalid interfaces, but the UDC driver does
         * not know which interfaces are valid. However, many gadget drivers do
         * not handle GET_STATUS, so that is taken care of here.
         */

        switch (request) {
        case USB_REQ_GET_STATUS:
                return gr_ep0_respond_u16(dev, 0x0000);

        case USB_REQ_SET_FEATURE:
        case USB_REQ_CLEAR_FEATURE:
                /*
                 * No possible valid standard requests. Still let gadget drivers
                 * have a go at it.
                 */
                break;
        }

        return 1; /* Delegate the rest */
}

/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
                               u16 value, u16 index)
{
        struct gr_ep *ep;
        int status;
        int halted;
        u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
        u8 is_in = index & USB_ENDPOINT_DIR_MASK;

        if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
                return -1;

        if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
                return -1;

        ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

        switch (request) {
        case USB_REQ_GET_STATUS:
                halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
                return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

        case USB_REQ_SET_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        status = gr_ep_halt_wedge(ep, 1, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;

        case USB_REQ_CLEAR_FEATURE:
                switch (value) {
                case USB_ENDPOINT_HALT:
                        if (ep->wedged)
                                return -1;
                        status = gr_ep_halt_wedge(ep, 0, 0, 1);
                        if (status >= 0)
                                status = gr_ep0_respond_empty(dev);
                        return status;
                }
                break;
        }

        return 1; /* Delegate the rest */
}

/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
        int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

        if (ret)
                dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
                        ret);
}

/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
        __releases(&dev->lock)
        __acquires(&dev->lock)
{
        union {
                struct usb_ctrlrequest ctrl;
                u8 raw[8];
                u32 word[2];
        } u;
        u8 type;
        u8 request;
        u16 value;
        u16 index;
        u16 length;
        int i;
        int status;

        /* Restore from ep0 halt */
        if (dev->ep0state == GR_EP0_STALL) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (!req->req.actual)
                        goto out;
        }

        if (dev->ep0state == GR_EP0_ISTATUS) {
                gr_set_ep0state(dev, GR_EP0_SETUP);
                if (req->req.actual > 0)
                        dev_dbg(dev->dev,
                                "Unexpected setup packet at state %s\n",
                                gr_ep0state_string(GR_EP0_ISTATUS));
                else
                        goto out; /* Got expected ZLP */
        } else if (dev->ep0state != GR_EP0_SETUP) {
                dev_info(dev->dev,
                         "Unexpected ep0out request at state %s - stalling\n",
                         gr_ep0state_string(dev->ep0state));
                gr_control_stall(dev);
                gr_set_ep0state(dev, GR_EP0_SETUP);
                goto out;
        } else if (!req->req.actual) {
                dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
                        gr_ep0state_string(dev->ep0state));
                goto out;
        }

        /* Handle SETUP packet */
        for (i = 0; i < req->req.actual; i++)
                u.raw[i] = ((u8 *)req->req.buf)[i];

        type = u.ctrl.bRequestType;
        request = u.ctrl.bRequest;
        value = le16_to_cpu(u.ctrl.wValue);
        index = le16_to_cpu(u.ctrl.wIndex);
        length = le16_to_cpu(u.ctrl.wLength);

        gr_dbgprint_devreq(dev, type, request, value, index, length);

        /* Check for data stage */
        if (length) {
                if (type & USB_DIR_IN)
                        gr_set_ep0state(dev, GR_EP0_IDATA);
                else
                        gr_set_ep0state(dev, GR_EP0_ODATA);
        }

        status = 1; /* Positive status flags delegation */
        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
                switch (type & USB_RECIP_MASK) {
                case USB_RECIP_DEVICE:
                        status = gr_device_request(dev, type, request,
                                                   value, index);
                        break;
                case USB_RECIP_ENDPOINT:
                        status = gr_endpoint_request(dev, type, request,
                                                     value, index);
                        break;
                case USB_RECIP_INTERFACE:
                        status = gr_interface_request(dev, type, request,
                                                      value, index);
                        break;
                }
        }

        if (status > 0) {
                spin_unlock(&dev->lock);

                dev_vdbg(dev->dev, "DELEGATE\n");
                status = dev->driver->setup(&dev->gadget, &u.ctrl);

                spin_lock(&dev->lock);
        }

        /* Generate STALL on both ep0out and ep0in if requested */
        if (unlikely(status < 0)) {
                dev_vdbg(dev->dev, "STALL\n");
                gr_control_stall(dev);
        }

        if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
            request == USB_REQ_SET_CONFIGURATION) {
                if (!value) {
                        dev_dbg(dev->dev, "STATUS: deconfigured\n");
                        usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
                } else if (status >= 0) {
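/* Re-queue the dedicated ep0out request so the next SETUP can be received */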
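        /* The raw SETUP bytes, also accessible as a struct usb_ctrlrequest */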
                        /* Not configured unless gadget OKs it */
                        dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
                        usb_gadget_set_state(&dev->gadget,
                                             USB_STATE_CONFIGURED);
                }
        }

        /* Get ready for next stage */
        if (dev->ep0state == GR_EP0_ODATA)
                gr_set_ep0state(dev, GR_EP0_OSTATUS);
        else if (dev->ep0state == GR_EP0_IDATA)
                gr_set_ep0state(dev, GR_EP0_ISTATUS);
        else
                gr_set_ep0state(dev, GR_EP0_SETUP);

out:
        gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
        u32 control;

        dev->gadget.speed = GR_SPEED(status);
        usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

        /* Turn on full interrupts and pullup */
        control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
                   GR_CONTROL_SP | GR_CONTROL_EP);
        gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
        u32 status;

        dev->irq_enabled = 1;
        wmb(); /* Make sure we do not ignore an interrupt */
        gr_write32(&dev->regs->control, GR_CONTROL_VI);

        /* Take care of the case we are already plugged in at this point */
        status = gr_read32(&dev->regs->status);
        if (status & GR_STATUS_VB)
                gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
        gr_stop_activity(dev);

        /* Report disconnect */
        if (dev->driver && dev->driver->disconnect) {
                spin_unlock(&dev->lock);

                dev->driver->disconnect(&dev->gadget);

                spin_lock(&dev->lock);
        }

        gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
        gr_set_address(dev, 0);
        gr_set_ep0state(dev, GR_EP0_SETUP);
        usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
        dev->gadget.speed = GR_SPEED(status);

        gr_ep_nuke(&dev->epo[0]);
        gr_ep_nuke(&dev->epi[0]);
        dev->epo[0].stopped = 0;
        dev->epi[0].stopped = 0;
        gr_ep0out_requeue(dev);
}

/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
        struct gr_request *req;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->last_desc)
                return 0;

        if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
                return 0; /* Not put in hardware buffers yet */

        if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
                return 0; /* Not transmitted yet, still in hardware buffers */

        /* Write complete */
        gr_dma_advance(ep, 0);

        return 1;
}

/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
        u32 ep_dmactrl;
        u32 ctrl;
        u16 len;
        struct gr_request *req;
        struct gr_udc *dev = ep->dev;

        req = list_first_entry(&ep->queue, struct gr_request, queue);
        if (!req->curr_desc)
                return 0;

        ctrl = READ_ONCE(req->curr_desc->ctrl);
        if (ctrl & GR_DESC_OUT_CTRL_EN)
                return 0; /* Not received yet */

        /* Read complete */
        len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
        req->req.actual += len;
        if (ctrl & GR_DESC_OUT_CTRL_SE)
                req->setup = 1;

        if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
                /* Short packet or >= expected size - we are done */

                if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
                        /*
                         * Send a status stage ZLP to ack the DATA stage in the
                         * OUT direction. This needs to be done before
                         * gr_dma_advance as that can lead to a call to
                         * ep0_setup that can change dev->ep0state.
                         */
                        gr_ep0_respond_empty(dev);
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                }

                gr_dma_advance(ep, 0);
        } else {
                /* Not done yet. Enable the next descriptor to receive more. */
                req->curr_desc = req->curr_desc->next_desc;
                req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

                ep_dmactrl = gr_read32(&ep->regs->dmactrl);
                gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
        }

        return 1;
}

/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
        u32 status = gr_read32(&dev->regs->status);
        int handled = 0;
        int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
                         dev->gadget.state == USB_STATE_ATTACHED);

        /* VBUS valid detected */
        if (!powstate && (status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
                gr_vbus_connected(dev, status);
                handled = 1;
        }

        /* Disconnect */
        if (powstate && !(status & GR_STATUS_VB)) {
                dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
                gr_vbus_disconnected(dev);
                handled = 1;
        }

        /* USB reset detected */
        if (status & GR_STATUS_UR) {
                dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
                        GR_SPEED_STR(status));
                gr_write32(&dev->regs->status, GR_STATUS_UR);
                gr_udc_usbreset(dev, status);
                handled = 1;
        }

        /* Speed change */
        if (dev->gadget.speed != GR_SPEED(status)) {
                dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
                        GR_SPEED_STR(status));
                dev->gadget.speed = GR_SPEED(status);
                handled = 1;
        }

        /* Going into suspend */
        if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB suspend\n");
                gr_set_ep0state(dev, GR_EP0_SUSPEND);
                dev->suspended_from = dev->gadget.state;
                usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->suspend) {
                        spin_unlock(&dev->lock);

                        dev->driver->suspend(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        /* Coming out of suspend */
        if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
                dev_dbg(dev->dev, "STATUS: USB resume\n");
                if (dev->suspended_from == USB_STATE_POWERED)
                        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
                else
                        gr_set_ep0state(dev, GR_EP0_SETUP);
                usb_gadget_set_state(&dev->gadget, dev->suspended_from);

                if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
                    dev->driver && dev->driver->resume) {
                        spin_unlock(&dev->lock);

                        dev->driver->resume(&dev->gadget);

                        spin_lock(&dev->lock);
                }
                handled = 1;
        }

        return handled;
}

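/*
 * Enable detection of a valid VBUS, and handle the case where VBUS is
 * already present when this is called.
 */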
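/*
 * Handle a USB bus reset: return to the default address and state and
 * re-arm ep0out for the next SETUP packet.
 */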
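        /* The SE bit flags that this buffer contains a SETUP packet */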
/* Threaded irq handler, running in non-interrupt (process) context */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
        struct gr_udc *dev = _dev;
        struct gr_ep *ep;
        int handled = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);

        if (!dev->irq_enabled)
                goto out;

        /*
         * Check IN ep interrupts. We check these before the OUT eps because
         * some gadgets reuse a request that might still be outstanding and
         * needs to be completed first (mainly setup requests).
         */
        for (i = 0; i < dev->nepi; i++) {
                ep = &dev->epi[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_in_ep(ep) || handled;
        }

        /* Check OUT ep interrupts */
        for (i = 0; i < dev->nepo; i++) {
                ep = &dev->epo[i];
                if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
                        handled = gr_handle_out_ep(ep) || handled;
        }

        /* Check status interrupts */
        handled = gr_handle_state_changes(dev) || handled;

        /*
         * Check AMBA DMA errors. Only check if we didn't find anything else to
         * handle because this shouldn't happen if we did everything right.
         */
        if (!handled) {
                list_for_each_entry(ep, &dev->ep_list, ep_list) {
                        if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
                                dev_err(dev->dev,
                                        "AMBA Error occurred for %s\n",
                                        ep->ep.name);
                                handled = 1;
                        }
                }
        }

out:
        spin_unlock_irqrestore(&dev->lock, flags);

        return handled ? IRQ_HANDLED : IRQ_NONE;
}

/* Interrupt context irq handler; defers the actual work to the irq thread */
1452static irqreturn_t gr_irq(int irq, void *_dev)
1453{
1454        struct gr_udc *dev = _dev;
1455
1456        if (!dev->irq_enabled)
1457                return IRQ_NONE;
1458
1459        return IRQ_WAKE_THREAD;
1460}
1461
1462/* ---------------------------------------------------------------------- */
1463/* USB ep ops */
1464
1465/* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
1466static int gr_ep_enable(struct usb_ep *_ep,
1467                        const struct usb_endpoint_descriptor *desc)
1468{
1469        struct gr_udc *dev;
1470        struct gr_ep *ep;
1471        u8 mode;
1472        u8 nt;
1473        u16 max;
1474        u16 buffer_size = 0;
1475        u32 epctrl;
1476
1477        ep = container_of(_ep, struct gr_ep, ep);
1478        if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
1479                return -EINVAL;
1480
1481        dev = ep->dev;
1482
1483        /* 'ep0' IN and OUT are reserved */
1484        if (ep == &dev->epo[0] || ep == &dev->epi[0])
1485                return -EINVAL;
1486
1487        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1488                return -ESHUTDOWN;
1489
1490        /* Make sure we are clear for enabling */
1491        epctrl = gr_read32(&ep->regs->epctrl);
1492        if (epctrl & GR_EPCTRL_EV)
1493                return -EBUSY;
1494
1495        /* Check that directions match */
1496        if (!ep->is_in != !usb_endpoint_dir_in(desc))
1497                return -EINVAL;
1498
1499        /* Check ep num */
1500        if ((!ep->is_in && ep->num >= dev->nepo) ||
1501            (ep->is_in && ep->num >= dev->nepi))
1502                return -EINVAL;
1503
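        /* Hardware transfer type encoding: 0 control, 1 iso, 2 bulk, 3 int */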
1504        if (usb_endpoint_xfer_control(desc)) {
1505                mode = 0;
1506        } else if (usb_endpoint_xfer_isoc(desc)) {
1507                mode = 1;
1508        } else if (usb_endpoint_xfer_bulk(desc)) {
1509                mode = 2;
1510        } else if (usb_endpoint_xfer_int(desc)) {
1511                mode = 3;
1512        } else {
1513                dev_err(dev->dev, "Unknown transfer type for %s\n",
1514                        ep->ep.name);
1515                return -EINVAL;
1516        }
1517
1518        /*
1519         * Bits 10-0 set the max payload. 12-11 set the number of
1520         * additional transactions.
1521         */
1522        max = usb_endpoint_maxp(desc);
1523        nt = usb_endpoint_maxp_mult(desc) - 1;
1524        buffer_size = GR_BUFFER_SIZE(epctrl);
1525        if (nt && (mode == 0 || mode == 2)) {
1526                dev_err(dev->dev,
1527                        "%s mode: multiple trans./microframe not valid\n",
1528                        (mode == 2 ? "Bulk" : "Control"));
1529                return -EINVAL;
1530        } else if (nt == 0x3) {
1531                dev_err(dev->dev,
1532                        "Invalid value 0x3 for additional trans./microframe\n");
1533                return -EINVAL;
1534        } else if ((nt + 1) * max > buffer_size) {
1535                dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
1536                        buffer_size, (nt + 1), max);
1537                return -EINVAL;
1538        } else if (max == 0) {
1539                dev_err(dev->dev, "Max payload cannot be set to 0\n");
1540                return -EINVAL;
1541        } else if (max > ep->ep.maxpacket_limit) {
1542                dev_err(dev->dev, "Requested max payload %d > limit %d\n",
1543                        max, ep->ep.maxpacket_limit);
1544                return -EINVAL;
1545        }
1546
1547        spin_lock(&ep->dev->lock);
1548
1549        if (!ep->stopped) {
1550                spin_unlock(&ep->dev->lock);
1551                return -EBUSY;
1552        }
1553
1554        ep->stopped = 0;
1555        ep->wedged = 0;
1556        ep->ep.desc = desc;
1557        ep->ep.maxpacket = max;
1558        ep->dma_start = 0;
1559
1561        if (nt) {
1562                /*
1563                 * Maximum possible size of all payloads in one microframe
1564                 * regardless of direction when using high-bandwidth mode.
1565                 */
1566                ep->bytes_per_buffer = (nt + 1) * max;
1567        } else if (ep->is_in) {
1568                /*
1569                 * The biggest multiple of maximum packet size that fits into
1570                 * the buffer. The hardware will split the data up into
1571                 * multiple packets in the IN direction.
1572                 */
1573                ep->bytes_per_buffer = (buffer_size / max) * max;
1574        } else {
1575                /*
1576                 * Only single packets will be placed in the buffers in the
1577                 * OUT direction.
1578                 */
1579                ep->bytes_per_buffer = max;
1580        }
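
        /*
         * Worked example (figures for illustration only): with a 3072 byte
         * hardware buffer, a high-bandwidth isochronous endpoint with
         * wMaxPacketSize = 1024 and 3 transactions/microframe (nt = 2) gets
         * bytes_per_buffer = 3 * 1024 = 3072. A bulk IN endpoint with
         * max = 512 gets (3072 / 512) * 512 = 3072, i.e. six packets per
         * buffer, whereas a bulk OUT endpoint gets a single 512 byte packet.
         */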
1581
1582        epctrl = (max << GR_EPCTRL_MAXPL_POS)
1583                | (nt << GR_EPCTRL_NT_POS)
1584                | (mode << GR_EPCTRL_TT_POS)
1585                | GR_EPCTRL_EV;
1586        if (ep->is_in)
1587                epctrl |= GR_EPCTRL_PI;
1588        gr_write32(&ep->regs->epctrl, epctrl);
1589
1590        gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);
1591
1592        spin_unlock(&ep->dev->lock);
1593
1594        dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
1595                ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
1596        return 0;
1597}
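
/*
 * For orientation, a gadget function driver typically reaches gr_ep_enable()
 * through the gadget core. A minimal sketch, where fs_bulk_in_desc and f are
 * hypothetical names belonging to the function driver:
 *
 *	struct usb_ep *ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc);
 *	...
 *	ret = config_ep_by_speed(gadget, f, ep);
 *	if (!ret)
 *		ret = usb_ep_enable(ep);	(dispatches to gr_ep_enable)
 */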
1598
1599/* Disable endpoint. Not for ep0in and ep0out, which are handled separately. */
1600static int gr_ep_disable(struct usb_ep *_ep)
1601{
1602        struct gr_ep *ep;
1603        struct gr_udc *dev;
1604        unsigned long flags;
1605
1606        ep = container_of(_ep, struct gr_ep, ep);
1607        if (!_ep || !ep->ep.desc)
1608                return -ENODEV;
1609
1610        dev = ep->dev;
1611
1612        /* 'ep0' IN and OUT are reserved */
1613        if (ep == &dev->epo[0] || ep == &dev->epi[0])
1614                return -EINVAL;
1615
1616        if (dev->ep0state == GR_EP0_SUSPEND)
1617                return -EBUSY;
1618
1619        dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
1620
1621        spin_lock_irqsave(&dev->lock, flags);
1622
1623        gr_ep_nuke(ep);
1624        gr_ep_reset(ep);
1625        ep->ep.desc = NULL;
1626
1627        spin_unlock_irqrestore(&dev->lock, flags);
1628
1629        return 0;
1630}
1631
1632/*
1633 * Frees a request, but not any DMA buffers associated with it
1634 * (gr_finish_request should already have taken care of that).
1635 */
1636static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
1637{
1638        struct gr_request *req;
1639
1640        if (!_ep || !_req)
1641                return;
1642        req = container_of(_req, struct gr_request, req);
1643
1644        /* Freeing a request that is still queued leads to a memory leak */
1645        WARN(!list_empty(&req->queue),
1646             "request not dequeued properly before freeing\n");
1647
1648        kfree(req);
1649}
1650
1651/* Queue a request from the gadget */
1652static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
1653                        gfp_t gfp_flags)
1654{
1655        struct gr_ep *ep;
1656        struct gr_request *req;
1657        struct gr_udc *dev;
1658        int ret;
1659
1660        if (unlikely(!_ep || !_req))
1661                return -EINVAL;
1662
1663        ep = container_of(_ep, struct gr_ep, ep);
1664        req = container_of(_req, struct gr_request, req);
1665        dev = ep->dev;
1666
1667        spin_lock(&ep->dev->lock);
1668
1669        /*
1670         * The ep0 pointer in the gadget struct is used both for ep0in and
1671         * ep0out. In a data stage in the out direction ep0out needs to be used
1672         * instead of the default ep0in. Completion functions might use
1673         * driver_data, so that needs to be copied as well.
1674         */
1675        if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
1676                ep = &dev->epo[0];
1677                ep->ep.driver_data = dev->epi[0].ep.driver_data;
1678        }
1679
1680        if (ep->is_in)
1681                gr_dbgprint_request("EXTERN", ep, req);
1682
1683        ret = gr_queue(ep, req, GFP_ATOMIC);
1684
1685        spin_unlock(&ep->dev->lock);
1686
1687        return ret;
1688}
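
/*
 * A minimal sketch of how a function driver reaches gr_queue_ext(), where
 * buf, len and my_complete are hypothetical names:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	ret = usb_ep_queue(ep, req, GFP_KERNEL);
 */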
1689
1690/* Dequeue JUST ONE request */
1691static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1692{
1693        struct gr_request *req;
1694        struct gr_ep *ep;
1695        struct gr_udc *dev;
1696        int ret = 0;
1697        unsigned long flags;
1698
1699        ep = container_of(_ep, struct gr_ep, ep);
1700        if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
1701                return -EINVAL;
1702        dev = ep->dev;
1703        if (!dev->driver)
1704                return -ESHUTDOWN;
1705
1706        /* We can't touch (DMA) registers when suspended */
1707        if (dev->ep0state == GR_EP0_SUSPEND)
1708                return -EBUSY;
1709
1710        spin_lock_irqsave(&dev->lock, flags);
1711
1712        /* Make sure it's actually queued on this endpoint */
1713        list_for_each_entry(req, &ep->queue, queue) {
1714                if (&req->req == _req)
1715                        break;
1716        }
1717        if (&req->req != _req) {
1718                ret = -EINVAL;
1719                goto out;
1720        }
1721
1722        if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
1723                /* This request is currently being processed */
1724                gr_abort_dma(ep);
1725                if (ep->stopped)
1726                        gr_finish_request(ep, req, -ECONNRESET);
1727                else
1728                        gr_dma_advance(ep, -ECONNRESET);
1729        } else if (!list_empty(&req->queue)) {
1730                /* Not being processed - gr_finish_request dequeues it */
1731                gr_finish_request(ep, req, -ECONNRESET);
1732        } else {
1733                ret = -EOPNOTSUPP;
1734        }
1735
1736out:
1737        spin_unlock_irqrestore(&dev->lock, flags);
1738
1739        return ret;
1740}
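
/*
 * A request queued as sketched above can be cancelled with
 * usb_ep_dequeue(ep, req), which dispatches to gr_dequeue() and completes
 * the request with status -ECONNRESET.
 */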
1741
1742/* Helper for gr_set_halt and gr_set_wedge */
1743static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
1744{
1745        int ret;
1746        struct gr_ep *ep;
1747
1748        if (!_ep)
1749                return -ENODEV;
1750        ep = container_of(_ep, struct gr_ep, ep);
1751
1752        spin_lock(&ep->dev->lock);
1753
1754        /* Halting an IN endpoint should fail if queue is not empty */
1755        if (halt && ep->is_in && !list_empty(&ep->queue)) {
1756                ret = -EAGAIN;
1757                goto out;
1758        }
1759
1760        ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
1761
1762out:
1763        spin_unlock(&ep->dev->lock);
1764
1765        return ret;
1766}
1767
1768/* Halt endpoint */
1769static int gr_set_halt(struct usb_ep *_ep, int halt)
1770{
1771        return gr_set_halt_wedge(_ep, halt, 0);
1772}
1773
1774/* Halt and wedge endpoint */
1775static int gr_set_wedge(struct usb_ep *_ep)
1776{
1777        return gr_set_halt_wedge(_ep, 1, 1);
1778}
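
/*
 * The distinction matters to protocols such as mass storage: an endpoint
 * halted with usb_ep_set_halt() is un-stalled when the host sends
 * CLEAR_FEATURE(ENDPOINT_HALT), while one stalled with usb_ep_set_wedge()
 * stays halted until the function driver itself clears the condition.
 */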
1779
1780/*
1781 * Return the total number of bytes currently stored in the internal buffers of
1782 * the endpoint.
1783 */
1784static int gr_fifo_status(struct usb_ep *_ep)
1785{
1786        struct gr_ep *ep;
1787        u32 epstat;
1788        u32 bytes = 0;
1789
1790        if (!_ep)
1791                return -ENODEV;
1792        ep = container_of(_ep, struct gr_ep, ep);
1793
1794        epstat = gr_read32(&ep->regs->epstat);
1795
1796        if (epstat & GR_EPSTAT_B0)
1797                bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
1798        if (epstat & GR_EPSTAT_B1)
1799                bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
1800
1801        return bytes;
1802}
1803
1805/* Empty data from internal buffers of an endpoint. */
1806static void gr_fifo_flush(struct usb_ep *_ep)
1807{
1808        struct gr_ep *ep;
1809        u32 epctrl;
1810
1811        if (!_ep)
1812                return;
1813        ep = container_of(_ep, struct gr_ep, ep);
1814        dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
1815
1816        spin_lock(&ep->dev->lock);
1817
1818        epctrl = gr_read32(&ep->regs->epctrl);
1819        epctrl |= GR_EPCTRL_CB;
1820        gr_write32(&ep->regs->epctrl, epctrl);
1821
1822        spin_unlock(&ep->dev->lock);
1823}
1824
1825static const struct usb_ep_ops gr_ep_ops = {
1826        .enable         = gr_ep_enable,
1827        .disable        = gr_ep_disable,
1828
1829        .alloc_request  = gr_alloc_request,
1830        .free_request   = gr_free_request,
1831
1832        .queue          = gr_queue_ext,
1833        .dequeue        = gr_dequeue,
1834
1835        .set_halt       = gr_set_halt,
1836        .set_wedge      = gr_set_wedge,
1837        .fifo_status    = gr_fifo_status,
1838        .fifo_flush     = gr_fifo_flush,
1839};
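
/*
 * The usb_ep_*() wrappers in the gadget core dispatch to the ops above;
 * they are attached to every endpoint in gr_ep_init() below.
 */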
1840
1841/* ---------------------------------------------------------------------- */
1842/* USB Gadget ops */
1843
1844static int gr_get_frame(struct usb_gadget *_gadget)
1845{
1846        struct gr_udc *dev;
1847
1848        if (!_gadget)
1849                return -ENODEV;
1850        dev = container_of(_gadget, struct gr_udc, gadget);
1851        return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
1852}
1853
1854static int gr_wakeup(struct usb_gadget *_gadget)
1855{
1856        struct gr_udc *dev;
1857
1858        if (!_gadget)
1859                return -ENODEV;
1860        dev = container_of(_gadget, struct gr_udc, gadget);
1861
1862        /* Remote wakeup feature not enabled by host */
1863        if (!dev->remote_wakeup)
1864                return -EINVAL;
1865
1866        spin_lock(&dev->lock);
1867
1868        gr_write32(&dev->regs->control,
1869                   gr_read32(&dev->regs->control) | GR_CONTROL_RW);
1870
1871        spin_unlock(&dev->lock);
1872
1873        return 0;
1874}
1875
1876static int gr_pullup(struct usb_gadget *_gadget, int is_on)
1877{
1878        struct gr_udc *dev;
1879        u32 control;
1880
1881        if (!_gadget)
1882                return -ENODEV;
1883        dev = container_of(_gadget, struct gr_udc, gadget);
1884
1885        spin_lock(&dev->lock);
1886
1887        control = gr_read32(&dev->regs->control);
1888        if (is_on)
1889                control |= GR_CONTROL_EP;
1890        else
1891                control &= ~GR_CONTROL_EP;
1892        gr_write32(&dev->regs->control, control);
1893
1894        spin_unlock(&dev->lock);
1895
1896        return 0;
1897}
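
/*
 * The gadget core reaches gr_pullup() through usb_gadget_connect() and
 * usb_gadget_disconnect(), e.g.:
 *
 *	usb_gadget_connect(&dev->gadget);	(sets GR_CONTROL_EP)
 *	usb_gadget_disconnect(&dev->gadget);	(clears GR_CONTROL_EP)
 */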
1898
1899static int gr_udc_start(struct usb_gadget *gadget,
1900                        struct usb_gadget_driver *driver)
1901{
1902        struct gr_udc *dev = to_gr_udc(gadget);
1903
1904        spin_lock(&dev->lock);
1905
1906        /* Hook up the driver */
1907        driver->driver.bus = NULL;
1908        dev->driver = driver;
1909
1910        /* Get ready for host detection */
1911        gr_enable_vbus_detect(dev);
1912
1913        spin_unlock(&dev->lock);
1914
1915        return 0;
1916}
1917
1918static int gr_udc_stop(struct usb_gadget *gadget)
1919{
1920        struct gr_udc *dev = to_gr_udc(gadget);
1921        unsigned long flags;
1922
1923        spin_lock_irqsave(&dev->lock, flags);
1924
1925        dev->driver = NULL;
1926        gr_stop_activity(dev);
1927
1928        spin_unlock_irqrestore(&dev->lock, flags);
1929
1930        return 0;
1931}
1932
1933static const struct usb_gadget_ops gr_ops = {
1934        .get_frame      = gr_get_frame,
1935        .wakeup         = gr_wakeup,
1936        .pullup         = gr_pullup,
1937        .udc_start      = gr_udc_start,
1938        .udc_stop       = gr_udc_stop,
1939        /* Other operations not supported */
1940};
1941
1942/* ---------------------------------------------------------------------- */
1943/* Module probe, removal and of-matching */
1944
1945static const char * const onames[] = {
1946        "ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
1947        "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
1948        "ep12out", "ep13out", "ep14out", "ep15out"
1949};
1950
1951static const char * const inames[] = {
1952        "ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
1953        "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
1954        "ep12in", "ep13in", "ep14in", "ep15in"
1955};
1956
1957/* Must be called with dev->lock held */
1958static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
1959{
1960        struct gr_ep *ep;
1961        struct gr_request *req;
1962        struct usb_request *_req;
1963        void *buf;
1964
1965        if (is_in) {
1966                ep = &dev->epi[num];
1967                ep->ep.name = inames[num];
1968                ep->regs = &dev->regs->epi[num];
1969        } else {
1970                ep = &dev->epo[num];
1971                ep->ep.name = onames[num];
1972                ep->regs = &dev->regs->epo[num];
1973        }
1974
1975        gr_ep_reset(ep);
1976        ep->num = num;
1977        ep->is_in = is_in;
1978        ep->dev = dev;
1979        ep->ep.ops = &gr_ep_ops;
1980        INIT_LIST_HEAD(&ep->queue);
1981
1982        if (num == 0) {
1983                _req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
1984                if (!_req)
1985                        return -ENOMEM;
1986
1987                buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
1988                if (!buf) {
1989                        gr_free_request(&ep->ep, _req);
1990                        return -ENOMEM;
1991                }
1992
1993                req = container_of(_req, struct gr_request, req);
1994                req->req.buf = buf;
1995                req->req.length = MAX_CTRL_PL_SIZE;
1996
1997                if (is_in)
1998                        dev->ep0reqi = req; /* req->complete is set where used */
1999                else
2000                        dev->ep0reqo = req; /* Completion treated separately */
2001
2002                usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
2003                ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
2004
2005                ep->ep.caps.type_control = true;
2006        } else {
2007                usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
2008                list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2009
2010                ep->ep.caps.type_iso = true;
2011                ep->ep.caps.type_bulk = true;
2012                ep->ep.caps.type_int = true;
2013        }
2014        list_add_tail(&ep->ep_list, &dev->ep_list);
2015
2016        if (is_in)
2017                ep->ep.caps.dir_in = true;
2018        else
2019                ep->ep.caps.dir_out = true;
2020
2021        ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
2022                                         &ep->tailbuf_paddr, GFP_ATOMIC);
2023        if (!ep->tailbuf)
2024                return -ENOMEM;
2025
2026        return 0;
2027}
2028
2029/* Must be called with dev->lock held */
2030static int gr_udc_init(struct gr_udc *dev)
2031{
2032        struct device_node *np = dev->dev->of_node;
2033        u32 epctrl_val;
2034        u32 dmactrl_val;
2035        int i;
2036        int ret = 0;
2037        u32 bufsize;
2038
2039        gr_set_address(dev, 0);
2040
2041        INIT_LIST_HEAD(&dev->gadget.ep_list);
2042        dev->gadget.speed = USB_SPEED_UNKNOWN;
2043        dev->gadget.ep0 = &dev->epi[0].ep;
2044
2045        INIT_LIST_HEAD(&dev->ep_list);
2046        gr_set_ep0state(dev, GR_EP0_DISCONNECT);
2047
2048        for (i = 0; i < dev->nepo; i++) {
2049                if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
2050                        bufsize = 1024;
2051                ret = gr_ep_init(dev, i, 0, bufsize);
2052                if (ret)
2053                        return ret;
2054        }
2055
2056        for (i = 0; i < dev->nepi; i++) {
2057                if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
2058                        bufsize = 1024;
2059                ret = gr_ep_init(dev, i, 1, bufsize);
2060                if (ret)
2061                        return ret;
2062        }
2063
2064        /* Must be disabled by default */
2065        dev->remote_wakeup = 0;
2066
2067        /* Enable ep0out and ep0in */
2068        epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
2069        dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
2070        gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
2071        gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
2072        gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
2073        gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);
2074
2075        return 0;
2076}
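
/*
 * The "epobufsizes" and "epibufsizes" properties read above come from the
 * device tree node matched in gr_match at the bottom of this file; endpoints
 * without an entry default to 1024 bytes. A hypothetical fragment, with
 * illustrative sizes only:
 *
 *	GAISLER_USBDC {
 *		epobufsizes = <1024 512 512>;
 *		epibufsizes = <1024 512 512>;
 *	};
 */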
2077
2078static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
2079{
2080        struct gr_ep *ep;
2081
2082        if (is_in)
2083                ep = &dev->epi[num];
2084        else
2085                ep = &dev->epo[num];
2086
2087        if (ep->tailbuf)
2088                dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
2089                                  ep->tailbuf, ep->tailbuf_paddr);
2090}
2091
2092static int gr_remove(struct platform_device *pdev)
2093{
2094        struct gr_udc *dev = platform_get_drvdata(pdev);
2095        int i;
2096
2097        if (dev->added)
2098                usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
2099        if (dev->driver)
2100                return -EBUSY;
2101
2102        gr_dfs_delete(dev);
2103        dma_pool_destroy(dev->desc_pool);
2104        platform_set_drvdata(pdev, NULL);
2105
2106        gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
2107        gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
2108
2109        for (i = 0; i < dev->nepo; i++)
2110                gr_ep_remove(dev, i, 0);
2111        for (i = 0; i < dev->nepi; i++)
2112                gr_ep_remove(dev, i, 1);
2113
2114        return 0;
2115}

2116static int gr_request_irq(struct gr_udc *dev, int irq)
2117{
2118        return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
2119                                         IRQF_SHARED, driver_name, dev);
2120}
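
/*
 * Each irq line (the single shared one, or the separate IN/OUT lines on core
 * configurations that provide them) is requested with the same
 * gr_irq/gr_irq_handler pair and is flagged IRQF_SHARED.
 */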
2121
2122static int gr_probe(struct platform_device *pdev)
2123{
2124        struct gr_udc *dev;
2125        struct gr_regs __iomem *regs;
2126        int retval;
2127        u32 status;
2128
2129        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2130        if (!dev)
2131                return -ENOMEM;
2132        dev->dev = &pdev->dev;
2133
2134        regs = devm_platform_ioremap_resource(pdev, 0);
2135        if (IS_ERR(regs))
2136                return PTR_ERR(regs);
2137
2138        dev->irq = platform_get_irq(pdev, 0);
2139        if (dev->irq <= 0)
2140                return -ENODEV;
2141
2142        /* Some core configurations have separate irqs for IN and OUT events */
2143        dev->irqi = platform_get_irq(pdev, 1);
2144        if (dev->irqi > 0) {
2145                dev->irqo = platform_get_irq(pdev, 2);
2146                if (dev->irqo <= 0)
2147                        return -ENODEV;
2148        } else {
2149                dev->irqi = 0;
2150        }
2151
2152        dev->gadget.name = driver_name;
2153        dev->gadget.max_speed = USB_SPEED_HIGH;
2154        dev->gadget.ops = &gr_ops;
2155
2156        spin_lock_init(&dev->lock);
2157        dev->regs = regs;
2158
2159        platform_set_drvdata(pdev, dev);
2160
2161        /* Determine number of endpoints and data interface mode */
2162        status = gr_read32(&dev->regs->status);
2163        dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
2164        dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
2165
2166        if (!(status & GR_STATUS_DM)) {
2167                dev_err(dev->dev, "Slave mode cores are not supported\n");
2168                return -ENODEV;
2169        }
2170
2171        /* --- Effects of the following calls might need explicit cleanup --- */
2172
2173        /* Create DMA pool for descriptors */
2174        dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
2175                                         sizeof(struct gr_dma_desc), 4, 0);
2176        if (!dev->desc_pool) {
2177                dev_err(dev->dev, "Could not allocate DMA pool\n");
2178                return -ENOMEM;
2179        }
2180
2182        retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
2183        if (retval) {
2184                dev_err(dev->dev, "Could not add gadget udc\n");
2185                goto out;
2186        }
2187        dev->added = 1;
2188
        /* Hold the lock so that no gadget can use this udc until it is initialized */
2189        spin_lock(&dev->lock);
2190
2191        retval = gr_udc_init(dev);
2192        if (retval) {
2193                spin_unlock(&dev->lock);
2194                goto out;
2195        }
2196
2197        /* Clear all interrupt enables that might be left on since last boot */
2198        gr_disable_interrupts_and_pullup(dev);
2199
2200        spin_unlock(&dev->lock);
2201
2202        gr_dfs_create(dev);
2203
2204        retval = gr_request_irq(dev, dev->irq);
2205        if (retval) {
2206                dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
2207                goto out;
2208        }
2209
2210        if (dev->irqi) {
2211                retval = gr_request_irq(dev, dev->irqi);
2212                if (retval) {
2213                        dev_err(dev->dev, "Failed to request irqi %d\n",
2214                                dev->irqi);
2215                        goto out;
2216                }
2217                retval = gr_request_irq(dev, dev->irqo);
2218                if (retval) {
2219                        dev_err(dev->dev, "Failed to request irqo %d\n",
2220                                dev->irqo);
2221                        goto out;
2222                }
2223        }
2224
2225        if (dev->irqi)
2226                dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
2227                         dev->irq, dev->irqi, dev->irqo);
2228        else
2229                dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
2230
2231out:
2232        if (retval)
2233                gr_remove(pdev);
2234
2235        return retval;
2236}
2237
2238static const struct of_device_id gr_match[] = {
2239        {.name = "GAISLER_USBDC"},
2240        {.name = "01_021"},
2241        {},
2242};
2243MODULE_DEVICE_TABLE(of, gr_match);
2244
2245static struct platform_driver gr_driver = {
2246        .driver = {
2247                .name = DRIVER_NAME,
2248                .of_match_table = gr_match,
2249        },
2250        .probe = gr_probe,
2251        .remove = gr_remove,
2252};
2253module_platform_driver(gr_driver);
2254
2255MODULE_AUTHOR("Aeroflex Gaisler AB.");
2256MODULE_DESCRIPTION(DRIVER_DESC);
2257MODULE_LICENSE("GPL");
2258