linux/drivers/usb/gadget/amd5536udc.c
/*
 * amd5536udc.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a high-speed, DMA-capable USB device controller. Besides ep0,
 * it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that UDC is assigned to port 4 by BIOS settings (the port
 * can also be used as a host port) and that the UOC bits PAD_EN and APU
 * are set (should be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does
 * not work without updating NET_IP_ALIGN. Alternatively, PIO mode (module
 * param "use_dma=0") can be used with gadget ether.
 */

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION             "AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING       "01.00.0206 - $Revision: #3 $"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"


static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
                                unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- Is used to control enabling of RX DMA. The problem is
 * that the UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving RX DMA disabled until a
 * request gets queued, because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
                (unsigned long) &udc);


/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *ep_string[] = {
        ep0_string,
        "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
        "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
        "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
        "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
        "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
        "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
        "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};

/* DMA usage flag */
static bool use_dma = true;
/* packet per buffer dma */
static bool use_dma_ppb = true;
/* with per descr. update */
static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
        "true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
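
/*
 * Illustrative example (module name taken from name[] above): to fall
 * back to PIO/FIFO mode, e.g. for gadget ether without a NET_IP_ALIGN
 * change, load the module with
 *
 *      modprobe amd5536udc use_dma=0
 */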

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
        DBG(dev, "------- Device registers -------\n");
        DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
        DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
        DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
        DBG(dev, "\n");
        DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
        DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
        DBG(dev, "\n");
        DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
        DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
        DBG(dev, "\n");
        DBG(dev, "USE DMA        = %d\n", use_dma);
        if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
                DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
                        "WITHOUT desc. update)\n");
                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
        } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
                DBG(dev, "DMA mode       = PPBDU (packet per buffer "
                        "WITH desc. update)\n");
                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
        }
        if (use_dma && use_dma_bufferfill_mode) {
                DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
        }
        if (!use_dma) {
                dev_info(&dev->pdev->dev, "FIFO mode\n");
        }
        DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
        u32 tmp;

        /* mask all dev interrupts */
        tmp =   AMD_BIT(UDC_DEVINT_SVC) |
                AMD_BIT(UDC_DEVINT_ENUM) |
                AMD_BIT(UDC_DEVINT_US) |
                AMD_BIT(UDC_DEVINT_UR) |
                AMD_BIT(UDC_DEVINT_ES) |
                AMD_BIT(UDC_DEVINT_SI) |
                AMD_BIT(UDC_DEVINT_SOF) |
                AMD_BIT(UDC_DEVINT_SC);
        writel(tmp, &dev->regs->irqmsk);

        /* mask all ep interrupts */
        writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

        return 0;
}

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
        u32 tmp;

        DBG(dev, "udc_enable_ep0_interrupts()\n");

        /* read irq mask */
        tmp = readl(&dev->regs->ep_irqmsk);
        /* enable ep0 irq's */
        tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
                & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
        writel(tmp, &dev->regs->ep_irqmsk);

        return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
        u32 tmp;

        DBG(dev, "enable device interrupts for setup data\n");

        /* read irq mask */
        tmp = readl(&dev->regs->irqmsk);

        /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
        tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
                & AMD_UNMASK_BIT(UDC_DEVINT_SC)
                & AMD_UNMASK_BIT(UDC_DEVINT_UR)
                & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
                & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
        writel(tmp, &dev->regs->irqmsk);

        return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
        struct udc      *dev;
        u32 tmp;
        int i;

        if (!ep || !(ep->in))
                return -EINVAL;

        dev = ep->dev;
        ep->txfifo = dev->txfifo;

        /* traverse ep's */
        for (i = 0; i < ep->num; i++) {
                if (dev->ep[i].regs) {
                        /* read fifo size */
                        tmp = readl(&dev->ep[i].regs->bufin_framenum);
                        tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
                        ep->txfifo += tmp;
                }
        }
        return 0;
}

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

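/*
 * Checks whether writing CNAK actually cleared NAK; if the endpoint
 * still NAKs, its bit is recorded in cnak_pending so that clearing
 * can be retried later.
 */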
static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
        if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
                DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
                cnak_pending |= 1 << (num);
                ep->naking = 1;
        } else
                cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
        struct udc_ep           *ep;
        struct udc              *dev;
        u32                     tmp;
        unsigned long           iflags;
        u8 udc_csr_epix;
        unsigned                maxpacket;

        if (!usbep
                        || usbep->name == ep0_string
                        || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;

        ep = container_of(usbep, struct udc_ep, ep);
        dev = ep->dev;

        DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        spin_lock_irqsave(&dev->lock, iflags);
        ep->desc = desc;

        ep->halted = 0;

        /* set traffic type */
        tmp = readl(&dev->ep[ep->num].regs->ctl);
        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
        writel(tmp, &dev->ep[ep->num].regs->ctl);

        /* set max packet size */
        maxpacket = usb_endpoint_maxp(desc);
        tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
        tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
        ep->ep.maxpacket = maxpacket;
        writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

        /* IN ep */
        if (ep->in) {

                /* ep ix in UDC CSR register space */
                udc_csr_epix = ep->num;

                /* set buffer size (tx fifo entries) */
                tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
                /* double buffering: fifo size = 2 x max packet size */
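                /* e.g. 512 byte maxpacket: 2 * 512 / 4 = 256 dword entries */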
                tmp = AMD_ADDBITS(
                                tmp,
                                maxpacket * UDC_EPIN_BUFF_SIZE_MULT
                                          / UDC_DWORD_BYTES,
                                UDC_EPIN_BUFF_SIZE);
                writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

                /* calc. tx fifo base addr */
                udc_set_txfifo_addr(ep);

                /* flush fifo */
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_F);
                writel(tmp, &ep->regs->ctl);

        /* OUT ep */
        } else {
                /* ep ix in UDC CSR register space */
                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

                /* set max packet size UDC CSR */
                tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
                tmp = AMD_ADDBITS(tmp, maxpacket,
                                        UDC_CSR_NE_MAX_PKT);
                writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

                if (use_dma && !ep->in) {
                        /* alloc and init BNA dummy request */
                        ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
                        ep->bna_occurred = 0;
                }

                if (ep->num != UDC_EP0OUT_IX)
                        dev->data_ep_enabled = 1;
        }

        /* set ep values */
        tmp = readl(&dev->csr->ne[udc_csr_epix]);
        /* max packet */
        tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
        /* ep number */
        tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
        /* ep direction */
        tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
        /* ep type */
        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
        /* ep config */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
        /* ep interface */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
        /* ep alt */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
        /* write reg */
        writel(tmp, &dev->csr->ne[udc_csr_epix]);

        /* enable ep irq */
        tmp = readl(&dev->regs->ep_irqmsk);
        tmp &= AMD_UNMASK_BIT(ep->num);
        writel(tmp, &dev->regs->ep_irqmsk);

        /*
         * clear NAK by writing CNAK
         * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
         */
        if (!use_dma || ep->in) {
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                writel(tmp, &ep->regs->ctl);
                ep->naking = 0;
                UDC_QUEUE_CNAK(ep, ep->num);
        }
        tmp = desc->bEndpointAddress;
        DBG(dev, "%s enabled\n", usbep->name);

        spin_unlock_irqrestore(&dev->lock, iflags);
        return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
        u32             tmp;

        VDBG(ep->dev, "ep-%d reset\n", ep->num);
        ep->desc = NULL;
        ep->ep.desc = NULL;
        ep->ep.ops = &udc_ep_ops;
        INIT_LIST_HEAD(&ep->queue);

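        /* (u16)~0 presumably marks maxpacket as not yet configured */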
        ep->ep.maxpacket = (u16) ~0;
        /* set NAK */
        tmp = readl(&ep->regs->ctl);
        tmp |= AMD_BIT(UDC_EPCTL_SNAK);
        writel(tmp, &ep->regs->ctl);
        ep->naking = 1;

        /* disable interrupt */
        tmp = readl(&regs->ep_irqmsk);
        tmp |= AMD_BIT(ep->num);
        writel(tmp, &regs->ep_irqmsk);

        if (ep->in) {
                /* unset P and IN bit of potential former DMA */
                tmp = readl(&ep->regs->ctl);
                tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
                writel(tmp, &ep->regs->ctl);

                tmp = readl(&ep->regs->sts);
                tmp |= AMD_BIT(UDC_EPSTS_IN);
                writel(tmp, &ep->regs->sts);

                /* flush the fifo */
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_F);
                writel(tmp, &ep->regs->ctl);

        }
        /* reset desc pointer */
        writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
        struct udc_ep   *ep = NULL;
        unsigned long   iflags;

        if (!usbep)
                return -EINVAL;

        ep = container_of(usbep, struct udc_ep, ep);
        if (usbep->name == ep0_string || !ep->desc)
                return -EINVAL;

        DBG(ep->dev, "Disable ep-%d\n", ep->num);

        spin_lock_irqsave(&ep->dev->lock, iflags);
        udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
        empty_req_queue(ep);
        ep_init(ep->dev->regs, ep);
        spin_unlock_irqrestore(&ep->dev->lock, iflags);

        return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
        struct udc_request      *req;
        struct udc_data_dma     *dma_desc;
        struct udc_ep   *ep;

        if (!usbep)
                return NULL;

        ep = container_of(usbep, struct udc_ep, ep);

        VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
        req = kzalloc(sizeof(struct udc_request), gfp);
        if (!req)
                return NULL;

        req->req.dma = DMA_DONT_USE;
        INIT_LIST_HEAD(&req->queue);

        if (ep->dma) {
                /* ep0 in requests are allocated from data pool here */
                dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
                                                &req->td_phys);
                if (!dma_desc) {
                        kfree(req);
                        return NULL;
                }

                VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
                                "td_phys = %lx\n",
                                req, dma_desc,
                                (unsigned long)req->td_phys);
                /* prevent the desc. from being used - set HOST BUSY */
                dma_desc->status = AMD_ADDBITS(dma_desc->status,
                                                UDC_DMA_STP_STS_BS_HOST_BUSY,
                                                UDC_DMA_STP_STS_BS);
                dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
                req->td_data = dma_desc;
                req->td_data_last = NULL;
                req->chain_len = 1;
        }

        return &req->req;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
        struct udc_ep   *ep;
        struct udc_request      *req;

        if (!usbep || !usbreq)
                return;

        ep = container_of(usbep, struct udc_ep, ep);
        req = container_of(usbreq, struct udc_request, req);
        VDBG(ep->dev, "free_req req=%p\n", req);
        BUG_ON(!list_empty(&req->queue));
        if (req->td_data) {
                VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

                /* free dma chain if created */
                if (req->chain_len > 1) {
                        udc_free_dma_chain(ep->dev, req);
                }

                pci_pool_free(ep->dev->data_requests, req->td_data,
                                                        req->td_phys);
        }
        kfree(req);
}

/* Init BNA dummy descriptor: set L bit, DMA DONE status, link to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
        if (req) {
                /* set last bit */
                req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
                /* set next pointer to itself */
                req->td_data->next = req->td_phys;
                /* set buffer status to DMA DONE */
                req->td_data->status
                        = AMD_ADDBITS(req->td_data->status,
                                        UDC_DMA_STP_STS_BS_DMA_DONE,
                                        UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
                pr_debug("bna desc = %p, sts = %08x\n",
                        req->td_data, req->td_data->status);
#endif
        }
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
        struct udc_request *req = NULL;
        struct usb_request *_req = NULL;

        /* alloc the dummy request */
        _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
        if (_req) {
                req = container_of(_req, struct udc_request, req);
                ep->bna_dummy_req = req;
                udc_init_bna_dummy(req);
        }
        return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
        u8                      *req_buf;
        u32                     *buf;
        int                     i, j;
        unsigned                bytes = 0;
        unsigned                remaining = 0;

        if (!req || !ep)
                return;

        req_buf = req->buf + req->actual;
        prefetch(req_buf);
        remaining = req->length - req->actual;

        buf = (u32 *) req_buf;

        bytes = ep->ep.maxpacket;
        if (bytes > remaining)
                bytes = remaining;

        /* dwords first */
        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
                writel(*(buf + i), ep->txfifo);
        }

        /* remaining bytes must be written by byte access */
        for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
                writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
                                                        ep->txfifo);
        }

        /* dummy write confirm */
        writel(0, &ep->regs->confirm);
}

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
        int i;

        VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

        for (i = 0; i < dwords; i++) {
                *(buf + i) = readl(dev->rxfifo);
        }
        return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
        int i, j;
        u32 tmp;

        VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

        /* dwords first */
        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
                *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
        }

        /* remaining bytes must be read by byte access */
        if (bytes % UDC_DWORD_BYTES) {
                tmp = readl(dev->rxfifo);
                for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
                        *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
                        tmp = tmp >> UDC_BITS_PER_BYTE;
                }
        }

        return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
        u8 *buf;
        unsigned buf_space;
        unsigned bytes = 0;
        unsigned finished = 0;

        /* number of received bytes */
        bytes = readl(&ep->regs->sts);
        bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

        buf_space = req->req.length - req->req.actual;
        buf = req->req.buf + req->req.actual;
        if (bytes > buf_space) {
                if ((buf_space % ep->ep.maxpacket) != 0) {
                        DBG(ep->dev,
                                "%s: rx %d bytes, rx-buf space = %d bytes\n",
                                ep->ep.name, bytes, buf_space);
                        req->req.status = -EOVERFLOW;
                }
                bytes = buf_space;
        }
        req->req.actual += bytes;

        /* last packet ? */
        if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
                || ((req->req.actual == req->req.length) && !req->req.zero))
                finished = 1;

        /* read rx fifo bytes */
        VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
        udc_rxfifo_read_bytes(ep->dev, buf, bytes);

        return finished;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
        int     retval = 0;
        u32     tmp;

        VDBG(ep->dev, "prep_dma\n");
        VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
                        ep->num, req->td_data);

        /* set buffer pointer */
        req->td_data->bufptr = req->req.dma;

        /* set last bit */
        req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

        /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
        if (use_dma_ppb) {

                retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
                if (retval != 0) {
                        if (retval == -ENOMEM)
                                DBG(ep->dev, "Out of DMA memory\n");
                        return retval;
                }
                if (ep->in) {
                        if (req->req.length == ep->ep.maxpacket) {
                                /* write tx bytes */
                                req->td_data->status =
                                        AMD_ADDBITS(req->td_data->status,
                                                ep->ep.maxpacket,
                                                UDC_DMA_IN_STS_TXBYTES);

                        }
                }

        }

        if (ep->in) {
                VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
                                "maxpacket=%d ep%d\n",
                                use_dma_ppb, req->req.length,
                                ep->ep.maxpacket, ep->num);
                /*
                 * if bytes < max packet then tx bytes must
                 * be written in packet per buffer mode
                 */
                if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
                                || ep->num == UDC_EP0OUT_IX
                                || ep->num == UDC_EP0IN_IX) {
                        /* write tx bytes */
                        req->td_data->status =
                                AMD_ADDBITS(req->td_data->status,
                                                req->req.length,
                                                UDC_DMA_IN_STS_TXBYTES);
                        /* reset frame num */
                        req->td_data->status =
                                AMD_ADDBITS(req->td_data->status,
                                                0,
                                                UDC_DMA_IN_STS_FRAMENUM);
                }
                /* set HOST BUSY */
                req->td_data->status =
                        AMD_ADDBITS(req->td_data->status,
                                UDC_DMA_STP_STS_BS_HOST_BUSY,
                                UDC_DMA_STP_STS_BS);
        } else {
                VDBG(ep->dev, "OUT set host ready\n");
                /* set HOST READY */
                req->td_data->status =
                        AMD_ADDBITS(req->td_data->status,
                                UDC_DMA_STP_STS_BS_HOST_READY,
                                UDC_DMA_STP_STS_BS);

                /* clear NAK by writing CNAK */
                if (ep->naking) {
                        tmp = readl(&ep->regs->ctl);
                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                        writel(tmp, &ep->regs->ctl);
                        ep->naking = 0;
                        UDC_QUEUE_CNAK(ep, ep->num);
                }
        }

        return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
        struct udc              *dev;
        unsigned                halted;

        VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

        dev = ep->dev;
        /* unmap DMA */
        if (req->dma_mapping) {
                if (ep->in)
                        pci_unmap_single(dev->pdev,
                                        req->req.dma,
                                        req->req.length,
                                        PCI_DMA_TODEVICE);
                else
                        pci_unmap_single(dev->pdev,
                                        req->req.dma,
                                        req->req.length,
                                        PCI_DMA_FROMDEVICE);
                req->dma_mapping = 0;
                req->req.dma = DMA_DONT_USE;
        }

        halted = ep->halted;
        ep->halted = 1;

        /* set new status if pending */
        if (req->req.status == -EINPROGRESS)
                req->req.status = sts;

        /* remove from ep queue */
        list_del_init(&req->queue);

        VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
                &req->req, req->req.length, ep->ep.name, sts);

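        /* call the gadget completion handler without holding the lock */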
        spin_unlock(&dev->lock);
        req->req.complete(&ep->ep, &req->req);
        spin_lock(&dev->lock);
        ep->halted = halted;
}

/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
        int ret_val = 0;
        struct udc_data_dma     *td;
        struct udc_data_dma     *td_last = NULL;
        unsigned int i;

        DBG(dev, "free chain req = %p\n", req);

        /* do not free first desc.; it is freed with the request itself */
        td_last = req->td_data;
        td = phys_to_virt(td_last->next);

        for (i = 1; i < req->chain_len; i++) {

                pci_pool_free(dev->data_requests, td,
                                (dma_addr_t) td_last->next);
                td_last = td;
                td = phys_to_virt(td_last->next);
        }

        return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
        struct udc_data_dma     *td;

        td = req->td_data;
        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
                td = phys_to_virt(td->next);
        }

        return td;
}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
        struct udc_data_dma     *td;
        u32 count;

        td = req->td_data;
        /* number of received bytes */
        count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
                td = phys_to_virt(td->next);
                /* number of received bytes */
                if (td) {
                        count += AMD_GETBITS(td->status,
                                UDC_DMA_OUT_STS_RXBYTES);
                }
        }

        return count;
}

/* Creates or re-inits a DMA chain */
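/*
 * One descriptor is used per buf_len sized chunk of the request buffer.
 * If a previously allocated chain is long enough, it is re-initialized
 * and reused; otherwise it is freed and a new chain is allocated.
 */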
static int udc_create_dma_chain(
        struct udc_ep *ep,
        struct udc_request *req,
        unsigned long buf_len, gfp_t gfp_flags
)
{
        unsigned long bytes = req->req.length;
        unsigned int i;
        dma_addr_t dma_addr;
        struct udc_data_dma     *td = NULL;
        struct udc_data_dma     *last = NULL;
        unsigned long txbytes;
        unsigned create_new_chain = 0;
        unsigned len;

        VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
                        bytes, buf_len);
        dma_addr = DMA_DONT_USE;

        /* unset L bit in first desc for OUT */
        if (!ep->in) {
                req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
        }

        /* alloc only new desc's if not already available */
        len = req->req.length / ep->ep.maxpacket;
        if (req->req.length % ep->ep.maxpacket) {
                len++;
        }

        if (len > req->chain_len) {
                /* shorter chain already allocated before */
                if (req->chain_len > 1) {
                        udc_free_dma_chain(ep->dev, req);
                }
                req->chain_len = len;
                create_new_chain = 1;
        }

        td = req->td_data;
        /* gen. required number of descriptors and buffers */
        for (i = buf_len; i < bytes; i += buf_len) {
                /* create or determine next desc. */
                if (create_new_chain) {

                        td = pci_pool_alloc(ep->dev->data_requests,
                                        gfp_flags, &dma_addr);
                        if (!td)
                                return -ENOMEM;

                        td->status = 0;
                } else if (i == buf_len) {
                        /* first td */
                        td = (struct udc_data_dma *) phys_to_virt(
                                                req->td_data->next);
                        td->status = 0;
                } else {
                        td = (struct udc_data_dma *) phys_to_virt(last->next);
                        td->status = 0;
                }

                if (td)
                        td->bufptr = req->req.dma + i; /* assign buffer */
                else
                        break;

                /* short packet ? */
                if ((bytes - i) >= buf_len) {
                        txbytes = buf_len;
                } else {
                        /* short packet */
                        txbytes = bytes - i;
                }

                /* link td and assign tx bytes */
                if (i == buf_len) {
                        if (create_new_chain) {
                                req->td_data->next = dma_addr;
                        } else {
                                /* req->td_data->next = virt_to_phys(td); */
                        }
                        /* write tx bytes */
                        if (ep->in) {
                                /* first desc */
                                req->td_data->status =
                                        AMD_ADDBITS(req->td_data->status,
                                                        ep->ep.maxpacket,
                                                        UDC_DMA_IN_STS_TXBYTES);
                                /* second desc */
                                td->status = AMD_ADDBITS(td->status,
                                                        txbytes,
                                                        UDC_DMA_IN_STS_TXBYTES);
                        }
                } else {
                        if (create_new_chain) {
                                last->next = dma_addr;
                        } else {
                                /* last->next = virt_to_phys(td); */
                        }
                        if (ep->in) {
                                /* write tx bytes */
                                td->status = AMD_ADDBITS(td->status,
                                                        txbytes,
                                                        UDC_DMA_IN_STS_TXBYTES);
                        }
                }
                last = td;
        }
        /* set last bit */
        if (td) {
                td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
                /* remember the last descriptor */
                req->td_data_last = td;
        }

        return 0;
}

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
        u32 tmp;

        VDBG(dev, "udc_set_rde()\n");
        /* stop RDE timer */
        if (timer_pending(&udc_timer)) {
                set_rde = 0;
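                /* an expiry time in the past makes the timer fire at once */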
                mod_timer(&udc_timer, jiffies - 1);
        }
        /* set RDE */
        tmp = readl(&dev->regs->ctl);
        tmp |= AMD_BIT(UDC_DEVCTL_RDE);
        writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
        int                     retval = 0;
        u8                      open_rxfifo = 0;
        unsigned long           iflags;
        struct udc_ep           *ep;
        struct udc_request      *req;
        struct udc              *dev;
        u32                     tmp;

        /* check the inputs */
        req = container_of(usbreq, struct udc_request, req);

        if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
                        || !list_empty(&req->queue))
                return -EINVAL;

        ep = container_of(usbep, struct udc_ep, ep);
        if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
                return -EINVAL;

        VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
        dev = ep->dev;

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* map dma (usually done before) */
        if (ep->dma && usbreq->length != 0
                        && (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
                VDBG(dev, "DMA map req %p\n", req);
                if (ep->in)
                        usbreq->dma = pci_map_single(dev->pdev,
                                                usbreq->buf,
                                                usbreq->length,
                                                PCI_DMA_TODEVICE);
                else
                        usbreq->dma = pci_map_single(dev->pdev,
                                                usbreq->buf,
                                                usbreq->length,
                                                PCI_DMA_FROMDEVICE);
                req->dma_mapping = 1;
        }

        VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
                        usbep->name, usbreq, usbreq->length,
                        req->td_data, usbreq->buf);

        spin_lock_irqsave(&dev->lock, iflags);
        usbreq->actual = 0;
        usbreq->status = -EINPROGRESS;
        req->dma_done = 0;

        /* on empty queue just do first transfer */
        if (list_empty(&ep->queue)) {
                /* zlp */
                if (usbreq->length == 0) {
                        /* IN zlp's are handled by hardware */
                        complete_req(ep, req, 0);
                        VDBG(dev, "%s: zlp\n", ep->ep.name);
                        /*
                         * if set_config or set_intf is waiting for ack by zlp
                         * then set CSR_DONE
                         */
                        if (dev->set_cfg_not_acked) {
                                tmp = readl(&dev->regs->ctl);
                                tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
                                writel(tmp, &dev->regs->ctl);
                                dev->set_cfg_not_acked = 0;
                        }
                        /* setup command is ACK'ed now by zlp */
                        if (dev->waiting_zlp_ack_ep0in) {
                                /* clear NAK by writing CNAK in EP0_IN */
                                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
                                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                                writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
                                dev->ep[UDC_EP0IN_IX].naking = 0;
                                UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
                                                        UDC_EP0IN_IX);
                                dev->waiting_zlp_ack_ep0in = 0;
                        }
                        goto finished;
                }
                if (ep->dma) {
                        retval = prep_dma(ep, req, gfp);
                        if (retval != 0)
                                goto finished;
                        /* write desc pointer to enable DMA */
                        if (ep->in) {
                                /* set HOST READY */
                                req->td_data->status =
                                        AMD_ADDBITS(req->td_data->status,
                                                UDC_DMA_IN_STS_BS_HOST_READY,
                                                UDC_DMA_IN_STS_BS);
                        }

                        /* disable RX DMA while updating the descriptor */
                        if (!ep->in) {
                                /* stop RDE timer */
                                if (timer_pending(&udc_timer)) {
                                        set_rde = 0;
                                        mod_timer(&udc_timer, jiffies - 1);
                                }
                                /* clear RDE */
                                tmp = readl(&dev->regs->ctl);
                                tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
                                writel(tmp, &dev->regs->ctl);
                                open_rxfifo = 1;

                                /*
                                 * if BNA occurred then let BNA dummy desc.
                                 * point to current desc.
                                 */
                                if (ep->bna_occurred) {
                                        VDBG(dev, "copy to BNA dummy desc.\n");
                                        memcpy(ep->bna_dummy_req->td_data,
                                                req->td_data,
                                                sizeof(struct udc_data_dma));
                                }
                        }
                        /* write desc pointer */
                        writel(req->td_phys, &ep->regs->desptr);

                        /* clear NAK by writing CNAK */
                        if (ep->naking) {
                                tmp = readl(&ep->regs->ctl);
                                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                                writel(tmp, &ep->regs->ctl);
                                ep->naking = 0;
                                UDC_QUEUE_CNAK(ep, ep->num);
                        }

                        if (ep->in) {
                                /* enable ep irq */
                                tmp = readl(&dev->regs->ep_irqmsk);
                                tmp &= AMD_UNMASK_BIT(ep->num);
                                writel(tmp, &dev->regs->ep_irqmsk);
                        }
                } else if (ep->in) {
                        /* enable ep irq */
                        tmp = readl(&dev->regs->ep_irqmsk);
                        tmp &= AMD_UNMASK_BIT(ep->num);
                        writel(tmp, &dev->regs->ep_irqmsk);
                }

        } else if (ep->dma) {

                /*
                 * prep_dma not used for OUT ep's, this is not possible
                 * for PPB modes, because of chain creation reasons
                 */
                if (ep->in) {
                        retval = prep_dma(ep, req, gfp);
                        if (retval != 0)
                                goto finished;
                }
        }
        VDBG(dev, "list_add\n");
        /* add request to ep queue */
        if (req) {

                list_add_tail(&req->queue, &ep->queue);

                /* open rxfifo if out data queued */
                if (open_rxfifo) {
                        /* enable DMA */
                        req->dma_going = 1;
                        udc_set_rde(dev);
                        if (ep->num != UDC_EP0OUT_IX)
                                dev->data_ep_queued = 1;
                }
                /* stop OUT naking */
                if (!ep->in) {
                        if (!use_dma && udc_rxfifo_pending) {
                                DBG(dev, "udc_queue(): pending bytes in "
                                        "rxfifo after nyet\n");
                                /*
                                 * read pending bytes after nyet:
                                 * referring to isr
                                 */
                                if (udc_rxfifo_read(ep, req)) {
                                        /* finish */
                                        complete_req(ep, req, 0);
                                }
                                udc_rxfifo_pending = 0;

                        }
                }
        }

finished:
        spin_unlock_irqrestore(&dev->lock, iflags);
        return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
        struct udc_request      *req;

        ep->halted = 1;
        while (!list_empty(&ep->queue)) {
                req = list_entry(ep->queue.next,
                        struct udc_request,
                        queue);
                complete_req(ep, req, -ESHUTDOWN);
        }
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
        struct udc_ep           *ep;
        struct udc_request      *req;
        unsigned                halted;
        unsigned long           iflags;

        ep = container_of(usbep, struct udc_ep, ep);
        if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
                                && ep->num != UDC_EP0OUT_IX)))
                return -EINVAL;

        req = container_of(usbreq, struct udc_request, req);

        spin_lock_irqsave(&ep->dev->lock, iflags);
        halted = ep->halted;
        ep->halted = 1;
        /* request in processing or next one */
        if (ep->queue.next == &req->queue) {
                if (ep->dma && req->dma_going) {
                        if (ep->in)
                                ep->cancel_transfer = 1;
                        else {
                                u32 tmp;
                                u32 dma_sts;
                                /* stop potential receive DMA */
                                tmp = readl(&udc->regs->ctl);
                                writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
                                                        &udc->regs->ctl);
                                /*
                                 * Cancel transfer later in ISR
                                 * if descriptor was touched.
                                 */
                                dma_sts = AMD_GETBITS(req->td_data->status,
                                                        UDC_DMA_OUT_STS_BS);
                                if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
                                        ep->cancel_transfer = 1;
                                else {
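                                        /*
                                         * desc. untouched: park DMA on the
                                         * self-linked BNA dummy descriptor
                                         */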
                                        udc_init_bna_dummy(ep->req);
                                        writel(ep->bna_dummy_req->td_phys,
                                                &ep->regs->desptr);
                                }
                                writel(tmp, &udc->regs->ctl);
                        }
                }
        }
        complete_req(ep, req, -ECONNRESET);
        ep->halted = halted;

        spin_unlock_irqrestore(&ep->dev->lock, iflags);
        return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
        struct udc_ep   *ep;
        u32 tmp;
        unsigned long iflags;
        int retval = 0;

        if (!usbep)
                return -EINVAL;

        pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

        ep = container_of(usbep, struct udc_ep, ep);
        if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
                return -EINVAL;
        if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        spin_lock_irqsave(&udc_stall_spinlock, iflags);
        /* halt or clear halt */
        if (halt) {
                if (ep->num == 0)
                        ep->dev->stall_ep0in = 1;
                else {
                        /*
                         * set STALL
                         * rxfifo empty not taken into account
                         */
                        tmp = readl(&ep->regs->ctl);
                        tmp |= AMD_BIT(UDC_EPCTL_S);
                        writel(tmp, &ep->regs->ctl);
                        ep->halted = 1;

                        /* setup poll timer */
                        if (!timer_pending(&udc_pollstall_timer)) {
                                udc_pollstall_timer.expires = jiffies +
                                        HZ * UDC_POLLSTALL_TIMER_USECONDS
                                        / (1000 * 1000);
                                if (!stop_pollstall_timer) {
                                        DBG(ep->dev, "start polltimer\n");
                                        add_timer(&udc_pollstall_timer);
                                }
                        }
                }
        } else {
                /* ep was halted by set_halt() before */
                if (ep->halted) {
                        tmp = readl(&ep->regs->ctl);
                        /* clear stall bit */
                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
                        /* clear NAK by writing CNAK */
                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                        writel(tmp, &ep->regs->ctl);
                        ep->halted = 0;
                        UDC_QUEUE_CNAK(ep, ep->num);
                }
        }
        spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
        return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
        .enable         = udc_ep_enable,
        .disable        = udc_ep_disable,

        .alloc_request  = udc_alloc_request,
        .free_request   = udc_free_request,

        .queue          = udc_queue,
        .dequeue        = udc_dequeue,

        .set_halt       = udc_set_halt,
        /* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
        return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
        struct udc              *dev;

        if (!gadget)
                return -EINVAL;
        dev = container_of(gadget, struct udc, gadget);
        udc_remote_wakeup(dev);

        return 0;
}

static int amd5536_start(struct usb_gadget_driver *driver,
                int (*bind)(struct usb_gadget *));
static int amd5536_stop(struct usb_gadget_driver *driver);
/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
        .wakeup         = udc_wakeup,
        .get_frame      = udc_get_frame,
        .start          = amd5536_start,
        .stop           = amd5536_stop,
};

1444/* Sets up endpoint parameters and adds endpoints to the gadget ep list */
1445static void make_ep_lists(struct udc *dev)
1446{
1447        /* make gadget ep lists */
1448        INIT_LIST_HEAD(&dev->gadget.ep_list);
1449        list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1450                                                &dev->gadget.ep_list);
1451        list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1452                                                &dev->gadget.ep_list);
1453        list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1454                                                &dev->gadget.ep_list);
1455
1456        /* fifo config */
1457        dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1458        if (dev->gadget.speed == USB_SPEED_FULL)
1459                dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1460        else if (dev->gadget.speed == USB_SPEED_HIGH)
1461                dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1462        dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1463}
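/*
 * Note that ep0 is not put on gadget.ep_list here; it is exported separately
 * through dev->gadget.ep0 in udc_setup_endpoints(), as the gadget framework
 * expects.
 */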
1464
1465/* init registers at driver load time */
1466static int startup_registers(struct udc *dev)
1467{
1468        u32 tmp;
1469
1470        /* init controller by soft reset */
1471        udc_soft_reset(dev);
1472
1473        /* mask unneeded interrupts */
1474        udc_mask_unused_interrupts(dev);
1475
1476        /* put into initial config */
1477        udc_basic_init(dev);
1478        /* link up all endpoints */
1479        udc_setup_endpoints(dev);
1480
1481        /* program speed */
1482        tmp = readl(&dev->regs->cfg);
1483        if (use_fullspeed) {
1484                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1485        } else {
1486                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1487        }
1488        writel(tmp, &dev->regs->cfg);
1489
1490        return 0;
1491}
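/*
 * The AMD_* helpers used above are bitfield accessors from amd5536udc.h.
 * Roughly (a sketch of the idea, not the verbatim header; the real macros
 * token-paste per-field <field>_MASK/<field>_OFS constants):
 *
 *	AMD_BIT(pos)            -> (1 << (pos))
 *	AMD_GETBITS(val, f)     -> ((val) & f_MASK) >> f_OFS
 *	AMD_ADDBITS(val, v, f)  -> ((val) & ~f_MASK) | (((v) << f_OFS) & f_MASK)
 *
 * so AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD) replaces only the
 * speed field of the device config register.
 */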
1492
1493/* Inits UDC context */
1494static void udc_basic_init(struct udc *dev)
1495{
1496        u32     tmp;
1497
1498        DBG(dev, "udc_basic_init()\n");
1499
1500        dev->gadget.speed = USB_SPEED_UNKNOWN;
1501
1502        /* stop RDE timer */
1503        if (timer_pending(&udc_timer)) {
1504                set_rde = 0;
1505                mod_timer(&udc_timer, jiffies - 1);
1506        }
1507        /* stop poll stall timer */
1508        if (timer_pending(&udc_pollstall_timer)) {
1509                mod_timer(&udc_pollstall_timer, jiffies - 1);
1510        }
1511        /* disable DMA */
1512        tmp = readl(&dev->regs->ctl);
1513        tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1514        tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
1515        writel(tmp, &dev->regs->ctl);
1516
1517        /* enable dynamic CSR programming */
1518        tmp = readl(&dev->regs->cfg);
1519        tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
1520        /* set self powered */
1521        tmp |= AMD_BIT(UDC_DEVCFG_SP);
1522        /* set remote wakeup capable */
1523        tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
1524        writel(tmp, &dev->regs->cfg);
1525
1526        make_ep_lists(dev);
1527
1528        dev->data_ep_enabled = 0;
1529        dev->data_ep_queued = 0;
1530}
1531
1532/* Sets initial endpoint parameters */
1533static void udc_setup_endpoints(struct udc *dev)
1534{
1535        struct udc_ep   *ep;
1536        u32     tmp;
1537        u32     reg;
1538
1539        DBG(dev, "udc_setup_endpoints()\n");
1540
1541        /* read enum speed */
1542        tmp = readl(&dev->regs->sts);
1543        tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
1544        if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
1545                dev->gadget.speed = USB_SPEED_HIGH;
1546        } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
1547                dev->gadget.speed = USB_SPEED_FULL;
1548        }
1549
1550        /* set basic ep parameters */
1551        for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1552                ep = &dev->ep[tmp];
1553                ep->dev = dev;
1554                ep->ep.name = ep_string[tmp];
1555                ep->num = tmp;
1556                /* txfifo size is calculated at enable time */
1557                ep->txfifo = dev->txfifo;
1558
1559                /* fifo size */
1560                if (tmp < UDC_EPIN_NUM) {
1561                        ep->fifo_depth = UDC_TXFIFO_SIZE;
1562                        ep->in = 1;
1563                } else {
1564                        ep->fifo_depth = UDC_RXFIFO_SIZE;
1565                        ep->in = 0;
1566
1567                }
1568                ep->regs = &dev->ep_regs[tmp];
1569                /*
1570                 * reset ep only if it was not enabled before, to avoid
1571                 * disabling its interrupts when the ENUM interrupt occurs
1572                 * while the ep is already in use by the gadget driver
1573                 */
1574                if (!ep->desc) {
1575                        ep_init(dev->regs, ep);
1576                }
1577
1578                if (use_dma) {
1579                        /*
1580                         * ep->dma is not really used; it only flags that
1581                         * DMA is active (REVISIT: remove this).
1582                         * dma regs = dev control regs
1583                         */
1584                        ep->dma = &dev->regs->ctl;
1585
1586                        /* nak OUT endpoints until enable - not for ep0 */
1587                        if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
1588                                                && tmp > UDC_EPIN_NUM) {
1589                                /* set NAK */
1590                                reg = readl(&dev->ep[tmp].regs->ctl);
1591                                reg |= AMD_BIT(UDC_EPCTL_SNAK);
1592                                writel(reg, &dev->ep[tmp].regs->ctl);
1593                                dev->ep[tmp].naking = 1;
1594
1595                        }
1596                }
1597        }
1598        /* EP0 max packet */
1599        if (dev->gadget.speed == USB_SPEED_FULL) {
1600                dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
1601                dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
1602                                                UDC_FS_EP0OUT_MAX_PKT_SIZE;
1603        } else if (dev->gadget.speed == USB_SPEED_HIGH) {
1604                dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
1605                dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
1606        }
1607
1608        /*
1609         * with suspend bug workaround, ep0 params for gadget driver
1610         * are set at gadget driver bind() call
1611         */
1612        dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
1613        dev->ep[UDC_EP0IN_IX].halted = 0;
1614        INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1615
1616        /* init cfg/alt/int */
1617        dev->cur_config = 0;
1618        dev->cur_intf = 0;
1619        dev->cur_alt = 0;
1620}
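/*
 * Endpoint indexing used above: indices below UDC_EPIN_NUM are IN endpoints
 * (ep0-IN at UDC_EP0IN_IX), the remaining indices are OUT endpoints with
 * ep0-OUT at UDC_EP0OUT_IX, which is why a single loop can derive direction
 * and fifo depth from the index alone.
 */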
1621
1622/* Bring-up after Connect event, initial setup to be ready for ep0 events */
1623static void usb_connect(struct udc *dev)
1624{
1625
1626        dev_info(&dev->pdev->dev, "USB Connect\n");
1627
1628        dev->connected = 1;
1629
1630        /* put into initial config */
1631        udc_basic_init(dev);
1632
1633        /* enable device setup interrupts */
1634        udc_enable_dev_setup_interrupts(dev);
1635}
1636
1637/*
1638 * Calls gadget with disconnect event, resets the UDC and performs
1639 * the initial bring-up to be ready for ep0 events
1640 */
1641static void usb_disconnect(struct udc *dev)
1642{
1643
1644        dev_info(&dev->pdev->dev, "USB Disconnect\n");
1645
1646        dev->connected = 0;
1647
1648        /* mask interrupts */
1649        udc_mask_unused_interrupts(dev);
1650
1651        /* REVISIT there doesn't seem to be a point to having this
1652         * talk to a tasklet ... do it directly, we already hold
1653         * the spinlock needed to process the disconnect.
1654         */
1655
1656        tasklet_schedule(&disconnect_tasklet);
1657}
1658
1659/* Tasklet for disconnect to be outside of interrupt context */
1660static void udc_tasklet_disconnect(unsigned long par)
1661{
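        /*
         * par presumably carries the address of the global 'udc' pointer
         * as tasklet data, hence the double dereference below to reach
         * the device instance.
         */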
1662        struct udc *dev = (struct udc *)(*((struct udc **) par));
1663        u32 tmp;
1664
1665        DBG(dev, "Tasklet disconnect\n");
1666        spin_lock_irq(&dev->lock);
1667
1668        if (dev->driver) {
1669                spin_unlock(&dev->lock);
1670                dev->driver->disconnect(&dev->gadget);
1671                spin_lock(&dev->lock);
1672
1673                /* empty queues */
1674                for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1675                        empty_req_queue(&dev->ep[tmp]);
1676                }
1677
1678        }
1679
1680        /* disable ep0 */
1681        ep_init(dev->regs,
1682                        &dev->ep[UDC_EP0IN_IX]);
1683
1684
1685        if (!soft_reset_occured) {
1686                /* init controller by soft reset */
1687                udc_soft_reset(dev);
1688                soft_reset_occured++;
1689        }
1690
1691        /* re-enable dev interrupts */
1692        udc_enable_dev_setup_interrupts(dev);
1693        /* back to full speed ? */
1694        if (use_fullspeed) {
1695                tmp = readl(&dev->regs->cfg);
1696                tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1697                writel(tmp, &dev->regs->cfg);
1698        }
1699
1700        spin_unlock_irq(&dev->lock);
1701}
1702
1703/* Reset the UDC core */
1704static void udc_soft_reset(struct udc *dev)
1705{
1706        unsigned long   flags;
1707
1708        DBG(dev, "Soft reset\n");
1709        /*
1710         * clear possibly pending interrupts, because interrupt
1711         * status is lost after soft reset;
1712         * ep interrupt status reset first
1713         */
1714        writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1715        /* device int. status reset */
1716        writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1717
1718        spin_lock_irqsave(&udc_irq_spinlock, flags);
1719        writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
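        /* the dummy read below presumably flushes the posted reset write */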
1720        readl(&dev->regs->cfg);
1721        spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1722
1723}
1724
1725/* RDE timer callback to set RDE bit */
1726static void udc_timer_function(unsigned long v)
1727{
1728        u32 tmp;
1729
1730        spin_lock_irq(&udc_irq_spinlock);
1731
1732        if (set_rde > 0) {
1733                /*
1734                 * conditionally open the fifo if it was filled
1735                 * on the last timer call
1736                 */
1737                if (set_rde > 1) {
1738                        /* set RDE to receive setup data */
1739                        tmp = readl(&udc->regs->ctl);
1740                        tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1741                        writel(tmp, &udc->regs->ctl);
1742                        set_rde = -1;
1743                } else if (readl(&udc->regs->sts)
1744                                & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1745                        /*
1746                         * if the fifo is empty, set up polling
1747                         * instead of just opening the fifo
1748                         */
1749                        udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1750                        if (!stop_timer) {
1751                                add_timer(&udc_timer);
1752                        }
1753                } else {
1754                        /*
1755                         * fifo contains data now: set up the timer to open
1756                         * the fifo on expiry so that setup packets can be
1757                         * received; when data packets get queued by the
1758                         * gadget layer, the timer is forced to expire with
1759                         * set_rde=0 (RDE is set in udc_queue())
1760                         */
1761                        set_rde++;
1762                        /* debug: lhadmot_timer_start = 221070 */
1763                        udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1764                        if (!stop_timer) {
1765                                add_timer(&udc_timer);
1766                        }
1767                }
1768
1769        } else
1770                set_rde = -1; /* RDE was set by udc_queue() */
1771        spin_unlock_irq(&udc_irq_spinlock);
1772        if (stop_timer)
1773                complete(&on_exit);
1774
1775}
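/*
 * set_rde acts as a small state machine for the RDE (receive DMA enable)
 * bit, as read from the code above and udc_queue(): -1 = idle (RDE already
 * set), 0 = stop polling (RDE is set elsewhere), 1 = check the rxfifo on
 * next expiry, >1 = fifo was filled, open it on the next expiry.
 */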
1776
1777/* Handle halt state, used in stall poll timer */
1778static void udc_handle_halt_state(struct udc_ep *ep)
1779{
1780        u32 tmp;
1781        /* monitor the STALL bit while the ep is halted */
1782        if (ep->halted == 1) {
1783                tmp = readl(&ep->regs->ctl);
1784                /* STALL cleared ? */
1785                if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1786                        /*
1787                         * FIXME: MSC spec requires that stall remains
1788                         * even on receipt of CLEAR_FEATURE HALT. So
1789                         * we would set STALL again here to be compliant.
1790                         * But with current mass storage drivers this does
1791                         * not work (would produce endless host retries).
1792                         * So we clear halt on CLEAR_FEATURE.
1793                         *
1794                        DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1795                        tmp |= AMD_BIT(UDC_EPCTL_S);
1796                        writel(tmp, &ep->regs->ctl);*/
1797
1798                        /* clear NAK by writing CNAK */
1799                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1800                        writel(tmp, &ep->regs->ctl);
1801                        ep->halted = 0;
1802                        UDC_QUEUE_CNAK(ep, ep->num);
1803                }
1804        }
1805}
1806
1807/* Stall timer callback to poll the S bit and re-arm itself while needed */
1808static void udc_pollstall_timer_function(unsigned long v)
1809{
1810        struct udc_ep *ep;
1811        int halted = 0;
1812
1813        spin_lock_irq(&udc_stall_spinlock);
1814        /*
1815         * only one IN and one OUT endpoint are handled;
1816         * IN poll stall first
1817         */
1818        ep = &udc->ep[UDC_EPIN_IX];
1819        udc_handle_halt_state(ep);
1820        if (ep->halted)
1821                halted = 1;
1822        /* OUT poll stall */
1823        ep = &udc->ep[UDC_EPOUT_IX];
1824        udc_handle_halt_state(ep);
1825        if (ep->halted)
1826                halted = 1;
1827
1828        /* set up timer again while still halted */
1829        if (!stop_pollstall_timer && halted) {
1830                udc_pollstall_timer.expires = jiffies +
1831                                        HZ * UDC_POLLSTALL_TIMER_USECONDS
1832                                        / (1000 * 1000);
1833                add_timer(&udc_pollstall_timer);
1834        }
1835        spin_unlock_irq(&udc_stall_spinlock);
1836
1837        if (stop_pollstall_timer)
1838                complete(&on_pollstall_exit);
1839}
1840
1841/* Inits endpoint 0 so that SETUP packets are processed */
1842static void activate_control_endpoints(struct udc *dev)
1843{
1844        u32 tmp;
1845
1846        DBG(dev, "activate_control_endpoints\n");
1847
1848        /* flush fifo */
1849        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1850        tmp |= AMD_BIT(UDC_EPCTL_F);
1851        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1852
1853        /* set ep0 directions */
1854        dev->ep[UDC_EP0IN_IX].in = 1;
1855        dev->ep[UDC_EP0OUT_IX].in = 0;
1856
1857        /* set buffer size (tx fifo entries) of EP0_IN */
1858        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1859        if (dev->gadget.speed == USB_SPEED_FULL)
1860                tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1861                                        UDC_EPIN_BUFF_SIZE);
1862        else if (dev->gadget.speed == USB_SPEED_HIGH)
1863                tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1864                                        UDC_EPIN_BUFF_SIZE);
1865        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1866
1867        /* set max packet size of EP0_IN */
1868        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1869        if (dev->gadget.speed == USB_SPEED_FULL)
1870                tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1871                                        UDC_EP_MAX_PKT_SIZE);
1872        else if (dev->gadget.speed == USB_SPEED_HIGH)
1873                tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1874                                UDC_EP_MAX_PKT_SIZE);
1875        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1876
1877        /* set max packet size of EP0_OUT */
1878        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1879        if (dev->gadget.speed == USB_SPEED_FULL)
1880                tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1881                                        UDC_EP_MAX_PKT_SIZE);
1882        else if (dev->gadget.speed == USB_SPEED_HIGH)
1883                tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1884                                        UDC_EP_MAX_PKT_SIZE);
1885        writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1886
1887        /* set max packet size of EP0 in UDC CSR */
1888        tmp = readl(&dev->csr->ne[0]);
1889        if (dev->gadget.speed == USB_SPEED_FULL)
1890                tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1891                                        UDC_CSR_NE_MAX_PKT);
1892        else if (dev->gadget.speed == USB_SPEED_HIGH)
1893                tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1894                                        UDC_CSR_NE_MAX_PKT);
1895        writel(tmp, &dev->csr->ne[0]);
1896
1897        if (use_dma) {
1898                dev->ep[UDC_EP0OUT_IX].td->status |=
1899                        AMD_BIT(UDC_DMA_OUT_STS_L);
1900                /* write dma desc address */
1901                writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1902                        &dev->ep[UDC_EP0OUT_IX].regs->subptr);
1903                writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1904                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
1905                /* stop RDE timer */
1906                if (timer_pending(&udc_timer)) {
1907                        set_rde = 0;
1908                        mod_timer(&udc_timer, jiffies - 1);
1909                }
1910                /* stop pollstall timer */
1911                if (timer_pending(&udc_pollstall_timer)) {
1912                        mod_timer(&udc_pollstall_timer, jiffies - 1);
1913                }
1914                /* enable DMA */
1915                tmp = readl(&dev->regs->ctl);
1916                tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1917                                | AMD_BIT(UDC_DEVCTL_RDE)
1918                                | AMD_BIT(UDC_DEVCTL_TDE);
1919                if (use_dma_bufferfill_mode) {
1920                        tmp |= AMD_BIT(UDC_DEVCTL_BF);
1921                } else if (use_dma_ppb_du) {
1922                        tmp |= AMD_BIT(UDC_DEVCTL_DU);
1923                }
1924                writel(tmp, &dev->regs->ctl);
1925        }
1926
1927        /* clear NAK by writing CNAK for EP0IN */
1928        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1929        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1930        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1931        dev->ep[UDC_EP0IN_IX].naking = 0;
1932        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1933
1934        /* clear NAK by writing CNAK for EP0OUT */
1935        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1936        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1937        writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1938        dev->ep[UDC_EP0OUT_IX].naking = 0;
1939        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1940}
1941
1942/* Make endpoint 0 ready for control traffic */
1943static int setup_ep0(struct udc *dev)
1944{
1945        activate_control_endpoints(dev);
1946        /* enable ep0 interrupts */
1947        udc_enable_ep0_interrupts(dev);
1948        /* enable device setup interrupts */
1949        udc_enable_dev_setup_interrupts(dev);
1950
1951        return 0;
1952}
1953
1954/* Called by gadget driver to register itself */
1955static int amd5536_start(struct usb_gadget_driver *driver,
1956                int (*bind)(struct usb_gadget *))
1957{
1958        struct udc              *dev = udc;
1959        int                     retval;
1960        u32 tmp;
1961
1962        if (!driver || !bind || !driver->setup
1963                        || driver->max_speed < USB_SPEED_HIGH)
1964                return -EINVAL;
1965        if (!dev)
1966                return -ENODEV;
1967        if (dev->driver)
1968                return -EBUSY;
1969
1970        driver->driver.bus = NULL;
1971        dev->driver = driver;
1972        dev->gadget.dev.driver = &driver->driver;
1973
1974        retval = bind(&dev->gadget);
1975
1976        /* Some gadget drivers use both ep0 directions.
1977         * NOTE: to gadget driver, ep0 is just one endpoint...
1978         */
1979        dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1980                dev->ep[UDC_EP0IN_IX].ep.driver_data;
1981
1982        if (retval) {
1983                DBG(dev, "binding to %s returning %d\n",
1984                                driver->driver.name, retval);
1985                dev->driver = NULL;
1986                dev->gadget.dev.driver = NULL;
1987                return retval;
1988        }
1989
1990        /* get ready for ep0 traffic */
1991        setup_ep0(dev);
1992
1993        /* clear SD (soft disconnect) */
1994        tmp = readl(&dev->regs->ctl);
1995        tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
1996        writel(tmp, &dev->regs->ctl);
1997
1998        usb_connect(dev);
1999
2000        return 0;
2001}
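/*
 * Registration flow, roughly: a gadget driver calls
 * usb_gadget_probe_driver(driver, bind), which reaches amd5536_start() via
 * udc_ops.start; once bind() succeeds, ep0 is armed and soft disconnect is
 * cleared so the host can enumerate the device. A hypothetical caller:
 *
 *	retval = usb_gadget_probe_driver(&my_gadget_driver, my_bind);
 */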
2002
2003/* shutdown requests and disconnect from gadget */
2004static void
2005shutdown(struct udc *dev, struct usb_gadget_driver *driver)
2006__releases(dev->lock)
2007__acquires(dev->lock)
2008{
2009        int tmp;
2010
2011        if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
2012                spin_unlock(&dev->lock);
2013                driver->disconnect(&dev->gadget);
2014                spin_lock(&dev->lock);
2015        }
2016
2017        /* empty queues and init hardware */
2018        udc_basic_init(dev);
2019        for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
2020                empty_req_queue(&dev->ep[tmp]);
2021
2022        udc_setup_endpoints(dev);
2023}
2024
2025/* Called by gadget driver to unregister itself */
2026static int amd5536_stop(struct usb_gadget_driver *driver)
2027{
2028        struct udc      *dev = udc;
2029        unsigned long   flags;
2030        u32 tmp;
2031
2032        if (!dev)
2033                return -ENODEV;
2034        if (!driver || driver != dev->driver || !driver->unbind)
2035                return -EINVAL;
2036
2037        spin_lock_irqsave(&dev->lock, flags);
2038        udc_mask_unused_interrupts(dev);
2039        shutdown(dev, driver);
2040        spin_unlock_irqrestore(&dev->lock, flags);
2041
2042        driver->unbind(&dev->gadget);
2043        dev->gadget.dev.driver = NULL;
2044        dev->driver = NULL;
2045
2046        /* set SD (soft disconnect) */
2047        tmp = readl(&dev->regs->ctl);
2048        tmp |= AMD_BIT(UDC_DEVCTL_SD);
2049        writel(tmp, &dev->regs->ctl);
2050
2051
2052        DBG(dev, "%s: unregistered\n", driver->driver.name);
2053
2054        return 0;
2055}
2056
2057/* Clear pending NAK bits */
2058static void udc_process_cnak_queue(struct udc *dev)
2059{
2060        u32 tmp;
2061        u32 reg;
2062
2063        /* check IN endpoints */
2064        DBG(dev, "CNAK pending queue processing\n");
2065        for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2066                if (cnak_pending & (1 << tmp)) {
2067                        DBG(dev, "CNAK pending for ep%d\n", tmp);
2068                        /* clear NAK by writing CNAK */
2069                        reg = readl(&dev->ep[tmp].regs->ctl);
2070                        reg |= AMD_BIT(UDC_EPCTL_CNAK);
2071                        writel(reg, &dev->ep[tmp].regs->ctl);
2072                        dev->ep[tmp].naking = 0;
2073                        UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2074                }
2075        }
2076        /* ...  and ep0out */
2077        if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2078                DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2079                /* clear NAK by writing CNAK */
2080                reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2081                reg |= AMD_BIT(UDC_EPCTL_CNAK);
2082                writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2083                dev->ep[UDC_EP0OUT_IX].naking = 0;
2084                UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2085                                dev->ep[UDC_EP0OUT_IX].num);
2086        }
2087}
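/*
 * cnak_pending is presumably a bitmask maintained via UDC_QUEUE_CNAK():
 * when clearing NAK cannot take effect immediately (rxfifo not yet empty),
 * the ep's bit is queued and replayed here from the ISRs once the
 * rxfifo-empty condition holds.
 */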
2088
2089/* Enable RX DMA after a setup packet */
2090static void udc_ep0_set_rde(struct udc *dev)
2091{
2092        if (use_dma) {
2093                /*
2094                 * only enable RXDMA when no data endpoint enabled
2095                 * or data is queued
2096                 */
2097                if (!dev->data_ep_enabled || dev->data_ep_queued) {
2098                        udc_set_rde(dev);
2099                } else {
2100                        /*
2101                         * set up timer for enabling RDE (so RXFIFO DMA
2102                         * is not enabled for data endpoints too early)
2103                         */
2104                        if (set_rde != 0 && !timer_pending(&udc_timer)) {
2105                                udc_timer.expires =
2106                                        jiffies + HZ/UDC_RDE_TIMER_DIV;
2107                                set_rde = 1;
2108                                if (!stop_timer) {
2109                                        add_timer(&udc_timer);
2110                                }
2111                        }
2112                }
2113        }
2114}
2115
2116
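/*
 * OUT DMA modes distinguished below, following the module parameters:
 * plain buffer-fill mode (BF), packet-per-buffer mode without descriptor
 * update (use_dma_ppb) and with descriptor update (use_dma_ppb_du, DU
 * presumably meaning "descriptor update"); the received byte count is
 * taken from the matching descriptor status fields in each mode.
 */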
2117/* Interrupt handler for data OUT traffic */
2118static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2119{
2120        irqreturn_t             ret_val = IRQ_NONE;
2121        u32                     tmp;
2122        struct udc_ep           *ep;
2123        struct udc_request      *req;
2124        unsigned int            count;
2125        struct udc_data_dma     *td = NULL;
2126        unsigned                dma_done;
2127
2128        VDBG(dev, "ep%d irq\n", ep_ix);
2129        ep = &dev->ep[ep_ix];
2130
2131        tmp = readl(&ep->regs->sts);
2132        if (use_dma) {
2133                /* BNA event ? */
2134                if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2135                        DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2136                                        ep->num, readl(&ep->regs->desptr));
2137                        /* clear BNA */
2138                        writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2139                        if (!ep->cancel_transfer)
2140                                ep->bna_occurred = 1;
2141                        else
2142                                ep->cancel_transfer = 0;
2143                        ret_val = IRQ_HANDLED;
2144                        goto finished;
2145                }
2146        }
2147        /* HE event ? */
2148        if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2149                dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
2150
2151                /* clear HE */
2152                writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2153                ret_val = IRQ_HANDLED;
2154                goto finished;
2155        }
2156
2157        if (!list_empty(&ep->queue)) {
2158
2159                /* next request */
2160                req = list_entry(ep->queue.next,
2161                        struct udc_request, queue);
2162        } else {
2163                req = NULL;
2164                udc_rxfifo_pending = 1;
2165        }
2166        VDBG(dev, "req = %p\n", req);
2167        /* fifo mode */
2168        if (!use_dma) {
2169
2170                /* read fifo */
2171                if (req && udc_rxfifo_read(ep, req)) {
2172                        ret_val = IRQ_HANDLED;
2173
2174                        /* finish */
2175                        complete_req(ep, req, 0);
2176                        /* next request */
2177                        if (!list_empty(&ep->queue) && !ep->halted) {
2178                                req = list_entry(ep->queue.next,
2179                                        struct udc_request, queue);
2180                        } else
2181                                req = NULL;
2182                }
2183
2184        /* DMA */
2185        } else if (!ep->cancel_transfer && req != NULL) {
2186                ret_val = IRQ_HANDLED;
2187
2188                /* check for DMA done */
2189                if (!use_dma_ppb) {
2190                        dma_done = AMD_GETBITS(req->td_data->status,
2191                                                UDC_DMA_OUT_STS_BS);
2192                /* packet per buffer mode - rx bytes */
2193                } else {
2194                        /*
2195                         * if BNA occurred then recover desc. from
2196                         * BNA dummy desc.
2197                         */
2198                        if (ep->bna_occurred) {
2199                                VDBG(dev, "Recover desc. from BNA dummy\n");
2200                                memcpy(req->td_data, ep->bna_dummy_req->td_data,
2201                                                sizeof(struct udc_data_dma));
2202                                ep->bna_occurred = 0;
2203                                udc_init_bna_dummy(ep->req);
2204                        }
2205                        td = udc_get_last_dma_desc(req);
2206                        dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2207                }
2208                if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2209                        /* buffer fill mode - rx bytes */
2210                        if (!use_dma_ppb) {
2211                                /* number of bytes received */
2212                                count = AMD_GETBITS(req->td_data->status,
2213                                                UDC_DMA_OUT_STS_RXBYTES);
2214                                VDBG(dev, "rx bytes=%u\n", count);
2215                        /* packet per buffer mode - rx bytes */
2216                        } else {
2217                                VDBG(dev, "req->td_data=%p\n", req->td_data);
2218                                VDBG(dev, "last desc = %p\n", td);
2219                                /* number of bytes received */
2220                                if (use_dma_ppb_du) {
2221                                        /* every desc. counts bytes */
2222                                        count = udc_get_ppbdu_rxbytes(req);
2223                                } else {
2224                                        /* last desc. counts bytes */
2225                                        count = AMD_GETBITS(td->status,
2226                                                UDC_DMA_OUT_STS_RXBYTES);
2227                                        if (!count && req->req.length
2228                                                == UDC_DMA_MAXPACKET) {
2229                                                /*
2230                                                 * on 64k packets the RXBYTES
2231                                                 * field is zero
2232                                                 */
2233                                                count = UDC_DMA_MAXPACKET;
2234                                        }
2235                                }
2236                                VDBG(dev, "last desc rx bytes=%u\n", count);
2237                        }
2238
2239                        tmp = req->req.length - req->req.actual;
2240                        if (count > tmp) {
2241                                if ((tmp % ep->ep.maxpacket) != 0) {
2242                                        DBG(dev, "%s: rx %db, space=%db\n",
2243                                                ep->ep.name, count, tmp);
2244                                        req->req.status = -EOVERFLOW;
2245                                }
2246                                count = tmp;
2247                        }
2248                        req->req.actual += count;
2249                        req->dma_going = 0;
2250                        /* complete request */
2251                        complete_req(ep, req, 0);
2252
2253                        /* next request */
2254                        if (!list_empty(&ep->queue) && !ep->halted) {
2255                                req = list_entry(ep->queue.next,
2256                                        struct udc_request,
2257                                        queue);
2258                                /*
2259                                 * DMA may already be started by udc_queue()
2260                                 * called from the gadget driver's completion
2261                                 * routine. This happens when the queue
2262                                 * holds only one request.
2263                                 */
2264                                if (req->dma_going == 0) {
2265                                        /* next dma */
2266                                        if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2267                                                goto finished;
2268                                        /* write desc pointer */
2269                                        writel(req->td_phys,
2270                                                &ep->regs->desptr);
2271                                        req->dma_going = 1;
2272                                        /* enable DMA */
2273                                        udc_set_rde(dev);
2274                                }
2275                        } else {
2276                                /*
2277                                 * implant BNA dummy descriptor to allow
2278                                 * RXFIFO opening by RDE
2279                                 */
2280                                if (ep->bna_dummy_req) {
2281                                        /* write desc pointer */
2282                                        writel(ep->bna_dummy_req->td_phys,
2283                                                &ep->regs->desptr);
2284                                        ep->bna_occurred = 0;
2285                                }
2286
2287                                /*
2288                                 * schedule timer for setting RDE if the
2289                                 * queue remains empty, to allow ep0 packets
2290                                 * to pass through
2291                                 */
2292                                if (set_rde != 0
2293                                                && !timer_pending(&udc_timer)) {
2294                                        udc_timer.expires =
2295                                                jiffies
2296                                                + HZ*UDC_RDE_TIMER_SECONDS;
2297                                        set_rde = 1;
2298                                        if (!stop_timer) {
2299                                                add_timer(&udc_timer);
2300                                        }
2301                                }
2302                                if (ep->num != UDC_EP0OUT_IX)
2303                                        dev->data_ep_queued = 0;
2304                        }
2305
2306                } else {
2307                        /*
2308                         * RX DMA must be re-enabled for each desc in PPBDU mode
2309                         * and must be enabled for PPBNDU mode in case of BNA
2310                         */
2311                        udc_set_rde(dev);
2312                }
2313
2314        } else if (ep->cancel_transfer) {
2315                ret_val = IRQ_HANDLED;
2316                ep->cancel_transfer = 0;
2317        }
2318
2319        /* check pending CNAKS */
2320        if (cnak_pending) {
2321                /* CNAK processing only when rxfifo is empty */
2322                if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2323                        udc_process_cnak_queue(dev);
2324                }
2325        }
2326
2327        /* clear OUT bits in ep status */
2328        writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2329finished:
2330        return ret_val;
2331}
2332
2333/* Interrupt handler for data IN traffic */
2334static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2335{
2336        irqreturn_t ret_val = IRQ_NONE;
2337        u32 tmp;
2338        u32 epsts;
2339        struct udc_ep *ep;
2340        struct udc_request *req;
2341        struct udc_data_dma *td;
2342        unsigned dma_done;
2343        unsigned len;
2344
2345        ep = &dev->ep[ep_ix];
2346
2347        epsts = readl(&ep->regs->sts);
2348        if (use_dma) {
2349                /* BNA ? */
2350                if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2351                        dev_err(&dev->pdev->dev,
2352                                "BNA ep%din occurred - DESPTR = %08lx\n",
2353                                ep->num,
2354                                (unsigned long) readl(&ep->regs->desptr));
2355
2356                        /* clear BNA */
2357                        writel(epsts, &ep->regs->sts);
2358                        ret_val = IRQ_HANDLED;
2359                        goto finished;
2360                }
2361        }
2362        /* HE event ? */
2363        if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2364                dev_err(&dev->pdev->dev,
2365                        "HE ep%din occurred - DESPTR = %08lx\n",
2366                        ep->num, (unsigned long) readl(&ep->regs->desptr));
2367
2368                /* clear HE */
2369                writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2370                ret_val = IRQ_HANDLED;
2371                goto finished;
2372        }
2373
2374        /* DMA completion */
2375        if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2376                VDBG(dev, "TDC set- completion\n");
2377                ret_val = IRQ_HANDLED;
2378                if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2379                        req = list_entry(ep->queue.next,
2380                                        struct udc_request, queue);
2381                        /*
2382                         * length bytes transferred
2383                         * check dma done of last desc. in PPBDU mode
2384                         */
2385                        if (use_dma_ppb_du) {
2386                                td = udc_get_last_dma_desc(req);
2387                                if (td) {
2388                                        dma_done =
2389                                                AMD_GETBITS(td->status,
2390                                                UDC_DMA_IN_STS_BS);
2391                                        /* don't care about DMA done status */
2392                                        req->req.actual = req->req.length;
2393                                }
2394                        } else {
2395                                /* assume all bytes transferred */
2396                                req->req.actual = req->req.length;
2397                        }
2398
2399                        if (req->req.actual == req->req.length) {
2400                                /* complete req */
2401                                complete_req(ep, req, 0);
2402                                req->dma_going = 0;
2403                                /* further request available ? */
2404                                if (list_empty(&ep->queue)) {
2405                                        /* disable interrupt */
2406                                        tmp = readl(&dev->regs->ep_irqmsk);
2407                                        tmp |= AMD_BIT(ep->num);
2408                                        writel(tmp, &dev->regs->ep_irqmsk);
2409                                }
2410                        }
2411                }
2412                ep->cancel_transfer = 0;
2413
2414        }
2415        /*
2416         * status reg has IN bit set and TDC not set (if TDC was handled,
2417         * IN must not be handled; UDC defect ?)
2418         */
2419        if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2420                        && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2421                ret_val = IRQ_HANDLED;
2422                if (!list_empty(&ep->queue)) {
2423                        /* next request */
2424                        req = list_entry(ep->queue.next,
2425                                        struct udc_request, queue);
2426                        /* FIFO mode */
2427                        if (!use_dma) {
2428                                /* write fifo */
2429                                udc_txfifo_write(ep, &req->req);
2430                                len = req->req.length - req->req.actual;
2431                                if (len > ep->ep.maxpacket)
2432                                        len = ep->ep.maxpacket;
2433                                req->req.actual += len;
2434                                if (req->req.actual == req->req.length
2435                                        || (len != ep->ep.maxpacket)) {
2436                                        /* complete req */
2437                                        complete_req(ep, req, 0);
2438                                }
2439                        /* DMA */
2440                        } else if (req && !req->dma_going) {
2441                                VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2442                                        req, req->td_data);
2443                                if (req->td_data) {
2444
2445                                        req->dma_going = 1;
2446
2447                                        /*
2448                                         * unset L bit of first desc.
2449                                         * for chain
2450                                         */
2451                                        if (use_dma_ppb && req->req.length >
2452                                                        ep->ep.maxpacket) {
2453                                                req->td_data->status &=
2454                                                        AMD_CLEAR_BIT(
2455                                                        UDC_DMA_IN_STS_L);
2456                                        }
2457
2458                                        /* write desc pointer */
2459                                        writel(req->td_phys, &ep->regs->desptr);
2460
2461                                        /* set HOST READY */
2462                                        req->td_data->status =
2463                                                AMD_ADDBITS(
2464                                                req->td_data->status,
2465                                                UDC_DMA_IN_STS_BS_HOST_READY,
2466                                                UDC_DMA_IN_STS_BS);
2467
2468                                        /* set poll demand bit */
2469                                        tmp = readl(&ep->regs->ctl);
2470                                        tmp |= AMD_BIT(UDC_EPCTL_P);
2471                                        writel(tmp, &ep->regs->ctl);
2472                                }
2473                        }
2474
2475                } else if (!use_dma && ep->in) {
2476                        /* disable interrupt */
2477                        tmp = readl(
2478                                &dev->regs->ep_irqmsk);
2479                        tmp |= AMD_BIT(ep->num);
2480                        writel(tmp,
2481                                &dev->regs->ep_irqmsk);
2482                }
2483        }
2484        /* clear status bits */
2485        writel(epsts, &ep->regs->sts);
2486
2487finished:
2488        return ret_val;
2489
2490}
2491
2492/* Interrupt handler for Control OUT traffic */
2493static irqreturn_t udc_control_out_isr(struct udc *dev)
2494__releases(dev->lock)
2495__acquires(dev->lock)
2496{
2497        irqreturn_t ret_val = IRQ_NONE;
2498        u32 tmp;
2499        int setup_supported;
2500        u32 count;
2501        int set = 0;
2502        struct udc_ep   *ep;
2503        struct udc_ep   *ep_tmp;
2504
2505        ep = &dev->ep[UDC_EP0OUT_IX];
2506
2507        /* clear irq */
2508        writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2509
2510        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2511        /* check BNA and clear if set */
2512        if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2513                VDBG(dev, "ep0: BNA set\n");
2514                writel(AMD_BIT(UDC_EPSTS_BNA),
2515                        &dev->ep[UDC_EP0OUT_IX].regs->sts);
2516                ep->bna_occurred = 1;
2517                ret_val = IRQ_HANDLED;
2518                goto finished;
2519        }
2520
2521        /* type of data: SETUP or DATA 0 bytes */
2522        tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2523        VDBG(dev, "data_typ = %x\n", tmp);
2524
2525        /* setup data */
2526        if (tmp == UDC_EPSTS_OUT_SETUP) {
2527                ret_val = IRQ_HANDLED;
2528
2529                ep->dev->stall_ep0in = 0;
2530                dev->waiting_zlp_ack_ep0in = 0;
2531
2532                /* set NAK for EP0_IN */
2533                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2534                tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2535                writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2536                dev->ep[UDC_EP0IN_IX].naking = 1;
2537                /* get setup data */
2538                if (use_dma) {
2539
2540                        /* clear OUT bits in ep status */
2541                        writel(UDC_EPSTS_OUT_CLEAR,
2542                                &dev->ep[UDC_EP0OUT_IX].regs->sts);
2543
2544                        setup_data.data[0] =
2545                                dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2546                        setup_data.data[1] =
2547                                dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2548                        /* set HOST READY */
2549                        dev->ep[UDC_EP0OUT_IX].td_stp->status =
2550                                        UDC_DMA_STP_STS_BS_HOST_READY;
2551                } else {
2552                        /* read fifo */
2553                        udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2554                }
2555
2556                /* determine direction of control data */
2557                if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2558                        dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2559                        /* enable RDE */
2560                        udc_ep0_set_rde(dev);
2561                        set = 0;
2562                } else {
2563                        dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2564                        /*
2565                         * implant BNA dummy descriptor to allow RXFIFO opening
2566                         * by RDE
2567                         */
2568                        if (ep->bna_dummy_req) {
2569                                /* write desc pointer */
2570                                writel(ep->bna_dummy_req->td_phys,
2571                                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2572                                ep->bna_occurred = 0;
2573                        }
2574
2575                        set = 1;
2576                        dev->ep[UDC_EP0OUT_IX].naking = 1;
2577                        /*
2578                         * set up timer for enabling RDE (so RXFIFO DMA
2579                         * is not enabled for data too early)
2580                         */
2581                        set_rde = 1;
2582                        if (!timer_pending(&udc_timer)) {
2583                                udc_timer.expires = jiffies +
2584                                                        HZ/UDC_RDE_TIMER_DIV;
2585                                if (!stop_timer) {
2586                                        add_timer(&udc_timer);
2587                                }
2588                        }
2589                }
2590
2591                /*
2592                 * mass storage reset must be processed here because
2593                 * next packet may be a CLEAR_FEATURE HALT which would not
2594                 * clear the stall bit when no STALL handshake was received
2595                 * before (autostall can cause this)
2596                 */
2597                if (setup_data.data[0] == UDC_MSCRES_DWORD0
2598                                && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2599                        DBG(dev, "MSC Reset\n");
2600                        /*
2601                         * clear stall bits
2602                         * only one IN and OUT endpoints are handled
2603                         */
2604                        ep_tmp = &udc->ep[UDC_EPIN_IX];
2605                        udc_set_halt(&ep_tmp->ep, 0);
2606                        ep_tmp = &udc->ep[UDC_EPOUT_IX];
2607                        udc_set_halt(&ep_tmp->ep, 0);
2608                }
2609
2610                /* call gadget with setup data received */
2611                spin_unlock(&dev->lock);
2612                setup_supported = dev->driver->setup(&dev->gadget,
2613                                                &setup_data.request);
2614                spin_lock(&dev->lock);
2615
2616                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2617                /* ep0 in returns data (not zlp) on IN phase */
2618                if (setup_supported >= 0 && setup_supported <
2619                                UDC_EP0IN_MAXPACKET) {
2620                        /* clear NAK by writing CNAK in EP0_IN */
2621                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2622                        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2623                        dev->ep[UDC_EP0IN_IX].naking = 0;
2624                        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2625
2626                /* if unsupported request then stall */
2627                } else if (setup_supported < 0) {
2628                        tmp |= AMD_BIT(UDC_EPCTL_S);
2629                        writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2630                } else
2631                        dev->waiting_zlp_ack_ep0in = 1;
2632
2633
2634                /* clear NAK by writing CNAK in EP0_OUT */
2635                if (!set) {
2636                        tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2637                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2638                        writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2639                        dev->ep[UDC_EP0OUT_IX].naking = 0;
2640                        UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2641                }
2642
2643                if (!use_dma) {
2644                        /* clear OUT bits in ep status */
2645                        writel(UDC_EPSTS_OUT_CLEAR,
2646                                &dev->ep[UDC_EP0OUT_IX].regs->sts);
2647                }
2648
2649        /* data packet 0 bytes */
2650        } else if (tmp == UDC_EPSTS_OUT_DATA) {
2651                /* clear OUT bits in ep status */
2652                writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2653
2654                /* get setup data: only 0 packet */
2655                if (use_dma) {
2656                        /* no req if 0 packet, just reactivate */
2657                        if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2658                                VDBG(dev, "ZLP\n");
2659
2660                                /* set HOST READY */
2661                                dev->ep[UDC_EP0OUT_IX].td->status =
2662                                        AMD_ADDBITS(
2663                                        dev->ep[UDC_EP0OUT_IX].td->status,
2664                                        UDC_DMA_OUT_STS_BS_HOST_READY,
2665                                        UDC_DMA_OUT_STS_BS);
2666                                /* enable RDE */
2667                                udc_ep0_set_rde(dev);
2668                                ret_val = IRQ_HANDLED;
2669
2670                        } else {
2671                                /* control write */
2672                                ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2673                                /* re-program desc. pointer for possible ZLPs */
2674                                writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2675                                        &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2676                                /* enable RDE */
2677                                udc_ep0_set_rde(dev);
2678                        }
2679                } else {
2680
2681                        /* number of bytes received */
2682                        count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2683                        count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2684                        /* OUT data in fifo mode is not working; force 0 packet */
2685                        count = 0;
2686
2687                        /* 0 packet or real data ? */
2688                        if (count != 0) {
2689                                ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2690                        } else {
2691                                /* dummy read confirm */
2692                                readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2693                                ret_val = IRQ_HANDLED;
2694                        }
2695                }
2696        }
2697
2698        /* check pending CNAKS */
2699        if (cnak_pending) {
2700                /* CNAK processing only when rxfifo is empty */
2701                if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2702                        udc_process_cnak_queue(dev);
2703                }
2704        }
2705
2706finished:
2707        return ret_val;
2708}
2709
2710/* Interrupt handler for Control IN traffic */
2711static irqreturn_t udc_control_in_isr(struct udc *dev)
2712{
2713        irqreturn_t ret_val = IRQ_NONE;
2714        u32 tmp;
2715        struct udc_ep *ep;
2716        struct udc_request *req;
2717        unsigned len;
2718
2719        ep = &dev->ep[UDC_EP0IN_IX];
2720
2721        /* clear irq */
2722        writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2723
2724        tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2725        /* DMA completion */
2726        if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2727                VDBG(dev, "isr: TDC clear\n");
2728                ret_val = IRQ_HANDLED;
2729
2730                /* clear TDC bit */
2731                writel(AMD_BIT(UDC_EPSTS_TDC),
2732                                &dev->ep[UDC_EP0IN_IX].regs->sts);
2733
2734        /* status reg has IN bit set ? */
2735        } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2736                ret_val = IRQ_HANDLED;
2737
2738                if (ep->dma) {
2739                        /* clear IN bit */
2740                        writel(AMD_BIT(UDC_EPSTS_IN),
2741                                &dev->ep[UDC_EP0IN_IX].regs->sts);
2742                }
2743                if (dev->stall_ep0in) {
2744                        DBG(dev, "stall ep0in\n");
2745                        /* halt ep0in */
2746                        tmp = readl(&ep->regs->ctl);
2747                        tmp |= AMD_BIT(UDC_EPCTL_S);
2748                        writel(tmp, &ep->regs->ctl);
2749                } else {
2750                        if (!list_empty(&ep->queue)) {
2751                                /* next request */
2752                                req = list_entry(ep->queue.next,
2753                                                struct udc_request, queue);
2754
2755                                if (ep->dma) {
2756                                        /* write desc pointer */
2757                                        writel(req->td_phys, &ep->regs->desptr);
2758                                        /* set HOST READY */
2759                                        req->td_data->status =
2760                                                AMD_ADDBITS(
2761                                                req->td_data->status,
2762                                                UDC_DMA_STP_STS_BS_HOST_READY,
2763                                                UDC_DMA_STP_STS_BS);
2764
2765                                        /* set poll demand bit */
2766                                        tmp =
2767                                        readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2768                                        tmp |= AMD_BIT(UDC_EPCTL_P);
2769                                        writel(tmp,
2770                                        &dev->ep[UDC_EP0IN_IX].regs->ctl);
2771
2772                                        /* all bytes will be transferred */
2773                                        req->req.actual = req->req.length;
2774
2775                                        /* complete req */
2776                                        complete_req(ep, req, 0);
2777
2778                                } else {
2779                                        /* write fifo */
2780                                        udc_txfifo_write(ep, &req->req);
2781
2782                                        /* number of bytes transferred */
2783                                        len = req->req.length - req->req.actual;
2784                                        if (len > ep->ep.maxpacket)
2785                                                len = ep->ep.maxpacket;
2786
2787                                        req->req.actual += len;
2788                                        if (req->req.actual == req->req.length
2789                                                || (len != ep->ep.maxpacket)) {
2790                                                /* complete req */
2791                                                complete_req(ep, req, 0);
2792                                        }
2793                                }
2794
2795                        }
2796                }
2797                ep->halted = 0;
2798                dev->stall_ep0in = 0;
2799                if (!ep->dma) {
2800                        /* clear IN bit */
2801                        writel(AMD_BIT(UDC_EPSTS_IN),
2802                                &dev->ep[UDC_EP0IN_IX].regs->sts);
2803                }
2804        }
2805
2806        return ret_val;
2807}
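/*
 * Illustrative sketch, condensed from the handler above (not a
 * separate driver function): the ep0-in DMA kick hands the descriptor
 * to the hardware (HOST_READY), then sets the poll demand bit so the
 * controller fetches and transmits it.
 */
#if 0
static void example_ep0in_dma_kick(struct udc_ep *ep, struct udc_request *req)
{
	u32 tmp;

	writel(req->td_phys, &ep->regs->desptr);	/* descriptor base */
	req->td_data->status = AMD_ADDBITS(req->td_data->status,
			UDC_DMA_STP_STS_BS_HOST_READY,
			UDC_DMA_STP_STS_BS);		/* hand over to hw */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_P);			/* poll demand */
	writel(tmp, &ep->regs->ctl);
}
#endif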
2808
2809
2810/* Interrupt handler for global device events */
2811static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2812__releases(dev->lock)
2813__acquires(dev->lock)
2814{
2815        irqreturn_t ret_val = IRQ_NONE;
2816        u32 tmp;
2817        u32 cfg;
2818        struct udc_ep *ep;
2819        u16 i;
2820        u8 udc_csr_epix;
2821
2822        /* SET_CONFIG irq ? */
2823        if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2824                ret_val = IRQ_HANDLED;
2825
2826                /* read config value */
2827                tmp = readl(&dev->regs->sts);
2828                cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2829                DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2830                dev->cur_config = cfg;
2831                dev->set_cfg_not_acked = 1;
2832
2833                /* make usb request for gadget driver */
2834                memset(&setup_data, 0, sizeof(union udc_setup_data));
2835                setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2836                setup_data.request.wValue = cpu_to_le16(dev->cur_config);
2837
2838                /* program the NE registers */
2839                for (i = 0; i < UDC_EP_NUM; i++) {
2840                        ep = &dev->ep[i];
2841                        if (ep->in) {
2842
2843                                /* ep ix in UDC CSR register space */
2844                                udc_csr_epix = ep->num;
2845
2846
2847                        /* OUT ep */
2848                        } else {
2849                                /* ep ix in UDC CSR register space */
2850                                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2851                        }
2852
2853                        tmp = readl(&dev->csr->ne[udc_csr_epix]);
2854                        /* ep cfg */
2855                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2856                                                UDC_CSR_NE_CFG);
2857                        /* write reg */
2858                        writel(tmp, &dev->csr->ne[udc_csr_epix]);
2859
2860                        /* clear stall bits */
2861                        ep->halted = 0;
2862                        tmp = readl(&ep->regs->ctl);
2863                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2864                        writel(tmp, &ep->regs->ctl);
2865                }
2866                /* call gadget zero with setup data received */
2867                spin_unlock(&dev->lock);
2868                tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2869                spin_lock(&dev->lock);
2870
2871        } /* SET_INTERFACE ? */
2872        if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2873                ret_val = IRQ_HANDLED;
2874
2875                dev->set_cfg_not_acked = 1;
2876                /* read interface and alt setting values */
2877                tmp = readl(&dev->regs->sts);
2878                dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2879                dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2880
2881                /* make usb request for gadget driver */
2882                memset(&setup_data, 0, sizeof(union udc_setup_data));
2883                setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2884                setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2885                setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2886                setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2887
2888                DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2889                                dev->cur_alt, dev->cur_intf);
2890
2891                /* program the NE registers */
2892                for (i = 0; i < UDC_EP_NUM; i++) {
2893                        ep = &dev->ep[i];
2894                        if (ep->in) {
2895
2896                                /* ep ix in UDC CSR register space */
2897                                udc_csr_epix = ep->num;
2898
2899
2900                        /* OUT ep */
2901                        } else {
2902                                /* ep ix in UDC CSR register space */
2903                                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2904                        }
2905
2906                        /* UDC CSR reg */
2907                        /* set ep values */
2908                        tmp = readl(&dev->csr->ne[udc_csr_epix]);
2909                        /* ep interface */
2910                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2911                                                UDC_CSR_NE_INTF);
2912                        /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2913                        /* ep alt */
2914                        tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2915                                                UDC_CSR_NE_ALT);
2916                        /* write reg */
2917                        writel(tmp, &dev->csr->ne[udc_csr_epix]);
2918
2919                        /* clear stall bits */
2920                        ep->halted = 0;
2921                        tmp = readl(&ep->regs->ctl);
2922                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2923                        writel(tmp, &ep->regs->ctl);
2924                }
2925
2926                /* call gadget zero with setup data received */
2927                spin_unlock(&dev->lock);
2928                tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2929                spin_lock(&dev->lock);
2930
2931        } /* USB reset */
2932        if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2933                DBG(dev, "USB Reset interrupt\n");
2934                ret_val = IRQ_HANDLED;
2935
2936                /* allow soft reset when suspend occurs */
2937                soft_reset_occured = 0;
2938
2939                dev->waiting_zlp_ack_ep0in = 0;
2940                dev->set_cfg_not_acked = 0;
2941
2942                /* mask not needed interrupts */
2943                udc_mask_unused_interrupts(dev);
2944
2945                /* call gadget to resume and reset configs etc. */
2946                spin_unlock(&dev->lock);
2947                if (dev->sys_suspended && dev->driver->resume) {
2948                        dev->driver->resume(&dev->gadget);
2949                        dev->sys_suspended = 0;
2950                }
2951                dev->driver->disconnect(&dev->gadget);
2952                spin_lock(&dev->lock);
2953
2954                /* disable ep0 to empty req queue */
2955                empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2956                ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2957
2958                /* soft reset when rxfifo not empty */
2959                tmp = readl(&dev->regs->sts);
2960                if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2961                                && !soft_reset_after_usbreset_occured) {
2962                        udc_soft_reset(dev);
2963                        soft_reset_after_usbreset_occured++;
2964                }
2965
2966                /*
2967                 * DMA reset to kill potential old DMA hw hang,
2968                 * POLL bit is already reset by ep_init() through
2969                 * disconnect()
2970                 */
2971                DBG(dev, "DMA machine reset\n");
2972                tmp = readl(&dev->regs->cfg);
2973                writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2974                writel(tmp, &dev->regs->cfg);
2975
2976                /* put into initial config */
2977                udc_basic_init(dev);
2978
2979                /* enable device setup interrupts */
2980                udc_enable_dev_setup_interrupts(dev);
2981
2982                /* enable suspend interrupt */
2983                tmp = readl(&dev->regs->irqmsk);
2984                tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2985                writel(tmp, &dev->regs->irqmsk);
2986
2987        } /* USB suspend */
2988        if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2989                DBG(dev, "USB Suspend interrupt\n");
2990                ret_val = IRQ_HANDLED;
2991                if (dev->driver->suspend) {
2992                        spin_unlock(&dev->lock);
2993                        dev->sys_suspended = 1;
2994                        dev->driver->suspend(&dev->gadget);
2995                        spin_lock(&dev->lock);
2996                }
2997        } /* new speed ? */
2998        if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2999                DBG(dev, "ENUM interrupt\n");
3000                ret_val = IRQ_HANDLED;
3001                soft_reset_after_usbreset_occured = 0;
3002
3003                /* disable ep0 to empty req queue */
3004                empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
3005                ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3006
3007                /* link up all endpoints */
3008                udc_setup_endpoints(dev);
3009                dev_info(&dev->pdev->dev, "Connect: %s\n",
3010                         usb_speed_string(dev->gadget.speed));
3011
3012                /* init ep 0 */
3013                activate_control_endpoints(dev);
3014
3015                /* enable ep0 interrupts */
3016                udc_enable_ep0_interrupts(dev);
3017        }
3018        /* session valid change interrupt */
3019        if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
3020                DBG(dev, "USB SVC interrupt\n");
3021                ret_val = IRQ_HANDLED;
3022
3023                /* check that session is not valid to detect disconnect */
3024                tmp = readl(&dev->regs->sts);
3025                if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
3026                        /* disable suspend interrupt */
3027                        tmp = readl(&dev->regs->irqmsk);
3028                        tmp |= AMD_BIT(UDC_DEVINT_US);
3029                        writel(tmp, &dev->regs->irqmsk);
3030                        DBG(dev, "USB Disconnect (session valid low)\n");
3031                        /* cleanup on disconnect */
3032                        usb_disconnect(udc);
3033                }
3034
3035        }
3036
3037        return ret_val;
3038}
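/*
 * Illustrative sketch of the "synthesized setup packet" idiom used
 * above for SET_CONFIG and SET_INTERFACE: the hardware has already
 * accepted the request, so the driver rebuilds an equivalent
 * usb_ctrlrequest and forwards it to the gadget driver with the lock
 * dropped. Condensed from the handler; not a separate function.
 */
#if 0
static void example_forward_set_config(struct udc *dev, u8 config)
{
	memset(&setup_data, 0, sizeof(union udc_setup_data));
	setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
	setup_data.request.wValue = cpu_to_le16(config);

	spin_unlock(&dev->lock);
	dev->driver->setup(&dev->gadget, &setup_data.request);
	spin_lock(&dev->lock);
}
#endif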
3039
3040/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3041static irqreturn_t udc_irq(int irq, void *pdev)
3042{
3043        struct udc *dev = pdev;
3044        u32 reg;
3045        u16 i;
3046        u32 ep_irq;
3047        irqreturn_t ret_val = IRQ_NONE;
3048
3049        spin_lock(&dev->lock);
3050
3051        /* check for ep irq */
3052        reg = readl(&dev->regs->ep_irqsts);
3053        if (reg) {
3054                if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3055                        ret_val |= udc_control_out_isr(dev);
3056                if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3057                        ret_val |= udc_control_in_isr(dev);
3058
3059                /*
3060                 * data endpoint
3061                 * iterate ep's
3062                 */
3063                for (i = 1; i < UDC_EP_NUM; i++) {
3064                        ep_irq = 1 << i;
3065                        if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3066                                continue;
3067
3068                        /* clear irq status */
3069                        writel(ep_irq, &dev->regs->ep_irqsts);
3070
3071                        /* irq for out ep ? */
3072                        if (i > UDC_EPIN_NUM)
3073                                ret_val |= udc_data_out_isr(dev, i);
3074                        else
3075                                ret_val |= udc_data_in_isr(dev, i);
3076                }
3077
3078        }
3079
3080
3081        /* check for dev irq */
3082        reg = readl(&dev->regs->irqsts);
3083        if (reg) {
3084                /* clear irq */
3085                writel(reg, &dev->regs->irqsts);
3086                ret_val |= udc_dev_isr(dev, reg);
3087        }
3088
3089
3090        spin_unlock(&dev->lock);
3091        return ret_val;
3092}
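/*
 * Illustrative sketch of the dispatch rule in udc_irq(), assuming the
 * ep_irqsts bit layout the loop implies: IN endpoint bits occupy
 * indices 1..UDC_EPIN_NUM and OUT endpoint bits lie above that
 * (ep0's IN/OUT bits are handled separately before the loop).
 */
#if 0
static irqreturn_t example_dispatch_ep(struct udc *dev, int i)
{
	if (i > UDC_EPIN_NUM)
		return udc_data_out_isr(dev, i);	/* OUT ep irq */
	return udc_data_in_isr(dev, i);			/* IN ep irq */
}
#endif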
3093
3094/* Tears down device */
3095static void gadget_release(struct device *pdev)
3096{
3097        struct udc *dev = dev_get_drvdata(pdev);
3098        kfree(dev);
3099}
3100
3101/* Cleanup on device remove */
3102static void udc_remove(struct udc *dev)
3103{
3104        /* remove timer */
3105        stop_timer++;
3106        if (timer_pending(&udc_timer))
3107                wait_for_completion(&on_exit);
3108        if (udc_timer.data)
3109                del_timer_sync(&udc_timer);
3110        /* remove pollstall timer */
3111        stop_pollstall_timer++;
3112        if (timer_pending(&udc_pollstall_timer))
3113                wait_for_completion(&on_pollstall_exit);
3114        if (udc_pollstall_timer.data)
3115                del_timer_sync(&udc_pollstall_timer);
3116        udc = NULL;
3117}
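/*
 * Illustrative sketch of the handshake udc_remove() relies on: the
 * timer callback must notice the stop flag and complete on_exit
 * instead of re-arming, and .data doubles as an "initialized" flag
 * set by udc_probe(). A hypothetical callback under those assumptions:
 */
#if 0
static void example_timer_cb(unsigned long arg)
{
	if (stop_timer) {
		complete(&on_exit);		/* unblock udc_remove() */
		return;
	}
	mod_timer(&udc_timer, jiffies + HZ);	/* otherwise re-arm */
}
#endif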
3118
3119/* Reset all pci context */
3120static void udc_pci_remove(struct pci_dev *pdev)
3121{
3122        struct udc              *dev;
3123
3124        dev = pci_get_drvdata(pdev);
3125
3126        usb_del_gadget_udc(&dev->gadget);
3127        /* gadget driver must not be registered */
3128        BUG_ON(dev->driver != NULL);
3129
3130        /* dma pool cleanup */
3131        if (dev->data_requests)
3132                pci_pool_destroy(dev->data_requests);
3133
3134        if (dev->stp_requests) {
3135                /* free DMA descriptors for ep0out */
3136                pci_pool_free(dev->stp_requests,
3137                        dev->ep[UDC_EP0OUT_IX].td_stp,
3138                        dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3139                pci_pool_free(dev->stp_requests,
3140                        dev->ep[UDC_EP0OUT_IX].td,
3141                        dev->ep[UDC_EP0OUT_IX].td_phys);
3142
3143                pci_pool_destroy(dev->stp_requests);
3144        }
3145
3146        /* reset controller */
3147        writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3148        if (dev->irq_registered)
3149                free_irq(pdev->irq, dev);
3150        if (dev->virt_addr)
3151                iounmap(dev->virt_addr);
3152        if (dev->mem_region)
3153                release_mem_region(pci_resource_start(pdev, 0),
3154                                pci_resource_len(pdev, 0));
3155        if (dev->active)
3156                pci_disable_device(pdev);
3157
3158        device_unregister(&dev->gadget.dev);
3159        pci_set_drvdata(pdev, NULL);
3160
3161        udc_remove(dev);
3162}
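/*
 * Side note: the unwind above mirrors udc_pci_probe() in reverse;
 * condensed, with the "acquired" guards of the real function omitted:
 *
 *	free_irq(pdev->irq, dev);
 *	iounmap(dev->virt_addr);
 *	release_mem_region(pci_resource_start(pdev, 0),
 *			   pci_resource_len(pdev, 0));
 *	pci_disable_device(pdev);
 */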
3163
3164/* create dma pools on init */
3165static int init_dma_pools(struct udc *dev)
3166{
3167        struct udc_stp_dma      *td_stp;
3168        struct udc_data_dma     *td_data;
3169        int retval;
3170
3171        /* make the DMA mode settings consistent */
3172        if (use_dma_ppb) {
3173                use_dma_bufferfill_mode = 0;
3174        } else {
3175                use_dma_ppb_du = 0;
3176                use_dma_bufferfill_mode = 1;
3177        }
3178
3179        /* DMA setup */
3180        dev->data_requests = dma_pool_create("data_requests", NULL,
3181                sizeof(struct udc_data_dma), 0, 0);
3182        if (!dev->data_requests) {
3183                DBG(dev, "can't get request data pool\n");
3184                retval = -ENOMEM;
3185                goto finished;
3186        }
3187
3188        /* EP0 in dma regs = dev control regs */
3189        dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3190
3191        /* dma desc for setup data */
3192        dev->stp_requests = dma_pool_create("setup requests", NULL,
3193                sizeof(struct udc_stp_dma), 0, 0);
3194        if (!dev->stp_requests) {
3195                DBG(dev, "can't get stp request pool\n");
3196                retval = -ENOMEM;
3197                goto finished;
3198        }
3199        /* setup */
3200        td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3201                                &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3202        if (td_stp == NULL) {
3203                retval = -ENOMEM;
3204                goto finished;
3205        }
3206        dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3207
3208        /* data descriptor for ep0out, e.g. for zero-length packets */
3209        td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3210                                &dev->ep[UDC_EP0OUT_IX].td_phys);
3211        if (td_data == NULL) {
3212                retval = -ENOMEM;
3213                goto finished;
3214        }
3215        dev->ep[UDC_EP0OUT_IX].td = td_data;
3216        return 0;
3217
3218finished:
3219        return retval;
3220}
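/*
 * Illustrative sketch of the dma_pool lifecycle these pools follow
 * (allocated here, freed in udc_pci_remove()). The pools above pass a
 * NULL device because dev->pdev is assigned only later in
 * udc_pci_probe(); this standalone example (hypothetical, not in the
 * driver) takes the parent device explicitly instead.
 */
#if 0
static int example_pool_roundtrip(struct device *parent)
{
	struct dma_pool *pool;
	dma_addr_t phys;
	void *virt;

	pool = dma_pool_create("example", parent,
			sizeof(struct udc_data_dma), 0, 0);
	if (!pool)
		return -ENOMEM;

	virt = dma_pool_alloc(pool, GFP_KERNEL, &phys);
	if (!virt) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	dma_pool_free(pool, virt, phys);
	dma_pool_destroy(pool);
	return 0;
}
#endif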
3221
3222/* Called by pci bus driver to init pci context */
3223static int udc_pci_probe(
3224        struct pci_dev *pdev,
3225        const struct pci_device_id *id
3226)
3227{
3228        struct udc              *dev;
3229        unsigned long           resource;
3230        unsigned long           len;
3231        int                     retval = 0;
3232
3233        /* one udc only */
3234        if (udc) {
3235                dev_dbg(&pdev->dev, "already probed\n");
3236                return -EBUSY;
3237        }
3238
3239        /* init */
3240        dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3241        if (!dev) {
3242                retval = -ENOMEM;
3243                goto finished;
3244        }
3245
3246        /* pci setup */
3247        if (pci_enable_device(pdev) < 0) {
3248                kfree(dev);
3249                dev = NULL;
3250                retval = -ENODEV;
3251                goto finished;
3252        }
3253        dev->active = 1;
3254
3255        /* PCI resource allocation */
3256        resource = pci_resource_start(pdev, 0);
3257        len = pci_resource_len(pdev, 0);
3258
3259        if (!request_mem_region(resource, len, name)) {
3260                dev_dbg(&pdev->dev, "pci device already in use\n");
3261                kfree(dev);
3262                dev = NULL;
3263                retval = -EBUSY;
3264                goto finished;
3265        }
3266        dev->mem_region = 1;
3267
3268        dev->virt_addr = ioremap_nocache(resource, len);
3269        if (dev->virt_addr == NULL) {
3270                dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3271                kfree(dev);
3272                dev = NULL;
3273                retval = -EFAULT;
3274                goto finished;
3275        }
3276
3277        if (!pdev->irq) {
3278                dev_err(&dev->pdev->dev, "irq not set\n");
3279                kfree(dev);
3280                dev = NULL;
3281                retval = -ENODEV;
3282                goto finished;
3283        }
3284
3285        spin_lock_init(&dev->lock);
3286        /* udc csr registers base */
3287        dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3288        /* dev registers base */
3289        dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3290        /* ep registers base */
3291        dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3292        /* fifo's base */
3293        dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3294        dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3295
3296        if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3297                dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3298                kfree(dev);
3299                dev = NULL;
3300                retval = -EBUSY;
3301                goto finished;
3302        }
3303        dev->irq_registered = 1;
3304
3305        pci_set_drvdata(pdev, dev);
3306
3307        /* chip revision of the high-speed AMD5536 */
3308        dev->chiprev = pdev->revision;
3309
3310        pci_set_master(pdev);
3311        pci_try_set_mwi(pdev);
3312
3313        /* init dma pools */
3314        if (use_dma) {
3315                retval = init_dma_pools(dev);
3316                if (retval != 0)
3317                        goto finished;
3318        }
3319
3320        dev->phys_addr = resource;
3321        dev->irq = pdev->irq;
3322        dev->pdev = pdev;
3323        dev->gadget.dev.parent = &pdev->dev;
3324        dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3325
3326        /* general probing */
3327        if (udc_probe(dev) == 0)
3328                return 0;
3329
3330finished:
3331        if (dev)
3332                udc_pci_remove(pdev);
3333        return retval;
3334}
3335
3336/* general probe */
3337static int udc_probe(struct udc *dev)
3338{
3339        char            tmp[128];
3340        u32             reg;
3341        int             retval;
3342
3343        /* mark timer as not initialized */
3344        udc_timer.data = 0;
3345        udc_pollstall_timer.data = 0;
3346
3347        /* device struct setup */
3348        dev->gadget.ops = &udc_ops;
3349
3350        dev_set_name(&dev->gadget.dev, "gadget");
3351        dev->gadget.dev.release = gadget_release;
3352        dev->gadget.name = name;
3353        dev->gadget.max_speed = USB_SPEED_HIGH;
3354
3355        /* init registers, interrupts, ... */
3356        startup_registers(dev);
3357
3358        dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3359
3360        snprintf(tmp, sizeof tmp, "%d", dev->irq);
3361        dev_info(&dev->pdev->dev,
3362                "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3363                tmp, dev->phys_addr, dev->chiprev,
3364                (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3365        strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3366        if (dev->chiprev == UDC_HSA0_REV) {
3367                dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3368                retval = -ENODEV;
3369                goto finished;
3370        }
3371        dev_info(&dev->pdev->dev,
3372                "driver version: %s(for Geode5536 B1)\n", tmp);
3373        udc = dev;
3374
3375        retval = usb_add_gadget_udc(&udc->pdev->dev, &dev->gadget);
3376        if (retval)
3377                goto finished;
3378
3379        retval = device_register(&dev->gadget.dev);
3380        if (retval) {
3381                usb_del_gadget_udc(&dev->gadget);
3382                put_device(&dev->gadget.dev);
3383                goto finished;
3384        }
3385
3386        /* timer init */
3387        init_timer(&udc_timer);
3388        udc_timer.function = udc_timer_function;
3389        udc_timer.data = 1;
3390        /* timer pollstall init */
3391        init_timer(&udc_pollstall_timer);
3392        udc_pollstall_timer.function = udc_pollstall_timer_function;
3393        udc_pollstall_timer.data = 1;
3394
3395        /* set SD (soft disconnect) */
3396        reg = readl(&dev->regs->ctl);
3397        reg |= AMD_BIT(UDC_DEVCTL_SD);
3398        writel(reg, &dev->regs->ctl);
3399
3400        /* print dev register info */
3401        print_regs(dev);
3402
3403        return 0;
3404
3405finished:
3406        return retval;
3407}
3408
3409/* Initiates a remote wakeup */
3410static int udc_remote_wakeup(struct udc *dev)
3411{
3412        unsigned long flags;
3413        u32 tmp;
3414
3415        DBG(dev, "UDC initiates remote wakeup\n");
3416
3417        spin_lock_irqsave(&dev->lock, flags);
3418
3419        tmp = readl(&dev->regs->ctl);
3420        tmp |= AMD_BIT(UDC_DEVCTL_RES);
3421        writel(tmp, &dev->regs->ctl);
3422        tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
3423        writel(tmp, &dev->regs->ctl);
3424
3425        spin_unlock_irqrestore(&dev->lock, flags);
3426        return 0;
3427}
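/*
 * Usage sketch: gadget drivers reach udc_remote_wakeup() through this
 * driver's .wakeup gadget op rather than calling it directly;
 * assuming the standard usb_gadget_wakeup() helper from
 * <linux/usb/gadget.h>, a gadget-side call looks like this
 * (hypothetical example, not part of this file).
 */
#if 0
static int example_wake_host(struct usb_gadget *gadget)
{
	/* only valid if the host enabled remote wakeup for the device */
	return usb_gadget_wakeup(gadget);
}
#endif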
3428
3429/* PCI device parameters */
3430static const struct pci_device_id pci_id[] = {
3431        {
3432                PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
3433                .class =        (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3434                .class_mask =   0xffffffff,
3435        },
3436        {},
3437};
3438MODULE_DEVICE_TABLE(pci, pci_id);
3439
3440/* PCI functions */
3441static struct pci_driver udc_pci_driver = {
3442        .name =         (char *) name,
3443        .id_table =     pci_id,
3444        .probe =        udc_pci_probe,
3445        .remove =       udc_pci_remove,
3446};
3447
3448/* Module init: register the PCI driver */
3449static int __init init(void)
3450{
3451        return pci_register_driver(&udc_pci_driver);
3452}
3453module_init(init);
3454
3455/* Module exit: unregister the PCI driver */
3456static void __exit cleanup(void)
3457{
3458        pci_unregister_driver(&udc_pci_driver);
3459}
3460module_exit(cleanup);
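/*
 * Side note: kernels that provide the module_pci_driver() helper can
 * collapse the init/cleanup boilerplate above into a single line;
 * shown as an equivalent only, not applied here:
 *
 *	module_pci_driver(udc_pci_driver);
 */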
3461
3462MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3463MODULE_AUTHOR("Thomas Dahlmann");
3464MODULE_LICENSE("GPL");
3465
3466