/*
 * Driver for the Atmel USBA high speed USB device controller
 */
10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/slab.h>
16#include <linux/device.h>
17#include <linux/dma-mapping.h>
18#include <linux/list.h>
19#include <linux/platform_device.h>
20#include <linux/usb/ch9.h>
21#include <linux/usb/gadget.h>
22#include <linux/usb/atmel_usba_udc.h>
23#include <linux/delay.h>
24
25#include <asm/gpio.h>
26#include <mach/board.h>
27
28#include "atmel_usba_udc.h"
29
30
31static struct usba_udc the_udc;
32static struct usba_ep *usba_ep;
33
34#ifdef CONFIG_USB_GADGET_DEBUG_FS
35#include <linux/debugfs.h>
36#include <linux/uaccess.h>
37
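/*
 * The "queue" debugfs file takes a snapshot of the endpoint's request
 * queue at open time: each request is duplicated with kmemdup() under
 * the UDC lock, so subsequent reads can run without holding the lock
 * and without racing against the interrupt handler.
 */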
38static int queue_dbg_open(struct inode *inode, struct file *file)
39{
40 struct usba_ep *ep = inode->i_private;
41 struct usba_request *req, *req_copy;
42 struct list_head *queue_data;
43
44 queue_data = kmalloc(sizeof(*queue_data), GFP_KERNEL);
45 if (!queue_data)
46 return -ENOMEM;
47 INIT_LIST_HEAD(queue_data);
48
49 spin_lock_irq(&ep->udc->lock);
50 list_for_each_entry(req, &ep->queue, queue) {
51 req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC);
52 if (!req_copy)
53 goto fail;
54 list_add_tail(&req_copy->queue, queue_data);
55 }
56 spin_unlock_irq(&ep->udc->lock);
57
58 file->private_data = queue_data;
59 return 0;
60
61fail:
62 spin_unlock_irq(&ep->udc->lock);
63 list_for_each_entry_safe(req, req_copy, queue_data, queue) {
64 list_del(&req->queue);
65 kfree(req);
66 }
67 kfree(queue_data);
68 return -ENOMEM;
69}
/*
 * Each line produced by queue_dbg_read() describes one queued request:
 *
 *   bbbbbbbb llllllll IZS sssss FDL
 *
 * b: buffer address
 * l: buffer length
 * i/I: no_interrupt set / clear
 * Z/z: zero flag set / clear
 * s/S: short_not_ok set / clear
 * sssss: request status
 * F/f: submitted / not yet submitted to the FIFO
 * D/d: using / not using DMA
 * L/l: last transaction / not last transaction
 */
85static ssize_t queue_dbg_read(struct file *file, char __user *buf,
86 size_t nbytes, loff_t *ppos)
87{
88 struct list_head *queue = file->private_data;
89 struct usba_request *req, *tmp_req;
90 size_t len, remaining, actual = 0;
91 char tmpbuf[38];
92
93 if (!access_ok(VERIFY_WRITE, buf, nbytes))
94 return -EFAULT;
95
96 mutex_lock(&file->f_dentry->d_inode->i_mutex);
97 list_for_each_entry_safe(req, tmp_req, queue, queue) {
98 len = snprintf(tmpbuf, sizeof(tmpbuf),
99 "%8p %08x %c%c%c %5d %c%c%c\n",
100 req->req.buf, req->req.length,
101 req->req.no_interrupt ? 'i' : 'I',
102 req->req.zero ? 'Z' : 'z',
103 req->req.short_not_ok ? 's' : 'S',
104 req->req.status,
105 req->submitted ? 'F' : 'f',
106 req->using_dma ? 'D' : 'd',
107 req->last_transaction ? 'L' : 'l');
108 len = min(len, sizeof(tmpbuf));
109 if (len > nbytes)
110 break;
111
112 list_del(&req->queue);
113 kfree(req);
114
115 remaining = __copy_to_user(buf, tmpbuf, len);
116 actual += len - remaining;
117 if (remaining)
118 break;
119
120 nbytes -= len;
121 buf += len;
122 }
123 mutex_unlock(&file->f_dentry->d_inode->i_mutex);
124
125 return actual;
126}
127
128static int queue_dbg_release(struct inode *inode, struct file *file)
129{
130 struct list_head *queue_data = file->private_data;
131 struct usba_request *req, *tmp_req;
132
133 list_for_each_entry_safe(req, tmp_req, queue_data, queue) {
134 list_del(&req->queue);
135 kfree(req);
136 }
137 kfree(queue_data);
138 return 0;
139}
140
141static int regs_dbg_open(struct inode *inode, struct file *file)
142{
143 struct usba_udc *udc;
144 unsigned int i;
145 u32 *data;
146 int ret = -ENOMEM;
147
148 mutex_lock(&inode->i_mutex);
149 udc = inode->i_private;
150 data = kmalloc(inode->i_size, GFP_KERNEL);
151 if (!data)
152 goto out;
153
154 spin_lock_irq(&udc->lock);
155 for (i = 0; i < inode->i_size / 4; i++)
156 data[i] = __raw_readl(udc->regs + i * 4);
157 spin_unlock_irq(&udc->lock);
158
159 file->private_data = data;
160 ret = 0;
161
162out:
163 mutex_unlock(&inode->i_mutex);
164
165 return ret;
166}
167
168static ssize_t regs_dbg_read(struct file *file, char __user *buf,
169 size_t nbytes, loff_t *ppos)
170{
171 struct inode *inode = file->f_dentry->d_inode;
172 int ret;
173
174 mutex_lock(&inode->i_mutex);
175 ret = simple_read_from_buffer(buf, nbytes, ppos,
176 file->private_data,
177 file->f_dentry->d_inode->i_size);
178 mutex_unlock(&inode->i_mutex);
179
180 return ret;
181}
182
183static int regs_dbg_release(struct inode *inode, struct file *file)
184{
185 kfree(file->private_data);
186 return 0;
187}
188
189const struct file_operations queue_dbg_fops = {
190 .owner = THIS_MODULE,
191 .open = queue_dbg_open,
192 .llseek = no_llseek,
193 .read = queue_dbg_read,
194 .release = queue_dbg_release,
195};
196
197const struct file_operations regs_dbg_fops = {
198 .owner = THIS_MODULE,
199 .open = regs_dbg_open,
200 .llseek = generic_file_llseek,
201 .read = regs_dbg_read,
202 .release = regs_dbg_release,
203};
204
205static void usba_ep_init_debugfs(struct usba_udc *udc,
206 struct usba_ep *ep)
207{
208 struct dentry *ep_root;
209
210 ep_root = debugfs_create_dir(ep->ep.name, udc->debugfs_root);
211 if (!ep_root)
212 goto err_root;
213 ep->debugfs_dir = ep_root;
214
215 ep->debugfs_queue = debugfs_create_file("queue", 0400, ep_root,
216 ep, &queue_dbg_fops);
217 if (!ep->debugfs_queue)
218 goto err_queue;
219
220 if (ep->can_dma) {
221 ep->debugfs_dma_status
222 = debugfs_create_u32("dma_status", 0400, ep_root,
223 &ep->last_dma_status);
224 if (!ep->debugfs_dma_status)
225 goto err_dma_status;
226 }
227 if (ep_is_control(ep)) {
228 ep->debugfs_state
229 = debugfs_create_u32("state", 0400, ep_root,
230 &ep->state);
231 if (!ep->debugfs_state)
232 goto err_state;
233 }
234
235 return;
236
237err_state:
238 if (ep->can_dma)
239 debugfs_remove(ep->debugfs_dma_status);
240err_dma_status:
241 debugfs_remove(ep->debugfs_queue);
242err_queue:
243 debugfs_remove(ep_root);
244err_root:
245 dev_err(&ep->udc->pdev->dev,
246 "failed to create debugfs directory for %s\n", ep->ep.name);
247}
248
249static void usba_ep_cleanup_debugfs(struct usba_ep *ep)
250{
251 debugfs_remove(ep->debugfs_queue);
252 debugfs_remove(ep->debugfs_dma_status);
253 debugfs_remove(ep->debugfs_state);
254 debugfs_remove(ep->debugfs_dir);
255 ep->debugfs_dma_status = NULL;
256 ep->debugfs_dir = NULL;
257}
258
259static void usba_init_debugfs(struct usba_udc *udc)
260{
261 struct dentry *root, *regs;
262 struct resource *regs_resource;
263
264 root = debugfs_create_dir(udc->gadget.name, NULL);
265 if (IS_ERR(root) || !root)
266 goto err_root;
267 udc->debugfs_root = root;
268
        regs = debugfs_create_file("regs", 0400, root, udc, &regs_dbg_fops);
270 if (!regs)
271 goto err_regs;
272
273 regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM,
274 CTRL_IOMEM_ID);
275 regs->d_inode->i_size = resource_size(regs_resource);
276 udc->debugfs_regs = regs;
277
278 usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0));
279
280 return;
281
282err_regs:
283 debugfs_remove(root);
284err_root:
285 udc->debugfs_root = NULL;
286 dev_err(&udc->pdev->dev, "debugfs is not available\n");
287}
288
289static void usba_cleanup_debugfs(struct usba_udc *udc)
290{
291 usba_ep_cleanup_debugfs(to_usba_ep(udc->gadget.ep0));
292 debugfs_remove(udc->debugfs_regs);
293 debugfs_remove(udc->debugfs_root);
294 udc->debugfs_regs = NULL;
295 udc->debugfs_root = NULL;
296}
297#else
298static inline void usba_ep_init_debugfs(struct usba_udc *udc,
299 struct usba_ep *ep)
300{
301
302}
303
304static inline void usba_ep_cleanup_debugfs(struct usba_ep *ep)
305{
306
307}
308
309static inline void usba_init_debugfs(struct usba_udc *udc)
310{
311
312}
313
314static inline void usba_cleanup_debugfs(struct usba_udc *udc)
315{
316
317}
318#endif
319
320static int vbus_is_present(struct usba_udc *udc)
321{
322 if (gpio_is_valid(udc->vbus_pin))
323 return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;

        /* No Vbus sensing GPIO: assume Vbus is always present */
        return 1;
327}
328
329#if defined(CONFIG_ARCH_AT91SAM9RL)
330
331#include <mach/at91_pmc.h>
332
333static void toggle_bias(int is_on)
334{
335 unsigned int uckr = at91_sys_read(AT91_CKGR_UCKR);
336
337 if (is_on)
338 at91_sys_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN);
339 else
340 at91_sys_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN));
341}
342
343#else
344
345static void toggle_bias(int is_on)
346{
347}
348
349#endif
350
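/*
 * Copy the next chunk of a PIO request into the endpoint FIFO.  At most
 * ep.maxpacket bytes are written per call; last_transaction stays clear
 * when more data remains or when a trailing zero-length packet has been
 * requested via req->req.zero.
 */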
351static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
352{
353 unsigned int transaction_len;
354
355 transaction_len = req->req.length - req->req.actual;
356 req->last_transaction = 1;
357 if (transaction_len > ep->ep.maxpacket) {
358 transaction_len = ep->ep.maxpacket;
359 req->last_transaction = 0;
360 } else if (transaction_len == ep->ep.maxpacket && req->req.zero)
361 req->last_transaction = 0;
362
363 DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
364 ep->ep.name, req, transaction_len,
365 req->last_transaction ? ", done" : "");
366
367 memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len);
368 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
369 req->req.actual += transaction_len;
370}
371
372static void submit_request(struct usba_ep *ep, struct usba_request *req)
373{
374 DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n",
375 ep->ep.name, req, req->req.length);
376
377 req->req.actual = 0;
378 req->submitted = 1;
379
380 if (req->using_dma) {
381 if (req->req.length == 0) {
382 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
383 return;
384 }
385
386 if (req->req.zero)
387 usba_ep_writel(ep, CTL_ENB, USBA_SHORT_PACKET);
388 else
389 usba_ep_writel(ep, CTL_DIS, USBA_SHORT_PACKET);
390
391 usba_dma_writel(ep, ADDRESS, req->req.dma);
392 usba_dma_writel(ep, CONTROL, req->ctrl);
393 } else {
394 next_fifo_transaction(ep, req);
395 if (req->last_transaction) {
396 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
397 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
398 } else {
399 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
400 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
401 }
402 }
403}
404
405static void submit_next_request(struct usba_ep *ep)
406{
407 struct usba_request *req;
408
409 if (list_empty(&ep->queue)) {
410 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY);
411 return;
412 }
413
414 req = list_entry(ep->queue.next, struct usba_request, queue);
415 if (!req->submitted)
416 submit_request(ep, req);
417}
418
419static void send_status(struct usba_udc *udc, struct usba_ep *ep)
420{
421 ep->state = STATUS_STAGE_IN;
422 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
423 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
424}
425
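/*
 * Drain the busy FIFO banks into the request at the head of the queue.
 * A request completes once it has been filled, or when the hardware
 * flags the transfer as finished (the bit 31 check below); completed
 * control requests go straight to the status stage.
 */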
426static void receive_data(struct usba_ep *ep)
427{
428 struct usba_udc *udc = ep->udc;
429 struct usba_request *req;
430 unsigned long status;
431 unsigned int bytecount, nr_busy;
432 int is_complete = 0;
433
434 status = usba_ep_readl(ep, STA);
435 nr_busy = USBA_BFEXT(BUSY_BANKS, status);
436
437 DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);
438
439 while (nr_busy > 0) {
440 if (list_empty(&ep->queue)) {
441 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
442 break;
443 }
444 req = list_entry(ep->queue.next,
445 struct usba_request, queue);
446
447 bytecount = USBA_BFEXT(BYTE_COUNT, status);
448
449 if (status & (1 << 31))
450 is_complete = 1;
451 if (req->req.actual + bytecount >= req->req.length) {
452 is_complete = 1;
453 bytecount = req->req.length - req->req.actual;
454 }
455
456 memcpy_fromio(req->req.buf + req->req.actual,
457 ep->fifo, bytecount);
458 req->req.actual += bytecount;
459
460 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
461
462 if (is_complete) {
463 DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
464 req->req.status = 0;
465 list_del_init(&req->queue);
466 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
467 spin_unlock(&udc->lock);
468 req->req.complete(&ep->ep, &req->req);
469 spin_lock(&udc->lock);
470 }
471
472 status = usba_ep_readl(ep, STA);
473 nr_busy = USBA_BFEXT(BUSY_BANKS, status);
474
475 if (is_complete && ep_is_control(ep)) {
476 send_status(udc, ep);
477 break;
478 }
479 }
480}
481
482static void
483request_complete(struct usba_ep *ep, struct usba_request *req, int status)
484{
485 struct usba_udc *udc = ep->udc;
486
487 WARN_ON(!list_empty(&req->queue));
488
489 if (req->req.status == -EINPROGRESS)
490 req->req.status = status;
491
492 if (req->mapped) {
493 dma_unmap_single(
494 &udc->pdev->dev, req->req.dma, req->req.length,
495 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
496 req->req.dma = DMA_ADDR_INVALID;
497 req->mapped = 0;
498 }
499
500 DBG(DBG_GADGET | DBG_REQ,
501 "%s: req %p complete: status %d, actual %u\n",
502 ep->ep.name, req, req->req.status, req->req.actual);
503
504 spin_unlock(&udc->lock);
505 req->req.complete(&ep->ep, &req->req);
506 spin_lock(&udc->lock);
507}
508
509static void
510request_complete_list(struct usba_ep *ep, struct list_head *list, int status)
511{
512 struct usba_request *req, *tmp_req;
513
514 list_for_each_entry_safe(req, tmp_req, list, queue) {
515 list_del_init(&req->queue);
516 request_complete(ep, req, status);
517 }
518}
519
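/*
 * Validate the endpoint descriptor and program the EPT_CFG register:
 * EPT_SIZE encodes the maximum packet size, BK_NUMBER selects
 * single/double/triple banking, and isochronous endpoints additionally
 * encode the per-microframe transaction count in NB_TRANS.  DMA-capable
 * endpoints also get their DMA interrupt enabled here.
 */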
520static int
521usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
522{
523 struct usba_ep *ep = to_usba_ep(_ep);
524 struct usba_udc *udc = ep->udc;
525 unsigned long flags, ept_cfg, maxpacket;
526 unsigned int nr_trans;
527
528 DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
529
530 maxpacket = usb_endpoint_maxp(desc) & 0x7ff;
531
532 if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) != ep->index)
533 || ep->index == 0
534 || desc->bDescriptorType != USB_DT_ENDPOINT
535 || maxpacket == 0
536 || maxpacket > ep->fifo_size) {
537 DBG(DBG_ERR, "ep_enable: Invalid argument");
538 return -EINVAL;
539 }
540
541 ep->is_isoc = 0;
542 ep->is_in = 0;
543
544 if (maxpacket <= 8)
545 ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
        else
                /* EPT_SIZE encodes the (rounded-up) packet size as log2(size) - 3 */
                ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);
549
550 DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
551 ep->ep.name, ept_cfg, maxpacket);
552
553 if (usb_endpoint_dir_in(desc)) {
554 ep->is_in = 1;
555 ept_cfg |= USBA_EPT_DIR_IN;
556 }
557
558 switch (usb_endpoint_type(desc)) {
559 case USB_ENDPOINT_XFER_CONTROL:
560 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
561 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
562 break;
563 case USB_ENDPOINT_XFER_ISOC:
564 if (!ep->can_isoc) {
565 DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
566 ep->ep.name);
567 return -EINVAL;
568 }
                /*
                 * Bits 11:12 of wMaxPacketSize give the number of
                 * additional transactions per microframe.
                 */
                nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
575 if (nr_trans > 3)
576 return -EINVAL;
577
578 ep->is_isoc = 1;
579 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);
                /*
                 * Use triple banking for high-bandwidth transfers when the
                 * hardware provides three banks for this endpoint.
                 */
                if (nr_trans > 1 && ep->nr_banks == 3)
                        ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
586 else
587 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
588 ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
589 break;
590 case USB_ENDPOINT_XFER_BULK:
591 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
592 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
593 break;
594 case USB_ENDPOINT_XFER_INT:
595 ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
596 ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
597 break;
598 }
599
600 spin_lock_irqsave(&ep->udc->lock, flags);
601
602 if (ep->desc) {
603 spin_unlock_irqrestore(&ep->udc->lock, flags);
604 DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
605 return -EBUSY;
606 }
607
608 ep->desc = desc;
609 ep->ep.maxpacket = maxpacket;
610
611 usba_ep_writel(ep, CFG, ept_cfg);
612 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
613
614 if (ep->can_dma) {
615 u32 ctrl;
616
617 usba_writel(udc, INT_ENB,
618 (usba_readl(udc, INT_ENB)
619 | USBA_BF(EPT_INT, 1 << ep->index)
620 | USBA_BF(DMA_INT, 1 << ep->index)));
621 ctrl = USBA_AUTO_VALID | USBA_INTDIS_DMA;
622 usba_ep_writel(ep, CTL_ENB, ctrl);
623 } else {
624 usba_writel(udc, INT_ENB,
625 (usba_readl(udc, INT_ENB)
626 | USBA_BF(EPT_INT, 1 << ep->index)));
627 }
628
629 spin_unlock_irqrestore(&udc->lock, flags);
630
631 DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
632 (unsigned long)usba_ep_readl(ep, CFG));
633 DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
634 (unsigned long)usba_readl(udc, INT_ENB));
635
636 return 0;
637}
638
639static int usba_ep_disable(struct usb_ep *_ep)
640{
641 struct usba_ep *ep = to_usba_ep(_ep);
642 struct usba_udc *udc = ep->udc;
643 LIST_HEAD(req_list);
644 unsigned long flags;
645
646 DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);
647
648 spin_lock_irqsave(&udc->lock, flags);
649
650 if (!ep->desc) {
                spin_unlock_irqrestore(&udc->lock, flags);
                /*
                 * The endpoint is already disabled.  Only complain if the
                 * bus is still up: during a reset or disconnect the UDC
                 * disables all endpoints itself before telling the gadget
                 * driver, so a second disable here is expected.
                 */
                if (udc->gadget.speed != USB_SPEED_UNKNOWN)
                        DBG(DBG_ERR, "ep_disable: %s not enabled\n",
                                        ep->ep.name);
                return -EINVAL;
660 }
661 ep->desc = NULL;
662 ep->ep.desc = NULL;
663
664 list_splice_init(&ep->queue, &req_list);
665 if (ep->can_dma) {
666 usba_dma_writel(ep, CONTROL, 0);
667 usba_dma_writel(ep, ADDRESS, 0);
668 usba_dma_readl(ep, STATUS);
669 }
670 usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
671 usba_writel(udc, INT_ENB,
672 usba_readl(udc, INT_ENB)
673 & ~USBA_BF(EPT_INT, 1 << ep->index));
674
675 request_complete_list(ep, &req_list, -ESHUTDOWN);
676
677 spin_unlock_irqrestore(&udc->lock, flags);
678
679 return 0;
680}
681
682static struct usb_request *
683usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
684{
685 struct usba_request *req;
686
687 DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags);
688
689 req = kzalloc(sizeof(*req), gfp_flags);
690 if (!req)
691 return NULL;
692
693 INIT_LIST_HEAD(&req->queue);
694 req->req.dma = DMA_ADDR_INVALID;
695
696 return &req->req;
697}
698
699static void
700usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
701{
702 struct usba_request *req = to_usba_req(_req);
703
704 DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req);
705
706 kfree(req);
707}
708
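/*
 * Submit a request using the per-endpoint DMA channel.  The buffer is
 * mapped with dma_map_single() unless the caller already provided a DMA
 * address, and the channel control word is built from the request length
 * plus the end-of-buffer/end-of-transfer interrupt enables.
 */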
709static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
710 struct usba_request *req, gfp_t gfp_flags)
711{
712 unsigned long flags;
713 int ret;
714
715 DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n",
716 ep->ep.name, req->req.length, req->req.dma,
717 req->req.zero ? 'Z' : 'z',
718 req->req.short_not_ok ? 'S' : 's',
719 req->req.no_interrupt ? 'I' : 'i');
720
        if (req->req.length > 0x10000) {
                /* The DMA buffer-length field cannot describe transfers this large */
                DBG(DBG_ERR, "invalid request length %u\n", req->req.length);
                return -EINVAL;
        }
726
727 req->using_dma = 1;
728
729 if (req->req.dma == DMA_ADDR_INVALID) {
730 req->req.dma = dma_map_single(
731 &udc->pdev->dev, req->req.buf, req->req.length,
732 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
733 req->mapped = 1;
734 } else {
735 dma_sync_single_for_device(
736 &udc->pdev->dev, req->req.dma, req->req.length,
737 ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
738 req->mapped = 0;
739 }
740
741 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
742 | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
743 | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
744
745 if (ep->is_in)
746 req->ctrl |= USBA_DMA_END_BUF_EN;
        /*
         * Add the request to the queue and hand it to the DMA controller
         * if the queue was empty.  Re-check ep->desc under the lock first:
         * the endpoint may have been disabled by a reset since the
         * caller's last check.
         */
        ret = -ESHUTDOWN;
        spin_lock_irqsave(&udc->lock, flags);
755 if (ep->desc) {
756 if (list_empty(&ep->queue))
757 submit_request(ep, req);
758
759 list_add_tail(&req->queue, &ep->queue);
760 ret = 0;
761 }
762 spin_unlock_irqrestore(&udc->lock, flags);
763
764 return ret;
765}
766
767static int
768usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
769{
770 struct usba_request *req = to_usba_req(_req);
771 struct usba_ep *ep = to_usba_ep(_ep);
772 struct usba_udc *udc = ep->udc;
773 unsigned long flags;
774 int ret;
775
776 DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
777 ep->ep.name, req, _req->length);
778
779 if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN || !ep->desc)
780 return -ESHUTDOWN;
781
782 req->submitted = 0;
783 req->using_dma = 0;
784 req->last_transaction = 0;
785
786 _req->status = -EINPROGRESS;
787 _req->actual = 0;
788
789 if (ep->can_dma)
790 return queue_dma(udc, ep, req, gfp_flags);

        /* May have received a reset since we last checked ep->desc */
        ret = -ESHUTDOWN;
        spin_lock_irqsave(&udc->lock, flags);
795 if (ep->desc) {
796 list_add_tail(&req->queue, &ep->queue);
797
798 if ((!ep_is_control(ep) && ep->is_in) ||
799 (ep_is_control(ep)
800 && (ep->state == DATA_STAGE_IN
801 || ep->state == STATUS_STAGE_IN)))
802 usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
803 else
804 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
805 ret = 0;
806 }
807 spin_unlock_irqrestore(&udc->lock, flags);
808
809 return ret;
810}
811
812static void
813usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status)
814{
815 req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status);
816}
817
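/*
 * Ask the DMA channel to stop and poll (for up to roughly 40 us) until
 * the hardware clears CH_EN.  The final channel status is returned
 * through *pstatus so the caller can account for any partial transfer.
 */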
818static int stop_dma(struct usba_ep *ep, u32 *pstatus)
819{
820 unsigned int timeout;
821 u32 status;
822
        /* Stop the DMA channel by clearing its CONTROL register */
        usba_dma_writel(ep, CONTROL, 0);

        /* Wait for the channel to stop and the FIFO to drain */
        for (timeout = 40; timeout; --timeout) {
831 status = usba_dma_readl(ep, STATUS);
832 if (!(status & USBA_DMA_CH_EN))
833 break;
834 udelay(1);
835 }
836
837 if (pstatus)
838 *pstatus = status;
839
840 if (timeout == 0) {
841 dev_err(&ep->udc->pdev->dev,
842 "%s: timed out waiting for DMA FIFO to empty\n",
843 ep->ep.name);
844 return -ETIMEDOUT;
845 }
846
847 return 0;
848}
849
850static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
851{
852 struct usba_ep *ep = to_usba_ep(_ep);
853 struct usba_udc *udc = ep->udc;
854 struct usba_request *req = to_usba_req(_req);
855 unsigned long flags;
856 u32 status;
857
858 DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
859 ep->ep.name, req);
860
861 spin_lock_irqsave(&udc->lock, flags);
862
        if (req->using_dma) {
                /*
                 * If this request is currently being transferred, stop the
                 * DMA channel and reset the FIFO before completing it.
                 */
                if (ep->queue.next == &req->queue) {
869 status = usba_dma_readl(ep, STATUS);
870 if (status & USBA_DMA_CH_EN)
871 stop_dma(ep, &status);
872
873#ifdef CONFIG_USB_GADGET_DEBUG_FS
874 ep->last_dma_status = status;
875#endif
876
877 usba_writel(udc, EPT_RST, 1 << ep->index);
878
879 usba_update_req(ep, req, status);
880 }
881 }
        /*
         * Remove the request from the queue and complete it with
         * -ECONNRESET, then kick off the next queued request, if any.
         */
        list_del_init(&req->queue);

        request_complete(ep, req, -ECONNRESET);

        /* Process the next request, if any */
        submit_next_request(ep);
893 spin_unlock_irqrestore(&udc->lock, flags);
894
895 return 0;
896}
897
898static int usba_ep_set_halt(struct usb_ep *_ep, int value)
899{
900 struct usba_ep *ep = to_usba_ep(_ep);
901 struct usba_udc *udc = ep->udc;
902 unsigned long flags;
903 int ret = 0;
904
905 DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name,
906 value ? "set" : "clear");
907
908 if (!ep->desc) {
909 DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n",
910 ep->ep.name);
911 return -ENODEV;
912 }
913 if (ep->is_isoc) {
914 DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n",
915 ep->ep.name);
916 return -ENOTTY;
917 }
918
919 spin_lock_irqsave(&udc->lock, flags);
        /*
         * We can't halt IN endpoints that still have data in their FIFO
         * banks, and we refuse to stall while requests are queued.
         */
        if (!list_empty(&ep->queue)
                        || ((value && ep->is_in && (usba_ep_readl(ep, STA)
                                        & USBA_BF(BUSY_BANKS, -1L))))) {
928 ret = -EAGAIN;
929 } else {
930 if (value)
931 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
932 else
933 usba_ep_writel(ep, CLR_STA,
934 USBA_FORCE_STALL | USBA_TOGGLE_CLR);
935 usba_ep_readl(ep, STA);
936 }
937
938 spin_unlock_irqrestore(&udc->lock, flags);
939
940 return ret;
941}
942
943static int usba_ep_fifo_status(struct usb_ep *_ep)
944{
945 struct usba_ep *ep = to_usba_ep(_ep);
946
947 return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
948}
949
950static void usba_ep_fifo_flush(struct usb_ep *_ep)
951{
952 struct usba_ep *ep = to_usba_ep(_ep);
953 struct usba_udc *udc = ep->udc;
954
955 usba_writel(udc, EPT_RST, 1 << ep->index);
956}
957
958static const struct usb_ep_ops usba_ep_ops = {
959 .enable = usba_ep_enable,
960 .disable = usba_ep_disable,
961 .alloc_request = usba_ep_alloc_request,
962 .free_request = usba_ep_free_request,
963 .queue = usba_ep_queue,
964 .dequeue = usba_ep_dequeue,
965 .set_halt = usba_ep_set_halt,
966 .fifo_status = usba_ep_fifo_status,
967 .fifo_flush = usba_ep_fifo_flush,
968};
969
970static int usba_udc_get_frame(struct usb_gadget *gadget)
971{
972 struct usba_udc *udc = to_usba_udc(gadget);
973
974 return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM));
975}
976
977static int usba_udc_wakeup(struct usb_gadget *gadget)
978{
979 struct usba_udc *udc = to_usba_udc(gadget);
980 unsigned long flags;
981 u32 ctrl;
982 int ret = -EINVAL;
983
984 spin_lock_irqsave(&udc->lock, flags);
985 if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) {
986 ctrl = usba_readl(udc, CTRL);
987 usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP);
988 ret = 0;
989 }
990 spin_unlock_irqrestore(&udc->lock, flags);
991
992 return ret;
993}
994
995static int
996usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered)
997{
998 struct usba_udc *udc = to_usba_udc(gadget);
999 unsigned long flags;
1000
1001 spin_lock_irqsave(&udc->lock, flags);
1002 if (is_selfpowered)
1003 udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED;
1004 else
1005 udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
1006 spin_unlock_irqrestore(&udc->lock, flags);
1007
1008 return 0;
1009}
1010
1011static int atmel_usba_start(struct usb_gadget_driver *driver,
1012 int (*bind)(struct usb_gadget *));
1013static int atmel_usba_stop(struct usb_gadget_driver *driver);
1014
1015static const struct usb_gadget_ops usba_udc_ops = {
1016 .get_frame = usba_udc_get_frame,
1017 .wakeup = usba_udc_wakeup,
1018 .set_selfpowered = usba_udc_set_selfpowered,
1019 .start = atmel_usba_start,
1020 .stop = atmel_usba_stop,
1021};
1022
1023static struct usb_endpoint_descriptor usba_ep0_desc = {
1024 .bLength = USB_DT_ENDPOINT_SIZE,
1025 .bDescriptorType = USB_DT_ENDPOINT,
1026 .bEndpointAddress = 0,
1027 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1028 .wMaxPacketSize = cpu_to_le16(64),
1029
1030 .bInterval = 1,
1031};
1032
1033static void nop_release(struct device *dev)
1034{
1035
1036}
1037
1038static struct usba_udc the_udc = {
1039 .gadget = {
1040 .ops = &usba_udc_ops,
1041 .ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
1042 .max_speed = USB_SPEED_HIGH,
1043 .name = "atmel_usba_udc",
1044 .dev = {
1045 .init_name = "gadget",
1046 .release = nop_release,
1047 },
1048 },
1049};
1050
/*
 * Called with interrupts disabled and udc->lock held.
 */
static void reset_all_endpoints(struct usba_udc *udc)
1055{
1056 struct usba_ep *ep;
1057 struct usba_request *req, *tmp_req;
1058
1059 usba_writel(udc, EPT_RST, ~0UL);
1060
1061 ep = to_usba_ep(udc->gadget.ep0);
1062 list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
1063 list_del_init(&req->queue);
1064 request_complete(ep, req, -ECONNRESET);
1065 }
1066
        /* Disable any endpoints that are still enabled */
        list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
1074 if (ep->desc) {
1075 spin_unlock(&udc->lock);
1076 usba_ep_disable(&ep->ep);
1077 spin_lock(&udc->lock);
1078 }
1079 }
1080}
1081
1082static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex)
1083{
1084 struct usba_ep *ep;
1085
1086 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
1087 return to_usba_ep(udc->gadget.ep0);
1088
1089 list_for_each_entry (ep, &udc->gadget.ep_list, ep.ep_list) {
1090 u8 bEndpointAddress;
1091
1092 if (!ep->desc)
1093 continue;
1094 bEndpointAddress = ep->desc->bEndpointAddress;
1095 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
1096 continue;
1097 if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
1098 == (wIndex & USB_ENDPOINT_NUMBER_MASK))
1099 return ep;
1100 }
1101
1102 return NULL;
1103}
1104
1105
1106static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
1107{
1108 usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
1109 ep->state = WAIT_FOR_SETUP;
1110}
1111
1112static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep)
1113{
1114 if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL)
1115 return 1;
1116 return 0;
1117}
1118
1119static inline void set_address(struct usba_udc *udc, unsigned int addr)
1120{
1121 u32 regval;
1122
1123 DBG(DBG_BUS, "setting address %u...\n", addr);
1124 regval = usba_readl(udc, CTRL);
1125 regval = USBA_BFINS(DEV_ADDR, addr, regval);
1126 usba_writel(udc, CTRL, regval);
1127}
1128
1129static int do_test_mode(struct usba_udc *udc)
1130{
        static const char test_packet_buffer[] = {
                /* JKJKJKJK x 9 */
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                /* JJKKJJKK x 8 */
                0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
                /* JJJJKKKK x 8 */
                0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
                /* JJJJJJJKKKKKKK x 8 */
                0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                /* JJJJJJJK x 8 */
                0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
                /* {JKKKKKKK x 10}, JK */
                0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
        };
1146 struct usba_ep *ep;
1147 struct device *dev = &udc->pdev->dev;
1148 int test_mode;
1149
        test_mode = udc->test_mode;

        /* Start from a clean slate */
        reset_all_endpoints(udc);

        switch (test_mode) {
        case 0x0100:
                /* Test_J */
                usba_writel(udc, TST, USBA_TST_J_MODE);
1159 dev_info(dev, "Entering Test_J mode...\n");
1160 break;
        case 0x0200:
                /* Test_K */
                usba_writel(udc, TST, USBA_TST_K_MODE);
1164 dev_info(dev, "Entering Test_K mode...\n");
1165 break;
        case 0x0300:
                /*
                 * Test_SE0_NAK: force high speed and configure ep0 so the
                 * controller answers IN tokens with NAK.
                 */
                ep = &usba_ep[0];
                usba_writel(udc, TST,
                                USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
1174 usba_ep_writel(ep, CFG,
1175 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
1176 | USBA_EPT_DIR_IN
1177 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
1178 | USBA_BF(BK_NUMBER, 1));
1179 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
1180 set_protocol_stall(udc, ep);
1181 dev_err(dev, "Test_SE0_NAK: ep0 not mapped\n");
1182 } else {
1183 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
1184 dev_info(dev, "Entering Test_SE0_NAK mode...\n");
1185 }
1186 break;
        case 0x0400:
                /* Test_Packet */
                ep = &usba_ep[0];
1190 usba_ep_writel(ep, CFG,
1191 USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
1192 | USBA_EPT_DIR_IN
1193 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
1194 | USBA_BF(BK_NUMBER, 1));
1195 if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
1196 set_protocol_stall(udc, ep);
1197 dev_err(dev, "Test_Packet: ep0 not mapped\n");
1198 } else {
1199 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
1200 usba_writel(udc, TST, USBA_TST_PKT_MODE);
1201 memcpy_toio(ep->fifo, test_packet_buffer,
1202 sizeof(test_packet_buffer));
1203 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
1204 dev_info(dev, "Entering Test_Packet mode...\n");
1205 }
1206 break;
1207 default:
1208 dev_err(dev, "Invalid test mode: 0x%04x\n", test_mode);
1209 return -EINVAL;
1210 }
1211
1212 return 0;
1213}
1214
1215
1216static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq)
1217{
1218 if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
1219 return true;
1220 return false;
1221}
1222
1223static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq)
1224{
1225 if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE))
1226 return true;
1227 return false;
1228}
1229
1230static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq)
1231{
1232 if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT))
1233 return true;
1234 return false;
1235}
1236
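/*
 * Handle the standard requests the UDC must answer itself (GET_STATUS,
 * SET_FEATURE, CLEAR_FEATURE, SET_ADDRESS); everything else is delegated
 * to the gadget driver's setup() callback with the UDC lock temporarily
 * released.
 */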
1237static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
1238 struct usb_ctrlrequest *crq)
1239{
1240 int retval = 0;
1241
1242 switch (crq->bRequest) {
1243 case USB_REQ_GET_STATUS: {
1244 u16 status;
1245
1246 if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
1247 status = cpu_to_le16(udc->devstatus);
1248 } else if (crq->bRequestType
1249 == (USB_DIR_IN | USB_RECIP_INTERFACE)) {
1250 status = cpu_to_le16(0);
1251 } else if (crq->bRequestType
1252 == (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
1253 struct usba_ep *target;
1254
1255 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1256 if (!target)
1257 goto stall;
1258
1259 status = 0;
1260 if (is_stalled(udc, target))
1261 status |= cpu_to_le16(1);
1262 } else
1263 goto delegate;
1264
1265
1266 if (crq->wLength != cpu_to_le16(sizeof(status)))
1267 goto stall;
1268 ep->state = DATA_STAGE_IN;
1269 __raw_writew(status, ep->fifo);
1270 usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
1271 break;
1272 }
1273
1274 case USB_REQ_CLEAR_FEATURE: {
1275 if (crq->bRequestType == USB_RECIP_DEVICE) {
1276 if (feature_is_dev_remote_wakeup(crq))
1277 udc->devstatus
1278 &= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
                        else
                                /* Can't CLEAR_FEATURE TEST_MODE */
                                goto stall;
1282 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1283 struct usba_ep *target;
1284
1285 if (crq->wLength != cpu_to_le16(0)
1286 || !feature_is_ep_halt(crq))
1287 goto stall;
1288 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1289 if (!target)
1290 goto stall;
1291
1292 usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
1293 if (target->index != 0)
1294 usba_ep_writel(target, CLR_STA,
1295 USBA_TOGGLE_CLR);
1296 } else {
1297 goto delegate;
1298 }
1299
1300 send_status(udc, ep);
1301 break;
1302 }
1303
1304 case USB_REQ_SET_FEATURE: {
1305 if (crq->bRequestType == USB_RECIP_DEVICE) {
1306 if (feature_is_dev_test_mode(crq)) {
1307 send_status(udc, ep);
1308 ep->state = STATUS_STAGE_TEST;
1309 udc->test_mode = le16_to_cpu(crq->wIndex);
1310 return 0;
1311 } else if (feature_is_dev_remote_wakeup(crq)) {
1312 udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
1313 } else {
1314 goto stall;
1315 }
1316 } else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
1317 struct usba_ep *target;
1318
1319 if (crq->wLength != cpu_to_le16(0)
1320 || !feature_is_ep_halt(crq))
1321 goto stall;
1322
1323 target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
1324 if (!target)
1325 goto stall;
1326
1327 usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
1328 } else
1329 goto delegate;
1330
1331 send_status(udc, ep);
1332 break;
1333 }
1334
1335 case USB_REQ_SET_ADDRESS:
1336 if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
1337 goto delegate;
1338
1339 set_address(udc, le16_to_cpu(crq->wValue));
1340 send_status(udc, ep);
1341 ep->state = STATUS_STAGE_ADDR;
1342 break;
1343
1344 default:
1345delegate:
1346 spin_unlock(&udc->lock);
1347 retval = udc->driver->setup(&udc->gadget, crq);
1348 spin_lock(&udc->lock);
1349 }
1350
1351 return retval;
1352
1353stall:
1354 pr_err("udc: %s: Invalid setup request: %02x.%02x v%04x i%04x l%d, "
1355 "halting endpoint...\n",
1356 ep->ep.name, crq->bRequestType, crq->bRequest,
1357 le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
1358 le16_to_cpu(crq->wLength));
1359 set_protocol_stall(udc, ep);
1360 return -1;
1361}
1362
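/*
 * Endpoint 0 state machine.  ep->state tracks where we are in the
 * control transfer (WAIT_FOR_SETUP, DATA_STAGE_IN/OUT, STATUS_STAGE_*),
 * and each interrupt source (TX_PK_RDY, TX_COMPLETE, RX_BK_RDY,
 * RX_SETUP) advances it.  Anything unexpected stalls the endpoint via
 * set_protocol_stall().
 */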
1363static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
1364{
1365 struct usba_request *req;
1366 u32 epstatus;
1367 u32 epctrl;
1368
1369restart:
1370 epstatus = usba_ep_readl(ep, STA);
1371 epctrl = usba_ep_readl(ep, CTL);
1372
1373 DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
1374 ep->ep.name, ep->state, epstatus, epctrl);
1375
1376 req = NULL;
1377 if (!list_empty(&ep->queue))
1378 req = list_entry(ep->queue.next,
1379 struct usba_request, queue);
1380
1381 if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
1382 if (req->submitted)
1383 next_fifo_transaction(ep, req);
1384 else
1385 submit_request(ep, req);
1386
1387 if (req->last_transaction) {
1388 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
1389 usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
1390 }
1391 goto restart;
1392 }
1393 if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
1394 usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);
1395
1396 switch (ep->state) {
1397 case DATA_STAGE_IN:
1398 usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
1399 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1400 ep->state = STATUS_STAGE_OUT;
1401 break;
                case STATUS_STAGE_ADDR:
                        /* Activate our new address */
                        usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
1405 | USBA_FADDR_EN));
1406 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1407 ep->state = WAIT_FOR_SETUP;
1408 break;
1409 case STATUS_STAGE_IN:
1410 if (req) {
1411 list_del_init(&req->queue);
1412 request_complete(ep, req, 0);
1413 submit_next_request(ep);
1414 }
1415 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1416 ep->state = WAIT_FOR_SETUP;
1417 break;
1418 case STATUS_STAGE_TEST:
1419 usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
1420 ep->state = WAIT_FOR_SETUP;
1421 if (do_test_mode(udc))
1422 set_protocol_stall(udc, ep);
1423 break;
1424 default:
1425 pr_err("udc: %s: TXCOMP: Invalid endpoint state %d, "
1426 "halting endpoint...\n",
1427 ep->ep.name, ep->state);
1428 set_protocol_stall(udc, ep);
1429 break;
1430 }
1431
1432 goto restart;
1433 }
1434 if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
1435 switch (ep->state) {
1436 case STATUS_STAGE_OUT:
1437 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1438 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1439
1440 if (req) {
1441 list_del_init(&req->queue);
1442 request_complete(ep, req, 0);
1443 }
1444 ep->state = WAIT_FOR_SETUP;
1445 break;
1446
1447 case DATA_STAGE_OUT:
1448 receive_data(ep);
1449 break;
1450
1451 default:
1452 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1453 usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
1454 pr_err("udc: %s: RXRDY: Invalid endpoint state %d, "
1455 "halting endpoint...\n",
1456 ep->ep.name, ep->state);
1457 set_protocol_stall(udc, ep);
1458 break;
1459 }
1460
1461 goto restart;
1462 }
1463 if (epstatus & USBA_RX_SETUP) {
1464 union {
1465 struct usb_ctrlrequest crq;
1466 unsigned long data[2];
1467 } crq;
1468 unsigned int pkt_len;
1469 int ret;
1470
                if (ep->state != WAIT_FOR_SETUP) {
                        /*
                         * A SETUP packet arrived while a control transfer
                         * was still in progress.  Terminate the pending
                         * request; it failed unless we were already in the
                         * status stage.
                         */
                        int status = -EPROTO;

                        /*
                         * If we were only waiting for the status stage,
                         * the transfer effectively finished: complete the
                         * request with success instead.
                         */
                        if (ep->state == STATUS_STAGE_OUT
                                        || ep->state == STATUS_STAGE_IN) {
                                usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
                                status = 0;
                        }
1489
1490 if (req) {
1491 list_del_init(&req->queue);
1492 request_complete(ep, req, status);
1493 }
1494 }
1495
1496 pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
1497 DBG(DBG_HW, "Packet length: %u\n", pkt_len);
1498 if (pkt_len != sizeof(crq)) {
1499 pr_warning("udc: Invalid packet length %u "
1500 "(expected %zu)\n", pkt_len, sizeof(crq));
1501 set_protocol_stall(udc, ep);
1502 return;
1503 }
1504
1505 DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
1506 memcpy_fromio(crq.data, ep->fifo, sizeof(crq));
1507

                /* Acknowledge the SETUP packet */
                usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

                /*
                 * Decide which stage comes next from the request direction
                 * and wLength.
                 */
                if (crq.crq.bRequestType & USB_DIR_IN) {
                        /*
                         * IN requests always get a data stage here, even
                         * when wLength is zero.
                         */
                        ep->state = DATA_STAGE_IN;
1524 } else {
1525 if (crq.crq.wLength != cpu_to_le16(0))
1526 ep->state = DATA_STAGE_OUT;
1527 else
1528 ep->state = STATUS_STAGE_IN;
1529 }
1530
1531 ret = -1;
1532 if (ep->index == 0)
1533 ret = handle_ep0_setup(udc, ep, &crq.crq);
1534 else {
1535 spin_unlock(&udc->lock);
1536 ret = udc->driver->setup(&udc->gadget, &crq.crq);
1537 spin_lock(&udc->lock);
1538 }
1539
1540 DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
1541 crq.crq.bRequestType, crq.crq.bRequest,
1542 le16_to_cpu(crq.crq.wLength), ep->state, ret);
1543
                if (ret < 0) {
                        /* Let the host know that we failed */
                        set_protocol_stall(udc, ep);
                }
1548 }
1549}
1550
1551static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
1552{
1553 struct usba_request *req;
1554 u32 epstatus;
1555 u32 epctrl;
1556
1557 epstatus = usba_ep_readl(ep, STA);
1558 epctrl = usba_ep_readl(ep, CTL);
1559
1560 DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);
1561
1562 while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
1563 DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);
1564
1565 if (list_empty(&ep->queue)) {
1566 dev_warn(&udc->pdev->dev, "ep_irq: queue empty\n");
1567 usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
1568 return;
1569 }
1570
1571 req = list_entry(ep->queue.next, struct usba_request, queue);
1572
                if (req->using_dma) {
                        /* Send a zero-length packet */
                        usba_ep_writel(ep, SET_STA,
                                        USBA_TX_PK_RDY);
                        usba_ep_writel(ep, CTL_DIS,
                                        USBA_TX_PK_RDY);
1579 list_del_init(&req->queue);
1580 submit_next_request(ep);
1581 request_complete(ep, req, 0);
1582 } else {
1583 if (req->submitted)
1584 next_fifo_transaction(ep, req);
1585 else
1586 submit_request(ep, req);
1587
1588 if (req->last_transaction) {
1589 list_del_init(&req->queue);
1590 submit_next_request(ep);
1591 request_complete(ep, req, 0);
1592 }
1593 }
1594
1595 epstatus = usba_ep_readl(ep, STA);
1596 epctrl = usba_ep_readl(ep, CTL);
1597 }
1598 if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
1599 DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
1600 receive_data(ep);
1601 usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
1602 }
1603}
1604
1605static void usba_dma_irq(struct usba_udc *udc, struct usba_ep *ep)
1606{
1607 struct usba_request *req;
1608 u32 status, control, pending;
1609
1610 status = usba_dma_readl(ep, STATUS);
1611 control = usba_dma_readl(ep, CONTROL);
1612#ifdef CONFIG_USB_GADGET_DEBUG_FS
1613 ep->last_dma_status = status;
1614#endif
1615 pending = status & control;
1616 DBG(DBG_INT | DBG_DMA, "dma irq, s/%#08x, c/%#08x\n", status, control);
1617
1618 if (status & USBA_DMA_CH_EN) {
1619 dev_err(&udc->pdev->dev,
1620 "DMA_CH_EN is set after transfer is finished!\n");
1621 dev_err(&udc->pdev->dev,
1622 "status=%#08x, pending=%#08x, control=%#08x\n",
1623 status, pending, control);
                /*
                 * The channel was expected to be idle by now; there is
                 * not much more we can do here than report it.
                 */
        }

        if (list_empty(&ep->queue))
                /* Might happen if a reset comes along at the right moment */
                return;
1634
1635 if (pending & (USBA_DMA_END_TR_ST | USBA_DMA_END_BUF_ST)) {
1636 req = list_entry(ep->queue.next, struct usba_request, queue);
1637 usba_update_req(ep, req, status);
1638
1639 list_del_init(&req->queue);
1640 submit_next_request(ep);
1641 request_complete(ep, req, 0);
1642 }
1643}
1644
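/*
 * Top-level interrupt handler: decode INT_STA and dispatch to the
 * suspend/wakeup/resume handling, the per-endpoint DMA interrupts, the
 * per-endpoint FIFO interrupts and the end-of-reset processing (which
 * re-programs ep0).
 */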
1645static irqreturn_t usba_udc_irq(int irq, void *devid)
1646{
1647 struct usba_udc *udc = devid;
1648 u32 status;
1649 u32 dma_status;
1650 u32 ep_status;
1651
1652 spin_lock(&udc->lock);
1653
1654 status = usba_readl(udc, INT_STA);
1655 DBG(DBG_INT, "irq, status=%#08x\n", status);
1656
1657 if (status & USBA_DET_SUSPEND) {
1658 toggle_bias(0);
1659 usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
1660 DBG(DBG_BUS, "Suspend detected\n");
1661 if (udc->gadget.speed != USB_SPEED_UNKNOWN
1662 && udc->driver && udc->driver->suspend) {
1663 spin_unlock(&udc->lock);
1664 udc->driver->suspend(&udc->gadget);
1665 spin_lock(&udc->lock);
1666 }
1667 }
1668
1669 if (status & USBA_WAKE_UP) {
1670 toggle_bias(1);
1671 usba_writel(udc, INT_CLR, USBA_WAKE_UP);
1672 DBG(DBG_BUS, "Wake Up CPU detected\n");
1673 }
1674
1675 if (status & USBA_END_OF_RESUME) {
1676 usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
1677 DBG(DBG_BUS, "Resume detected\n");
1678 if (udc->gadget.speed != USB_SPEED_UNKNOWN
1679 && udc->driver && udc->driver->resume) {
1680 spin_unlock(&udc->lock);
1681 udc->driver->resume(&udc->gadget);
1682 spin_lock(&udc->lock);
1683 }
1684 }
1685
1686 dma_status = USBA_BFEXT(DMA_INT, status);
1687 if (dma_status) {
1688 int i;
1689
1690 for (i = 1; i < USBA_NR_ENDPOINTS; i++)
1691 if (dma_status & (1 << i))
1692 usba_dma_irq(udc, &usba_ep[i]);
1693 }
1694
1695 ep_status = USBA_BFEXT(EPT_INT, status);
1696 if (ep_status) {
1697 int i;
1698
1699 for (i = 0; i < USBA_NR_ENDPOINTS; i++)
1700 if (ep_status & (1 << i)) {
1701 if (ep_is_control(&usba_ep[i]))
1702 usba_control_irq(udc, &usba_ep[i]);
1703 else
1704 usba_ep_irq(udc, &usba_ep[i]);
1705 }
1706 }
1707
1708 if (status & USBA_END_OF_RESET) {
1709 struct usba_ep *ep0;
1710
1711 usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
1712 reset_all_endpoints(udc);
1713
1714 if (udc->gadget.speed != USB_SPEED_UNKNOWN
1715 && udc->driver->disconnect) {
1716 udc->gadget.speed = USB_SPEED_UNKNOWN;
1717 spin_unlock(&udc->lock);
1718 udc->driver->disconnect(&udc->gadget);
1719 spin_lock(&udc->lock);
1720 }
1721
1722 if (status & USBA_HIGH_SPEED)
1723 udc->gadget.speed = USB_SPEED_HIGH;
1724 else
1725 udc->gadget.speed = USB_SPEED_FULL;
1726 DBG(DBG_BUS, "%s bus reset detected\n",
1727 usb_speed_string(udc->gadget.speed));
1728
1729 ep0 = &usba_ep[0];
1730 ep0->desc = &usba_ep0_desc;
1731 ep0->state = WAIT_FOR_SETUP;
1732 usba_ep_writel(ep0, CFG,
1733 (USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
1734 | USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
1735 | USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
1736 usba_ep_writel(ep0, CTL_ENB,
1737 USBA_EPT_ENABLE | USBA_RX_SETUP);
1738 usba_writel(udc, INT_ENB,
1739 (usba_readl(udc, INT_ENB)
1740 | USBA_BF(EPT_INT, 1)
1741 | USBA_DET_SUSPEND
1742 | USBA_END_OF_RESUME));
1743
                /*
                 * The configuration written above is expected to be
                 * accepted by the hardware; complain (quietly) if not.
                 */
                if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
                        dev_dbg(&udc->pdev->dev,
                                 "ODD: EP0 configuration is invalid!\n");
1751 }
1752
1753 spin_unlock(&udc->lock);
1754
1755 return IRQ_HANDLED;
1756}
1757
1758static irqreturn_t usba_vbus_irq(int irq, void *devid)
1759{
1760 struct usba_udc *udc = devid;
1761 int vbus;
1762
        /* Debounce the Vbus signal */
        udelay(10);

        spin_lock(&udc->lock);

        /* Nothing to do until a gadget driver is bound */
        if (!udc->driver)
                goto out;
1771
1772 vbus = vbus_is_present(udc);
1773 if (vbus != udc->vbus_prev) {
1774 if (vbus) {
1775 toggle_bias(1);
1776 usba_writel(udc, CTRL, USBA_ENABLE_MASK);
1777 usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
1778 } else {
1779 udc->gadget.speed = USB_SPEED_UNKNOWN;
1780 reset_all_endpoints(udc);
1781 toggle_bias(0);
1782 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1783 if (udc->driver->disconnect) {
1784 spin_unlock(&udc->lock);
1785 udc->driver->disconnect(&udc->gadget);
1786 spin_lock(&udc->lock);
1787 }
1788 }
1789 udc->vbus_prev = vbus;
1790 }
1791
1792out:
1793 spin_unlock(&udc->lock);
1794
1795 return IRQ_HANDLED;
1796}
1797
1798static int atmel_usba_start(struct usb_gadget_driver *driver,
1799 int (*bind)(struct usb_gadget *))
1800{
1801 struct usba_udc *udc = &the_udc;
1802 unsigned long flags;
1803 int ret;
1804
1805 if (!udc->pdev)
1806 return -ENODEV;
1807
1808 spin_lock_irqsave(&udc->lock, flags);
1809 if (udc->driver) {
1810 spin_unlock_irqrestore(&udc->lock, flags);
1811 return -EBUSY;
1812 }
1813
1814 udc->devstatus = 1 << USB_DEVICE_SELF_POWERED;
1815 udc->driver = driver;
1816 udc->gadget.dev.driver = &driver->driver;
1817 spin_unlock_irqrestore(&udc->lock, flags);
1818
1819 clk_enable(udc->pclk);
1820 clk_enable(udc->hclk);
1821
1822 ret = bind(&udc->gadget);
1823 if (ret) {
1824 DBG(DBG_ERR, "Could not bind to driver %s: error %d\n",
1825 driver->driver.name, ret);
1826 goto err_driver_bind;
1827 }
1828
1829 DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
1830
1831 udc->vbus_prev = 0;
1832 if (gpio_is_valid(udc->vbus_pin))
1833 enable_irq(gpio_to_irq(udc->vbus_pin));

        /* If Vbus is present, enable the controller and wait for reset */
        spin_lock_irqsave(&udc->lock, flags);
1837 if (vbus_is_present(udc) && udc->vbus_prev == 0) {
1838 toggle_bias(1);
1839 usba_writel(udc, CTRL, USBA_ENABLE_MASK);
1840 usba_writel(udc, INT_ENB, USBA_END_OF_RESET);
1841 }
1842 spin_unlock_irqrestore(&udc->lock, flags);
1843
1844 return 0;
1845
1846err_driver_bind:
1847 udc->driver = NULL;
1848 udc->gadget.dev.driver = NULL;
1849 return ret;
1850}
1851
1852static int atmel_usba_stop(struct usb_gadget_driver *driver)
1853{
1854 struct usba_udc *udc = &the_udc;
1855 unsigned long flags;
1856
1857 if (!udc->pdev)
1858 return -ENODEV;
1859 if (driver != udc->driver || !driver->unbind)
1860 return -EINVAL;
1861
1862 if (gpio_is_valid(udc->vbus_pin))
1863 disable_irq(gpio_to_irq(udc->vbus_pin));
1864
1865 spin_lock_irqsave(&udc->lock, flags);
1866 udc->gadget.speed = USB_SPEED_UNKNOWN;
1867 reset_all_endpoints(udc);
1868 spin_unlock_irqrestore(&udc->lock, flags);

        /* Detach from the bus and disable the controller */
        toggle_bias(0);
        usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1873
1874 if (udc->driver->disconnect)
1875 udc->driver->disconnect(&udc->gadget);
1876
1877 driver->unbind(&udc->gadget);
1878 udc->gadget.dev.driver = NULL;
1879 udc->driver = NULL;
1880
1881 clk_disable(udc->hclk);
1882 clk_disable(udc->pclk);
1883
1884 DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
1885
1886 return 0;
1887}
1888
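/*
 * Probe: map the control and FIFO memory regions, grab the peripheral
 * and host clocks, build the endpoint array from the platform data and
 * register the gadget.  The optional Vbus GPIO, when present, drives
 * usba_vbus_irq() so the controller is only enabled while a host is
 * attached.
 */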
1889static int __init usba_udc_probe(struct platform_device *pdev)
1890{
1891 struct usba_platform_data *pdata = pdev->dev.platform_data;
1892 struct resource *regs, *fifo;
1893 struct clk *pclk, *hclk;
1894 struct usba_udc *udc = &the_udc;
1895 int irq, ret, i;
1896
1897 regs = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
1898 fifo = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
1899 if (!regs || !fifo || !pdata)
1900 return -ENXIO;
1901
1902 irq = platform_get_irq(pdev, 0);
1903 if (irq < 0)
1904 return irq;
1905
1906 pclk = clk_get(&pdev->dev, "pclk");
1907 if (IS_ERR(pclk))
1908 return PTR_ERR(pclk);
1909 hclk = clk_get(&pdev->dev, "hclk");
1910 if (IS_ERR(hclk)) {
1911 ret = PTR_ERR(hclk);
1912 goto err_get_hclk;
1913 }
1914
1915 spin_lock_init(&udc->lock);
1916 udc->pdev = pdev;
1917 udc->pclk = pclk;
1918 udc->hclk = hclk;
1919 udc->vbus_pin = -ENODEV;
1920
1921 ret = -ENOMEM;
1922 udc->regs = ioremap(regs->start, resource_size(regs));
1923 if (!udc->regs) {
1924 dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
1925 goto err_map_regs;
1926 }
1927 dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
1928 (unsigned long)regs->start, udc->regs);
1929 udc->fifo = ioremap(fifo->start, resource_size(fifo));
1930 if (!udc->fifo) {
1931 dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
1932 goto err_map_fifo;
1933 }
1934 dev_info(&pdev->dev, "FIFO at 0x%08lx mapped at %p\n",
1935 (unsigned long)fifo->start, udc->fifo);
1936
1937 device_initialize(&udc->gadget.dev);
1938 udc->gadget.dev.parent = &pdev->dev;
1939 udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
1940
1941 platform_set_drvdata(pdev, udc);

        /* Make sure the controller starts from a clean, disabled state */
        clk_enable(pclk);
1945 toggle_bias(0);
1946 usba_writel(udc, CTRL, USBA_DISABLE_MASK);
1947 clk_disable(pclk);
1948
1949 usba_ep = kzalloc(sizeof(struct usba_ep) * pdata->num_ep,
1950 GFP_KERNEL);
1951 if (!usba_ep)
1952 goto err_alloc_ep;
1953
1954 the_udc.gadget.ep0 = &usba_ep[0].ep;
1955
1956 INIT_LIST_HEAD(&usba_ep[0].ep.ep_list);
1957 usba_ep[0].ep_regs = udc->regs + USBA_EPT_BASE(0);
1958 usba_ep[0].dma_regs = udc->regs + USBA_DMA_BASE(0);
1959 usba_ep[0].fifo = udc->fifo + USBA_FIFO_BASE(0);
1960 usba_ep[0].ep.ops = &usba_ep_ops;
1961 usba_ep[0].ep.name = pdata->ep[0].name;
1962 usba_ep[0].ep.maxpacket = pdata->ep[0].fifo_size;
1963 usba_ep[0].udc = &the_udc;
1964 INIT_LIST_HEAD(&usba_ep[0].queue);
1965 usba_ep[0].fifo_size = pdata->ep[0].fifo_size;
1966 usba_ep[0].nr_banks = pdata->ep[0].nr_banks;
1967 usba_ep[0].index = pdata->ep[0].index;
1968 usba_ep[0].can_dma = pdata->ep[0].can_dma;
1969 usba_ep[0].can_isoc = pdata->ep[0].can_isoc;
1970
1971 for (i = 1; i < pdata->num_ep; i++) {
1972 struct usba_ep *ep = &usba_ep[i];
1973
1974 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
1975 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
1976 ep->fifo = udc->fifo + USBA_FIFO_BASE(i);
1977 ep->ep.ops = &usba_ep_ops;
1978 ep->ep.name = pdata->ep[i].name;
1979 ep->ep.maxpacket = pdata->ep[i].fifo_size;
1980 ep->udc = &the_udc;
1981 INIT_LIST_HEAD(&ep->queue);
1982 ep->fifo_size = pdata->ep[i].fifo_size;
1983 ep->nr_banks = pdata->ep[i].nr_banks;
1984 ep->index = pdata->ep[i].index;
1985 ep->can_dma = pdata->ep[i].can_dma;
1986 ep->can_isoc = pdata->ep[i].can_isoc;
1987
1988 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
1989 }
1990
1991 ret = request_irq(irq, usba_udc_irq, 0, "atmel_usba_udc", udc);
1992 if (ret) {
1993 dev_err(&pdev->dev, "Cannot request irq %d (error %d)\n",
1994 irq, ret);
1995 goto err_request_irq;
1996 }
1997 udc->irq = irq;
1998
1999 ret = device_add(&udc->gadget.dev);
2000 if (ret) {
2001 dev_dbg(&pdev->dev, "Could not add gadget: %d\n", ret);
2002 goto err_device_add;
2003 }
2004
2005 if (gpio_is_valid(pdata->vbus_pin)) {
2006 if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
2007 udc->vbus_pin = pdata->vbus_pin;
2008 udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
2009
2010 ret = request_irq(gpio_to_irq(udc->vbus_pin),
2011 usba_vbus_irq, 0,
2012 "atmel_usba_udc", udc);
2013 if (ret) {
2014 gpio_free(udc->vbus_pin);
2015 udc->vbus_pin = -ENODEV;
2016 dev_warn(&udc->pdev->dev,
2017 "failed to request vbus irq; "
2018 "assuming always on\n");
2019 } else {
2020 disable_irq(gpio_to_irq(udc->vbus_pin));
2021 }
                } else {
                        /* gpio_request() failed: make gpio_is_valid() return false */
                        udc->vbus_pin = -EINVAL;
2025 }
2026 }
2027
2028 ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
2029 if (ret)
2030 goto err_add_udc;
2031
2032 usba_init_debugfs(udc);
2033 for (i = 1; i < pdata->num_ep; i++)
2034 usba_ep_init_debugfs(udc, &usba_ep[i]);
2035
2036 return 0;
2037
2038err_add_udc:
2039 if (gpio_is_valid(pdata->vbus_pin)) {
2040 free_irq(gpio_to_irq(udc->vbus_pin), udc);
2041 gpio_free(udc->vbus_pin);
2042 }
2043
2044 device_unregister(&udc->gadget.dev);
2045
2046err_device_add:
2047 free_irq(irq, udc);
2048err_request_irq:
2049 kfree(usba_ep);
2050err_alloc_ep:
2051 iounmap(udc->fifo);
2052err_map_fifo:
2053 iounmap(udc->regs);
2054err_map_regs:
2055 clk_put(hclk);
2056err_get_hclk:
2057 clk_put(pclk);
2058
2059 platform_set_drvdata(pdev, NULL);
2060
2061 return ret;
2062}
2063
2064static int __exit usba_udc_remove(struct platform_device *pdev)
2065{
2066 struct usba_udc *udc;
2067 int i;
2068 struct usba_platform_data *pdata = pdev->dev.platform_data;
2069
2070 udc = platform_get_drvdata(pdev);
2071
2072 usb_del_gadget_udc(&udc->gadget);
2073
2074 for (i = 1; i < pdata->num_ep; i++)
2075 usba_ep_cleanup_debugfs(&usba_ep[i]);
2076 usba_cleanup_debugfs(udc);
2077
2078 if (gpio_is_valid(udc->vbus_pin)) {
2079 free_irq(gpio_to_irq(udc->vbus_pin), udc);
2080 gpio_free(udc->vbus_pin);
2081 }
2082
2083 free_irq(udc->irq, udc);
2084 kfree(usba_ep);
2085 iounmap(udc->fifo);
2086 iounmap(udc->regs);
2087 clk_put(udc->hclk);
2088 clk_put(udc->pclk);
2089
2090 device_unregister(&udc->gadget.dev);
2091
2092 return 0;
2093}
2094
2095static struct platform_driver udc_driver = {
2096 .remove = __exit_p(usba_udc_remove),
2097 .driver = {
2098 .name = "atmel_usba_udc",
2099 .owner = THIS_MODULE,
2100 },
2101};
2102
2103static int __init udc_init(void)
2104{
2105 return platform_driver_probe(&udc_driver, usba_udc_probe);
2106}
2107module_init(udc_init);
2108
2109static void __exit udc_exit(void)
2110{
2111 platform_driver_unregister(&udc_driver);
2112}
2113module_exit(udc_exit);
2114
2115MODULE_DESCRIPTION("Atmel USBA UDC driver");
2116MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
2117MODULE_LICENSE("GPL");
2118MODULE_ALIAS("platform:atmel_usba_udc");
2119