/*
 * ci13xxx_udc - USB device controller driver
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>

#include "ci13xxx_udc.h"

/* a request buffer with this DMA address still has to be mapped */
#define DMA_ADDR_INVALID	(~(dma_addr_t)0)

static DEFINE_SPINLOCK(udc_lock);

/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_OUT,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
	.bLength         = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,

	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
};

/* UDC descriptor */
static struct ci13xxx *_udc;

/* interrupt statistics, dumped through the "inters" sysfs attribute */
#define ISR_MASK   0x1F
static struct {
	u32 test;
	u32 ui;
	u32 uei;
	u32 pci;
	u32 uri;
	u32 sli;
	u32 none;
	struct {
		u32 cnt;
		u32 buf[ISR_MASK+1];
		u32 idx;
	} hndl;
} isr_statistics;

/**
 * ffs_nr: find first (least significant) bit set
 * @x: the word to search
 *
 * This function returns the bit number (0-based), or 32 if no bit is set
 */
static int ffs_nr(u32 x)
{
	int n = ffs(x);

	return n ? n-1 : 32;
}

/* register bank descriptor */
static struct {
	unsigned      lpm;    /* set if the core is LPM capable */
	void __iomem *abs;    /* bus map offset */
	void __iomem *cap;    /* capability registers */
	size_t        size;   /* bank size (in 32-bit words) */
} hw_bank;

/* UDC register map */
#define ABS_AHBBURST        (0x0090UL)
#define ABS_AHBMODE         (0x0098UL)

#define ABS_CAPLENGTH       (0x100UL)
#define ABS_HCCPARAMS       (0x108UL)
#define ABS_DCCPARAMS       (0x124UL)
#define ABS_TESTMODE        (hw_bank.lpm ? 0x0FCUL : 0x138UL)

/* offsets relative to hw_bank.cap; LPM capable cores use a different map */
#define CAP_USBCMD          (0x000UL)
#define CAP_USBSTS          (0x004UL)
#define CAP_USBINTR         (0x008UL)
#define CAP_DEVICEADDR      (0x014UL)
#define CAP_ENDPTLISTADDR   (0x018UL)
#define CAP_PORTSC          (0x044UL)
#define CAP_DEVLC           (0x084UL)
#define CAP_USBMODE         (hw_bank.lpm ? 0x0C8UL : 0x068UL)
#define CAP_ENDPTSETUPSTAT  (hw_bank.lpm ? 0x0D8UL : 0x06CUL)
#define CAP_ENDPTPRIME      (hw_bank.lpm ? 0x0DCUL : 0x070UL)
#define CAP_ENDPTFLUSH      (hw_bank.lpm ? 0x0E0UL : 0x074UL)
#define CAP_ENDPTSTAT       (hw_bank.lpm ? 0x0E4UL : 0x078UL)
#define CAP_ENDPTCOMPLETE   (hw_bank.lpm ? 0x0E8UL : 0x07CUL)
#define CAP_ENDPTCTRL       (hw_bank.lpm ? 0x0ECUL : 0x080UL)
#define CAP_LAST            (hw_bank.lpm ? 0x12CUL : 0x0C0UL)

/* maximum number of endpoints (RX + TX); set by hw_device_init() */
static unsigned hw_ep_max;

/**
 * hw_ep_bit: calculates the register bit number for an endpoint
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX, 1 = TX)
 *
 * TX endpoints occupy the upper 16 bits of the ENDPT* registers
 */
static inline int hw_ep_bit(int num, int dir)
{
	return num + (dir ? 16 : 0);
}

/**
 * ep_to_bit: translates an endpoint index into a register bit number
 * @n: endpoint index (0 .. hw_ep_max - 1)
 *
 * Indexes in the TX half are shifted up to the upper 16 bits
 */
static int ep_to_bit(int n)
{
	int fill = 16 - hw_ep_max / 2;

	if (n >= hw_ep_max / 2)
		n += fill;

	return n;
}
194
195
196
197
198
199
200
201
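/**
 * hw_aread: reads from a register bitfield (absolute bank)
 * @addr: offset from the start of the register bank (hw_bank.abs)
 * @mask: bitfield mask
 *
 * This function returns the masked register value
 */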
202static u32 hw_aread(u32 addr, u32 mask)
203{
204 return ioread32(addr + hw_bank.abs) & mask;
205}
206
207
208
209
210
211
212
213static void hw_awrite(u32 addr, u32 mask, u32 data)
214{
215 iowrite32(hw_aread(addr, ~mask) | (data & mask),
216 addr + hw_bank.abs);
217}
218
219
220
221
222
223
224
225
226static u32 hw_cread(u32 addr, u32 mask)
227{
228 return ioread32(addr + hw_bank.cap) & mask;
229}
230
231
232
233
234
235
236
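/**
 * hw_cwrite: writes to a register bitfield (capability bank)
 * @addr: offset from the capability registers (hw_bank.cap)
 * @mask: bitfield mask
 * @data: new value for the masked bits (read-modify-write)
 */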
237static void hw_cwrite(u32 addr, u32 mask, u32 data)
238{
239 iowrite32(hw_cread(addr, ~mask) | (data & mask),
240 addr + hw_bank.cap);
241}
242
243
244
245
246
247
248
249
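/**
 * hw_ctest_and_clear: tests and clears a capability register bitfield
 * @addr: offset from the capability registers (hw_bank.cap)
 * @mask: bitfield mask
 *
 * The masked bits are written back as read (write-1-to-clear); this
 * function returns their value before clearing
 */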
250static u32 hw_ctest_and_clear(u32 addr, u32 mask)
251{
252 u32 reg = hw_cread(addr, mask);
253
254 iowrite32(reg, addr + hw_bank.cap);
255 return reg;
256}
257
258
259
260
261
262
263
264
265
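/**
 * hw_ctest_and_write: tests and writes a capability register bitfield
 * @addr: offset from the capability registers (hw_bank.cap)
 * @mask: bitfield mask
 * @data: new value for the masked bits
 *
 * This function returns the previous bitfield value, shifted down to bit 0
 */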
266static u32 hw_ctest_and_write(u32 addr, u32 mask, u32 data)
267{
268 u32 reg = hw_cread(addr, ~0);
269
270 iowrite32((reg & ~mask) | (data & mask), addr + hw_bank.cap);
271 return (reg & mask) >> ffs_nr(mask);
272}
273
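/**
 * hw_device_init: locates the capability registers and reads the controller
 * capabilities (LPM support, bank size, number of endpoints)
 * @base: virtual address of the controller register bank
 *
 * This function returns 0 on success, or -ENODEV if the reported endpoint
 * count is zero or exceeds ENDPT_MAX
 */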
274static int hw_device_init(void __iomem *base)
275{
276 u32 reg;
277
278
279 hw_bank.abs = base;
280
281 hw_bank.cap = hw_bank.abs;
282 hw_bank.cap += ABS_CAPLENGTH;
283 hw_bank.cap += ioread8(hw_bank.cap);
284
285 reg = hw_aread(ABS_HCCPARAMS, HCCPARAMS_LEN) >> ffs_nr(HCCPARAMS_LEN);
286 hw_bank.lpm = reg;
287 hw_bank.size = hw_bank.cap - hw_bank.abs;
288 hw_bank.size += CAP_LAST;
289 hw_bank.size /= sizeof(u32);
290
291 reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
292 hw_ep_max = reg * 2;
293
294 if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
295 return -ENODEV;
296
297
298
299
300
301
302
303 return 0;
304}
305
306
307
308
309
310
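/**
 * hw_device_reset: resets the controller and switches it into device mode
 * @udc: UDC descriptor
 *
 * This function returns 0 on success, or -ENODEV if device mode could not
 * be entered
 */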
311static int hw_device_reset(struct ci13xxx *udc)
312{
313
314 hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
315 hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
316
317 hw_cwrite(CAP_USBCMD, USBCMD_RST, USBCMD_RST);
318 while (hw_cread(CAP_USBCMD, USBCMD_RST))
319 udelay(10);
320
321
322 if (udc->udc_driver->notify_event)
323 udc->udc_driver->notify_event(udc,
324 CI13XXX_CONTROLLER_RESET_EVENT);
325
326 if (udc->udc_driver->flags & CI13XXX_DISABLE_STREAMING)
327 hw_cwrite(CAP_USBMODE, USBMODE_SDIS, USBMODE_SDIS);
328
329
330 hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
331 hw_cwrite(CAP_USBMODE, USBMODE_CM, USBMODE_CM_DEVICE);
332 hw_cwrite(CAP_USBMODE, USBMODE_SLOM, USBMODE_SLOM);
333
	if (hw_cread(CAP_USBMODE, USBMODE_CM) != USBMODE_CM_DEVICE) {
		pr_err("cannot enter device mode\n");
		pr_err("lpm = %i\n", hw_bank.lpm);
		return -ENODEV;
	}
339
340 return 0;
341}
342
343
344
345
346
347
348
349
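/**
 * hw_device_state: starts or stops the controller
 * @dma: physical address of the endpoint list to program before enabling
 *       interrupts and the run bit, or 0 to stop the controller and mask
 *       its interrupts
 *
 * This function returns 0
 */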
350static int hw_device_state(u32 dma)
351{
352 if (dma) {
353 hw_cwrite(CAP_ENDPTLISTADDR, ~0, dma);
354
355 hw_cwrite(CAP_USBINTR, ~0,
356 USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
357 hw_cwrite(CAP_USBCMD, USBCMD_RS, USBCMD_RS);
358 } else {
359 hw_cwrite(CAP_USBCMD, USBCMD_RS, 0);
360 hw_cwrite(CAP_USBINTR, ~0, 0);
361 }
362 return 0;
363}
364
365
366
367
368
369
370
371
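/**
 * hw_ep_flush: flushes a primed endpoint until its status bit clears
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX, 1 = TX)
 *
 * This function returns 0
 */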
372static int hw_ep_flush(int num, int dir)
373{
374 int n = hw_ep_bit(num, dir);
375
376 do {
377
378 hw_cwrite(CAP_ENDPTFLUSH, BIT(n), BIT(n));
379 while (hw_cread(CAP_ENDPTFLUSH, BIT(n)))
380 cpu_relax();
381 } while (hw_cread(CAP_ENDPTSTAT, BIT(n)));
382
383 return 0;
384}
385
386
387
388
389
390
391
392
393static int hw_ep_disable(int num, int dir)
394{
395 hw_ep_flush(num, dir);
396 hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32),
397 dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
398 return 0;
399}
400
401
402
403
404
405
406
407
408
409static int hw_ep_enable(int num, int dir, int type)
410{
411 u32 mask, data;
412
413 if (dir) {
414 mask = ENDPTCTRL_TXT;
415 data = type << ffs_nr(mask);
416
417 mask |= ENDPTCTRL_TXS;
418 mask |= ENDPTCTRL_TXR;
419 data |= ENDPTCTRL_TXR;
420 mask |= ENDPTCTRL_TXE;
421 data |= ENDPTCTRL_TXE;
422 } else {
423 mask = ENDPTCTRL_RXT;
424 data = type << ffs_nr(mask);
425
426 mask |= ENDPTCTRL_RXS;
427 mask |= ENDPTCTRL_RXR;
428 data |= ENDPTCTRL_RXR;
429 mask |= ENDPTCTRL_RXE;
430 data |= ENDPTCTRL_RXE;
431 }
432 hw_cwrite(CAP_ENDPTCTRL + num * sizeof(u32), mask, data);
433 return 0;
434}
435
436
437
438
439
440
441
442
443static int hw_ep_get_halt(int num, int dir)
444{
445 u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
446
447 return hw_cread(CAP_ENDPTCTRL + num * sizeof(u32), mask) ? 1 : 0;
448}
449
450
451
452
453
454
455
456
457static int hw_test_and_clear_setup_status(int n)
458{
459 n = ep_to_bit(n);
460 return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
461}
462
463
464
465
466
467
468
469
470
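/**
 * hw_ep_prime: primes an endpoint so the controller fetches its descriptors
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX, 1 = TX)
 * @is_ctrl: non-zero if this is a control endpoint
 *
 * This function returns 0 on success, or -EAGAIN if a setup packet arrived
 * on an RX control endpoint while priming
 */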
471static int hw_ep_prime(int num, int dir, int is_ctrl)
472{
473 int n = hw_ep_bit(num, dir);
474
475 if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
476 return -EAGAIN;
477
478 hw_cwrite(CAP_ENDPTPRIME, BIT(n), BIT(n));
479
480 while (hw_cread(CAP_ENDPTPRIME, BIT(n)))
481 cpu_relax();
482 if (is_ctrl && dir == RX && hw_cread(CAP_ENDPTSETUPSTAT, BIT(num)))
483 return -EAGAIN;
484
485
486 return 0;
487}
488
489
490
491
492
493
494
495
496
497
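/**
 * hw_ep_set_halt: sets or clears an endpoint's halt (stall) bit
 * @num: endpoint number
 * @dir: endpoint direction (0 = RX, 1 = TX)
 * @value: 1 to stall, 0 to un-stall and reset the data toggle
 *
 * This function returns 0 on success, or -EINVAL for an invalid @value
 */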
498static int hw_ep_set_halt(int num, int dir, int value)
499{
500 if (value != 0 && value != 1)
501 return -EINVAL;
502
503 do {
504 u32 addr = CAP_ENDPTCTRL + num * sizeof(u32);
505 u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
506 u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
507
508
509 hw_cwrite(addr, mask_xs|mask_xr, value ? mask_xs : mask_xr);
510
511 } while (value != hw_ep_get_halt(num, dir));
512
513 return 0;
514}
515
516
517
518
519
520
521
522
523static int hw_intr_clear(int n)
524{
525 if (n >= REG_BITS)
526 return -EINVAL;
527
528 hw_cwrite(CAP_USBINTR, BIT(n), 0);
529 hw_cwrite(CAP_USBSTS, BIT(n), BIT(n));
530 return 0;
531}
532
533
534
535
536
537
538
539
540static int hw_intr_force(int n)
541{
542 if (n >= REG_BITS)
543 return -EINVAL;
544
545 hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, TESTMODE_FORCE);
546 hw_cwrite(CAP_USBINTR, BIT(n), BIT(n));
547 hw_cwrite(CAP_USBSTS, BIT(n), BIT(n));
548 hw_awrite(ABS_TESTMODE, TESTMODE_FORCE, 0);
549 return 0;
550}
551
552
553
554
555
556
557static int hw_port_is_high_speed(void)
558{
559 return hw_bank.lpm ? hw_cread(CAP_DEVLC, DEVLC_PSPD) :
560 hw_cread(CAP_PORTSC, PORTSC_HSP);
561}
562
563
564
565
566
567
568static u8 hw_port_test_get(void)
569{
570 return hw_cread(CAP_PORTSC, PORTSC_PTC) >> ffs_nr(PORTSC_PTC);
571}
572
573
574
575
576
577
578
579static int hw_port_test_set(u8 mode)
580{
581 const u8 TEST_MODE_MAX = 7;
582
583 if (mode > TEST_MODE_MAX)
584 return -EINVAL;
585
586 hw_cwrite(CAP_PORTSC, PORTSC_PTC, mode << ffs_nr(PORTSC_PTC));
587 return 0;
588}
589
590
591
592
593
594
595static u32 hw_read_intr_enable(void)
596{
597 return hw_cread(CAP_USBINTR, ~0);
598}
599
600
601
602
603
604
605static u32 hw_read_intr_status(void)
606{
607 return hw_cread(CAP_USBSTS, ~0);
608}
609
610
611
612
613
614
615
616
617static size_t hw_register_read(u32 *buf, size_t size)
618{
619 unsigned i;
620
621 if (size > hw_bank.size)
622 size = hw_bank.size;
623
624 for (i = 0; i < size; i++)
625 buf[i] = hw_aread(i * sizeof(u32), ~0);
626
627 return size;
628}
629
630
631
632
633
634
635
636
637static int hw_register_write(u16 addr, u32 data)
638{
639
640 addr /= sizeof(u32);
641
642 if (addr >= hw_bank.size)
643 return -EINVAL;
644
645
646 addr *= sizeof(u32);
647
648 hw_awrite(addr, ~0, data);
649 return 0;
650}
651
652
653
654
655
656
657
658
659static int hw_test_and_clear_complete(int n)
660{
661 n = ep_to_bit(n);
662 return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
663}
664
665
666
667
668
669
670
671static u32 hw_test_and_clear_intr_active(void)
672{
673 u32 reg = hw_read_intr_status() & hw_read_intr_enable();
674
675 hw_cwrite(CAP_USBSTS, ~0, reg);
676 return reg;
677}
678
679
680
681
682
683
684
685static int hw_test_and_clear_setup_guard(void)
686{
687 return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, 0);
688}
689
690
691
692
693
694
695
696static int hw_test_and_set_setup_guard(void)
697{
698 return hw_ctest_and_write(CAP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
699}
700
701
702
703
704
705
706
707static int hw_usb_set_address(u8 value)
708{
709
710 hw_cwrite(CAP_DEVICEADDR, DEVICEADDR_USBADR | DEVICEADDR_USBADRA,
711 value << ffs_nr(DEVICEADDR_USBADR) | DEVICEADDR_USBADRA);
712 return 0;
713}
714
715
716
717
718
719
720
721static int hw_usb_reset(void)
722{
723 hw_usb_set_address(0);
724
725
726 hw_cwrite(CAP_ENDPTFLUSH, ~0, ~0);
727
728
729 hw_cwrite(CAP_ENDPTSETUPSTAT, 0, 0);
730
731
732 hw_cwrite(CAP_ENDPTCOMPLETE, 0, 0);
733
734
735 while (hw_cread(CAP_ENDPTPRIME, ~0))
736 udelay(10);
737
738
739
740
741
742
743 return 0;
744}
745
746
747
748
749
750
751
752
753
754static ssize_t show_device(struct device *dev, struct device_attribute *attr,
755 char *buf)
756{
757 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
758 struct usb_gadget *gadget = &udc->gadget;
759 int n = 0;
760
761 dbg_trace("[%s] %p\n", __func__, buf);
762 if (attr == NULL || buf == NULL) {
763 dev_err(dev, "[%s] EINVAL\n", __func__);
764 return 0;
765 }
766
767 n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
768 gadget->speed);
769 n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed = %d\n",
770 gadget->max_speed);
771
772 n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
773 gadget_is_dualspeed(gadget));
774 n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
775 gadget->is_otg);
776 n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
777 gadget->is_a_peripheral);
778 n += scnprintf(buf + n, PAGE_SIZE - n, "b_hnp_enable = %d\n",
779 gadget->b_hnp_enable);
780 n += scnprintf(buf + n, PAGE_SIZE - n, "a_hnp_support = %d\n",
781 gadget->a_hnp_support);
782 n += scnprintf(buf + n, PAGE_SIZE - n, "a_alt_hnp_support = %d\n",
783 gadget->a_alt_hnp_support);
784 n += scnprintf(buf + n, PAGE_SIZE - n, "name = %s\n",
785 (gadget->name ? gadget->name : ""));
786
787 return n;
788}
789static DEVICE_ATTR(device, S_IRUSR, show_device, NULL);
790
791
792
793
794
795
796static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
797 char *buf)
798{
799 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
800 struct usb_gadget_driver *driver = udc->driver;
801 int n = 0;
802
803 dbg_trace("[%s] %p\n", __func__, buf);
804 if (attr == NULL || buf == NULL) {
805 dev_err(dev, "[%s] EINVAL\n", __func__);
806 return 0;
807 }
808
809 if (driver == NULL)
810 return scnprintf(buf, PAGE_SIZE,
811 "There is no gadget attached!\n");
812
813 n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
814 (driver->function ? driver->function : ""));
815 n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
816 driver->max_speed);
817
818 return n;
819}
820static DEVICE_ATTR(driver, S_IRUSR, show_driver, NULL);
821
822
823#define DBG_DATA_MSG 64UL
824
825
826#define DBG_DATA_MAX 128UL
827
828
829static struct {
830 char (buf[DBG_DATA_MAX])[DBG_DATA_MSG];
831 unsigned idx;
832 unsigned tty;
833 rwlock_t lck;
834} dbg_data = {
835 .idx = 0,
836 .tty = 0,
837 .lck = __RW_LOCK_UNLOCKED(lck)
838};
839
840
841
842
843
844static void dbg_dec(unsigned *idx)
845{
846 *idx = (*idx - 1) & (DBG_DATA_MAX-1);
847}
848
849
850
851
852
853static void dbg_inc(unsigned *idx)
854{
855 *idx = (*idx + 1) & (DBG_DATA_MAX-1);
856}
857
858
859
860
861
862
863
864
865static void dbg_print(u8 addr, const char *name, int status, const char *extra)
866{
867 struct timeval tval;
868 unsigned int stamp;
869 unsigned long flags;
870
871 write_lock_irqsave(&dbg_data.lck, flags);
872
873 do_gettimeofday(&tval);
874 stamp = tval.tv_sec & 0xFFFF;
875 stamp = stamp * 1000000 + tval.tv_usec;
876
877 scnprintf(dbg_data.buf[dbg_data.idx], DBG_DATA_MSG,
878 "%04X\t? %02X %-7.7s %4i ?\t%s\n",
879 stamp, addr, name, status, extra);
880
881 dbg_inc(&dbg_data.idx);
882
883 write_unlock_irqrestore(&dbg_data.lck, flags);
884
885 if (dbg_data.tty != 0)
886 pr_notice("%04X\t? %02X %-7.7s %4i ?\t%s\n",
887 stamp, addr, name, status, extra);
888}
889
890
891
892
893
894
895
896static void dbg_done(u8 addr, const u32 token, int status)
897{
898 char msg[DBG_DATA_MSG];
899
900 scnprintf(msg, sizeof(msg), "%d %02X",
901 (int)(token & TD_TOTAL_BYTES) >> ffs_nr(TD_TOTAL_BYTES),
902 (int)(token & TD_STATUS) >> ffs_nr(TD_STATUS));
903 dbg_print(addr, "DONE", status, msg);
904}
905
906
907
908
909
910
911
912static void dbg_event(u8 addr, const char *name, int status)
913{
914 if (name != NULL)
915 dbg_print(addr, name, status, "");
916}
917
918
919
920
921
922
923
924static void dbg_queue(u8 addr, const struct usb_request *req, int status)
925{
926 char msg[DBG_DATA_MSG];
927
928 if (req != NULL) {
929 scnprintf(msg, sizeof(msg),
930 "%d %d", !req->no_interrupt, req->length);
931 dbg_print(addr, "QUEUE", status, msg);
932 }
933}
934
935
936
937
938
939
940static void dbg_setup(u8 addr, const struct usb_ctrlrequest *req)
941{
942 char msg[DBG_DATA_MSG];
943
944 if (req != NULL) {
945 scnprintf(msg, sizeof(msg),
946 "%02X %02X %04X %04X %d", req->bRequestType,
947 req->bRequest, le16_to_cpu(req->wValue),
948 le16_to_cpu(req->wIndex), le16_to_cpu(req->wLength));
949 dbg_print(addr, "SETUP", 0, msg);
950 }
951}
952
953
954
955
956
957
958static ssize_t show_events(struct device *dev, struct device_attribute *attr,
959 char *buf)
960{
961 unsigned long flags;
962 unsigned i, j, n = 0;
963
964 dbg_trace("[%s] %p\n", __func__, buf);
965 if (attr == NULL || buf == NULL) {
966 dev_err(dev, "[%s] EINVAL\n", __func__);
967 return 0;
968 }
969
970 read_lock_irqsave(&dbg_data.lck, flags);
971
972 i = dbg_data.idx;
973 for (dbg_dec(&i); i != dbg_data.idx; dbg_dec(&i)) {
974 n += strlen(dbg_data.buf[i]);
975 if (n >= PAGE_SIZE) {
976 n -= strlen(dbg_data.buf[i]);
977 break;
978 }
979 }
980 for (j = 0, dbg_inc(&i); j < n; dbg_inc(&i))
981 j += scnprintf(buf + j, PAGE_SIZE - j,
982 "%s", dbg_data.buf[i]);
983
984 read_unlock_irqrestore(&dbg_data.lck, flags);
985
986 return n;
987}
988
989
990
991
992
993
994static ssize_t store_events(struct device *dev, struct device_attribute *attr,
995 const char *buf, size_t count)
996{
997 unsigned tty;
998
	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
1000 if (attr == NULL || buf == NULL) {
1001 dev_err(dev, "[%s] EINVAL\n", __func__);
1002 goto done;
1003 }
1004
1005 if (sscanf(buf, "%u", &tty) != 1 || tty > 1) {
1006 dev_err(dev, "<1|0>: enable|disable console log\n");
1007 goto done;
1008 }
1009
1010 dbg_data.tty = tty;
1011 dev_info(dev, "tty = %u", dbg_data.tty);
1012
1013 done:
1014 return count;
1015}
1016static DEVICE_ATTR(events, S_IRUSR | S_IWUSR, show_events, store_events);
1017
1018
1019
1020
1021
1022
1023static ssize_t show_inters(struct device *dev, struct device_attribute *attr,
1024 char *buf)
1025{
1026 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1027 unsigned long flags;
1028 u32 intr;
1029 unsigned i, j, n = 0;
1030
1031 dbg_trace("[%s] %p\n", __func__, buf);
1032 if (attr == NULL || buf == NULL) {
1033 dev_err(dev, "[%s] EINVAL\n", __func__);
1034 return 0;
1035 }
1036
1037 spin_lock_irqsave(udc->lock, flags);
1038
1039 n += scnprintf(buf + n, PAGE_SIZE - n,
1040 "status = %08x\n", hw_read_intr_status());
1041 n += scnprintf(buf + n, PAGE_SIZE - n,
1042 "enable = %08x\n", hw_read_intr_enable());
1043
1044 n += scnprintf(buf + n, PAGE_SIZE - n, "*test = %d\n",
1045 isr_statistics.test);
1046 n += scnprintf(buf + n, PAGE_SIZE - n, "? ui = %d\n",
1047 isr_statistics.ui);
1048 n += scnprintf(buf + n, PAGE_SIZE - n, "? uei = %d\n",
1049 isr_statistics.uei);
1050 n += scnprintf(buf + n, PAGE_SIZE - n, "? pci = %d\n",
1051 isr_statistics.pci);
1052 n += scnprintf(buf + n, PAGE_SIZE - n, "? uri = %d\n",
1053 isr_statistics.uri);
1054 n += scnprintf(buf + n, PAGE_SIZE - n, "? sli = %d\n",
1055 isr_statistics.sli);
1056 n += scnprintf(buf + n, PAGE_SIZE - n, "*none = %d\n",
1057 isr_statistics.none);
1058 n += scnprintf(buf + n, PAGE_SIZE - n, "*hndl = %d\n",
1059 isr_statistics.hndl.cnt);
1060
1061 for (i = isr_statistics.hndl.idx, j = 0; j <= ISR_MASK; j++, i++) {
1062 i &= ISR_MASK;
1063 intr = isr_statistics.hndl.buf[i];
1064
1065 if (USBi_UI & intr)
1066 n += scnprintf(buf + n, PAGE_SIZE - n, "ui ");
1067 intr &= ~USBi_UI;
1068 if (USBi_UEI & intr)
1069 n += scnprintf(buf + n, PAGE_SIZE - n, "uei ");
1070 intr &= ~USBi_UEI;
1071 if (USBi_PCI & intr)
1072 n += scnprintf(buf + n, PAGE_SIZE - n, "pci ");
1073 intr &= ~USBi_PCI;
1074 if (USBi_URI & intr)
1075 n += scnprintf(buf + n, PAGE_SIZE - n, "uri ");
1076 intr &= ~USBi_URI;
1077 if (USBi_SLI & intr)
1078 n += scnprintf(buf + n, PAGE_SIZE - n, "sli ");
1079 intr &= ~USBi_SLI;
1080 if (intr)
1081 n += scnprintf(buf + n, PAGE_SIZE - n, "??? ");
1082 if (isr_statistics.hndl.buf[i])
1083 n += scnprintf(buf + n, PAGE_SIZE - n, "\n");
1084 }
1085
1086 spin_unlock_irqrestore(udc->lock, flags);
1087
1088 return n;
1089}
1090
1091
1092
1093
1094
1095
1096
1097static ssize_t store_inters(struct device *dev, struct device_attribute *attr,
1098 const char *buf, size_t count)
1099{
1100 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1101 unsigned long flags;
1102 unsigned en, bit;
1103
	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
1105 if (attr == NULL || buf == NULL) {
1106 dev_err(dev, "[%s] EINVAL\n", __func__);
1107 goto done;
1108 }
1109
1110 if (sscanf(buf, "%u %u", &en, &bit) != 2 || en > 1) {
1111 dev_err(dev, "<1|0> <bit>: enable|disable interrupt");
1112 goto done;
1113 }
1114
1115 spin_lock_irqsave(udc->lock, flags);
1116 if (en) {
1117 if (hw_intr_force(bit))
1118 dev_err(dev, "invalid bit number\n");
1119 else
1120 isr_statistics.test++;
1121 } else {
1122 if (hw_intr_clear(bit))
1123 dev_err(dev, "invalid bit number\n");
1124 }
1125 spin_unlock_irqrestore(udc->lock, flags);
1126
1127 done:
1128 return count;
1129}
1130static DEVICE_ATTR(inters, S_IRUSR | S_IWUSR, show_inters, store_inters);
1131
1132
1133
1134
1135
1136
1137static ssize_t show_port_test(struct device *dev,
1138 struct device_attribute *attr, char *buf)
1139{
1140 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1141 unsigned long flags;
1142 unsigned mode;
1143
1144 dbg_trace("[%s] %p\n", __func__, buf);
1145 if (attr == NULL || buf == NULL) {
1146 dev_err(dev, "[%s] EINVAL\n", __func__);
1147 return 0;
1148 }
1149
1150 spin_lock_irqsave(udc->lock, flags);
1151 mode = hw_port_test_get();
1152 spin_unlock_irqrestore(udc->lock, flags);
1153
1154 return scnprintf(buf, PAGE_SIZE, "mode = %u\n", mode);
1155}
1156
1157
1158
1159
1160
1161
1162static ssize_t store_port_test(struct device *dev,
1163 struct device_attribute *attr,
1164 const char *buf, size_t count)
1165{
1166 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1167 unsigned long flags;
1168 unsigned mode;
1169
	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
1171 if (attr == NULL || buf == NULL) {
1172 dev_err(dev, "[%s] EINVAL\n", __func__);
1173 goto done;
1174 }
1175
1176 if (sscanf(buf, "%u", &mode) != 1) {
1177 dev_err(dev, "<mode>: set port test mode");
1178 goto done;
1179 }
1180
1181 spin_lock_irqsave(udc->lock, flags);
1182 if (hw_port_test_set(mode))
1183 dev_err(dev, "invalid mode\n");
1184 spin_unlock_irqrestore(udc->lock, flags);
1185
1186 done:
1187 return count;
1188}
1189static DEVICE_ATTR(port_test, S_IRUSR | S_IWUSR,
1190 show_port_test, store_port_test);
1191
1192
1193
1194
1195
1196
1197static ssize_t show_qheads(struct device *dev, struct device_attribute *attr,
1198 char *buf)
1199{
1200 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1201 unsigned long flags;
1202 unsigned i, j, n = 0;
1203
1204 dbg_trace("[%s] %p\n", __func__, buf);
1205 if (attr == NULL || buf == NULL) {
1206 dev_err(dev, "[%s] EINVAL\n", __func__);
1207 return 0;
1208 }
1209
1210 spin_lock_irqsave(udc->lock, flags);
1211 for (i = 0; i < hw_ep_max/2; i++) {
1212 struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
1213 struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
1214 n += scnprintf(buf + n, PAGE_SIZE - n,
1215 "EP=%02i: RX=%08X TX=%08X\n",
1216 i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
1217 for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
1218 n += scnprintf(buf + n, PAGE_SIZE - n,
1219 " %04X: %08X %08X\n", j,
1220 *((u32 *)mEpRx->qh.ptr + j),
1221 *((u32 *)mEpTx->qh.ptr + j));
1222 }
1223 }
1224 spin_unlock_irqrestore(udc->lock, flags);
1225
1226 return n;
1227}
1228static DEVICE_ATTR(qheads, S_IRUSR, show_qheads, NULL);
1229
1230
1231
1232
1233
1234
1235#define DUMP_ENTRIES 512
1236static ssize_t show_registers(struct device *dev,
1237 struct device_attribute *attr, char *buf)
1238{
1239 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1240 unsigned long flags;
1241 u32 *dump;
1242 unsigned i, k, n = 0;
1243
1244 dbg_trace("[%s] %p\n", __func__, buf);
1245 if (attr == NULL || buf == NULL) {
1246 dev_err(dev, "[%s] EINVAL\n", __func__);
1247 return 0;
1248 }
1249
1250 dump = kmalloc(sizeof(u32) * DUMP_ENTRIES, GFP_KERNEL);
1251 if (!dump) {
1252 dev_err(dev, "%s: out of memory\n", __func__);
1253 return 0;
1254 }
1255
1256 spin_lock_irqsave(udc->lock, flags);
1257 k = hw_register_read(dump, DUMP_ENTRIES);
1258 spin_unlock_irqrestore(udc->lock, flags);
1259
1260 for (i = 0; i < k; i++) {
1261 n += scnprintf(buf + n, PAGE_SIZE - n,
1262 "reg[0x%04X] = 0x%08X\n",
1263 i * (unsigned)sizeof(u32), dump[i]);
1264 }
1265 kfree(dump);
1266
1267 return n;
1268}
1269
1270
1271
1272
1273
1274
1275static ssize_t store_registers(struct device *dev,
1276 struct device_attribute *attr,
1277 const char *buf, size_t count)
1278{
1279 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1280 unsigned long addr, data, flags;
1281
	dbg_trace("[%s] %p, %zu\n", __func__, buf, count);
1283 if (attr == NULL || buf == NULL) {
1284 dev_err(dev, "[%s] EINVAL\n", __func__);
1285 goto done;
1286 }
1287
1288 if (sscanf(buf, "%li %li", &addr, &data) != 2) {
1289 dev_err(dev, "<addr> <data>: write data to register address");
1290 goto done;
1291 }
1292
1293 spin_lock_irqsave(udc->lock, flags);
1294 if (hw_register_write(addr, data))
1295 dev_err(dev, "invalid address range\n");
1296 spin_unlock_irqrestore(udc->lock, flags);
1297
1298 done:
1299 return count;
1300}
1301static DEVICE_ATTR(registers, S_IRUSR | S_IWUSR,
1302 show_registers, store_registers);
1303
1304
1305
1306
1307
1308
1309static ssize_t show_requests(struct device *dev, struct device_attribute *attr,
1310 char *buf)
1311{
1312 struct ci13xxx *udc = container_of(dev, struct ci13xxx, gadget.dev);
1313 unsigned long flags;
1314 struct list_head *ptr = NULL;
1315 struct ci13xxx_req *req = NULL;
1316 unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
1317
1318 dbg_trace("[%s] %p\n", __func__, buf);
1319 if (attr == NULL || buf == NULL) {
1320 dev_err(dev, "[%s] EINVAL\n", __func__);
1321 return 0;
1322 }
1323
1324 spin_lock_irqsave(udc->lock, flags);
1325 for (i = 0; i < hw_ep_max; i++)
1326 list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
1327 {
1328 req = list_entry(ptr, struct ci13xxx_req, queue);
1329
			n += scnprintf(buf + n, PAGE_SIZE - n,
					"EP=%02i: TD=%08X %s\n",
					i % (hw_ep_max/2), (u32)req->dma,
					((i < hw_ep_max/2) ? "RX" : "TX"));
1334
1335 for (j = 0; j < qSize; j++)
1336 n += scnprintf(buf + n, PAGE_SIZE - n,
1337 " %04X: %08X\n", j,
1338 *((u32 *)req->ptr + j));
1339 }
1340 spin_unlock_irqrestore(udc->lock, flags);
1341
1342 return n;
1343}
1344static DEVICE_ATTR(requests, S_IRUSR, show_requests, NULL);
1345
1346
1347
1348
1349
1350
1351
1352__maybe_unused static int dbg_create_files(struct device *dev)
1353{
1354 int retval = 0;
1355
1356 if (dev == NULL)
1357 return -EINVAL;
1358 retval = device_create_file(dev, &dev_attr_device);
1359 if (retval)
1360 goto done;
1361 retval = device_create_file(dev, &dev_attr_driver);
1362 if (retval)
1363 goto rm_device;
1364 retval = device_create_file(dev, &dev_attr_events);
1365 if (retval)
1366 goto rm_driver;
1367 retval = device_create_file(dev, &dev_attr_inters);
1368 if (retval)
1369 goto rm_events;
1370 retval = device_create_file(dev, &dev_attr_port_test);
1371 if (retval)
1372 goto rm_inters;
1373 retval = device_create_file(dev, &dev_attr_qheads);
1374 if (retval)
1375 goto rm_port_test;
1376 retval = device_create_file(dev, &dev_attr_registers);
1377 if (retval)
1378 goto rm_qheads;
1379 retval = device_create_file(dev, &dev_attr_requests);
1380 if (retval)
1381 goto rm_registers;
1382 return 0;
1383
1384 rm_registers:
1385 device_remove_file(dev, &dev_attr_registers);
1386 rm_qheads:
1387 device_remove_file(dev, &dev_attr_qheads);
1388 rm_port_test:
1389 device_remove_file(dev, &dev_attr_port_test);
1390 rm_inters:
1391 device_remove_file(dev, &dev_attr_inters);
1392 rm_events:
1393 device_remove_file(dev, &dev_attr_events);
1394 rm_driver:
1395 device_remove_file(dev, &dev_attr_driver);
1396 rm_device:
1397 device_remove_file(dev, &dev_attr_device);
1398 done:
1399 return retval;
1400}
1401
1402
1403
1404
1405
1406
1407
1408__maybe_unused static int dbg_remove_files(struct device *dev)
1409{
1410 if (dev == NULL)
1411 return -EINVAL;
1412 device_remove_file(dev, &dev_attr_requests);
1413 device_remove_file(dev, &dev_attr_registers);
1414 device_remove_file(dev, &dev_attr_qheads);
1415 device_remove_file(dev, &dev_attr_port_test);
1416 device_remove_file(dev, &dev_attr_inters);
1417 device_remove_file(dev, &dev_attr_events);
1418 device_remove_file(dev, &dev_attr_driver);
1419 device_remove_file(dev, &dev_attr_device);
1420 return 0;
1421}
1422
1423
1424
1425
1426
1427
1428
1429
1430static inline u8 _usb_addr(struct ci13xxx_ep *ep)
1431{
1432 return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
1433}
1434
1435
1436
1437
1438
1439
1440
1441
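/**
 * _hardware_enqueue: maps the request buffer, builds its transfer
 * descriptor(s) and hands them to the endpoint's hardware queue
 * @mEp:  endpoint the request is queued on (caller holds its lock)
 * @mReq: request to program
 *
 * This function returns 0 on success or a negative error code
 */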
1442static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1443{
1444 unsigned i;
1445 int ret = 0;
1446 unsigned length = mReq->req.length;
1447
1448 trace("%p, %p", mEp, mReq);
1449
1450
1451 if (mReq->req.status == -EALREADY)
1452 return -EALREADY;
1453
1454 mReq->req.status = -EALREADY;
	if (length && mReq->req.dma == DMA_ADDR_INVALID) {
		mReq->req.dma = dma_map_single(mEp->device, mReq->req.buf,
					length, mEp->dir ? DMA_TO_DEVICE :
					DMA_FROM_DEVICE);
		/* a zero return is not a reliable failure indicator;
		 * use dma_mapping_error() to detect a failed mapping */
		if (dma_mapping_error(mEp->device, mReq->req.dma))
			return -ENOMEM;

		mReq->map = 1;
	}
1465
1466 if (mReq->req.zero && length && (length % mEp->ep.maxpacket == 0)) {
1467 mReq->zptr = dma_pool_alloc(mEp->td_pool, GFP_ATOMIC,
1468 &mReq->zdma);
1469 if (mReq->zptr == NULL) {
1470 if (mReq->map) {
1471 dma_unmap_single(mEp->device, mReq->req.dma,
1472 length, mEp->dir ? DMA_TO_DEVICE :
1473 DMA_FROM_DEVICE);
1474 mReq->req.dma = DMA_ADDR_INVALID;
1475 mReq->map = 0;
1476 }
1477 return -ENOMEM;
1478 }
1479 memset(mReq->zptr, 0, sizeof(*mReq->zptr));
1480 mReq->zptr->next = TD_TERMINATE;
1481 mReq->zptr->token = TD_STATUS_ACTIVE;
1482 if (!mReq->req.no_interrupt)
1483 mReq->zptr->token |= TD_IOC;
1484 }
1485
1486
1487
1488
1489 memset(mReq->ptr, 0, sizeof(*mReq->ptr));
1490 mReq->ptr->token = length << ffs_nr(TD_TOTAL_BYTES);
1491 mReq->ptr->token &= TD_TOTAL_BYTES;
1492 mReq->ptr->token |= TD_STATUS_ACTIVE;
1493 if (mReq->zptr) {
1494 mReq->ptr->next = mReq->zdma;
1495 } else {
1496 mReq->ptr->next = TD_TERMINATE;
1497 if (!mReq->req.no_interrupt)
1498 mReq->ptr->token |= TD_IOC;
1499 }
1500 mReq->ptr->page[0] = mReq->req.dma;
1501 for (i = 1; i < 5; i++)
1502 mReq->ptr->page[i] =
1503 (mReq->req.dma + i * CI13XXX_PAGE_SIZE) & ~TD_RESERVED_MASK;
1504
1505 if (!list_empty(&mEp->qh.queue)) {
1506 struct ci13xxx_req *mReqPrev;
1507 int n = hw_ep_bit(mEp->num, mEp->dir);
1508 int tmp_stat;
1509
1510 mReqPrev = list_entry(mEp->qh.queue.prev,
1511 struct ci13xxx_req, queue);
1512 if (mReqPrev->zptr)
1513 mReqPrev->zptr->next = mReq->dma & TD_ADDR_MASK;
1514 else
1515 mReqPrev->ptr->next = mReq->dma & TD_ADDR_MASK;
1516 wmb();
1517 if (hw_cread(CAP_ENDPTPRIME, BIT(n)))
1518 goto done;
1519 do {
1520 hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
1521 tmp_stat = hw_cread(CAP_ENDPTSTAT, BIT(n));
1522 } while (!hw_cread(CAP_USBCMD, USBCMD_ATDTW));
1523 hw_cwrite(CAP_USBCMD, USBCMD_ATDTW, 0);
1524 if (tmp_stat)
1525 goto done;
1526 }
1527
1528
1529 mEp->qh.ptr->td.next = mReq->dma;
1530 mEp->qh.ptr->td.token &= ~TD_STATUS;
1531 mEp->qh.ptr->cap |= QH_ZLT;
1532
1533 wmb();
1534
1535 ret = hw_ep_prime(mEp->num, mEp->dir,
1536 mEp->type == USB_ENDPOINT_XFER_CONTROL);
1537done:
1538 return ret;
1539}
1540
1541
1542
1543
1544
1545
1546
1547
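/**
 * _hardware_dequeue: retires a completed request, unmapping its buffer and
 * translating the TD token into req.status and req.actual
 * @mEp:  endpoint the request was queued on
 * @mReq: request to retire
 *
 * This function returns the number of bytes transferred, -EBUSY if the TD
 * is still active, or -EINVAL if the request was never enqueued
 */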
1548static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
1549{
1550 trace("%p, %p", mEp, mReq);
1551
1552 if (mReq->req.status != -EALREADY)
1553 return -EINVAL;
1554
1555 if ((TD_STATUS_ACTIVE & mReq->ptr->token) != 0)
1556 return -EBUSY;
1557
1558 if (mReq->zptr) {
1559 if ((TD_STATUS_ACTIVE & mReq->zptr->token) != 0)
1560 return -EBUSY;
1561 dma_pool_free(mEp->td_pool, mReq->zptr, mReq->zdma);
1562 mReq->zptr = NULL;
1563 }
1564
1565 mReq->req.status = 0;
1566
1567 if (mReq->map) {
1568 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
1569 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
1570 mReq->req.dma = DMA_ADDR_INVALID;
1571 mReq->map = 0;
1572 }
1573
1574 mReq->req.status = mReq->ptr->token & TD_STATUS;
1575 if ((TD_STATUS_HALTED & mReq->req.status) != 0)
1576 mReq->req.status = -1;
1577 else if ((TD_STATUS_DT_ERR & mReq->req.status) != 0)
1578 mReq->req.status = -1;
1579 else if ((TD_STATUS_TR_ERR & mReq->req.status) != 0)
1580 mReq->req.status = -1;
1581
1582 mReq->req.actual = mReq->ptr->token & TD_TOTAL_BYTES;
1583 mReq->req.actual >>= ffs_nr(TD_TOTAL_BYTES);
1584 mReq->req.actual = mReq->req.length - mReq->req.actual;
1585 mReq->req.actual = mReq->req.status ? 0 : mReq->req.actual;
1586
1587 return mReq->req.actual;
1588}
1589
1590
1591
1592
1593
1594
1595
1596
1597static int _ep_nuke(struct ci13xxx_ep *mEp)
1598__releases(mEp->lock)
1599__acquires(mEp->lock)
1600{
1601 trace("%p", mEp);
1602
1603 if (mEp == NULL)
1604 return -EINVAL;
1605
1606 hw_ep_flush(mEp->num, mEp->dir);
1607
1608 while (!list_empty(&mEp->qh.queue)) {
1609
1610
1611 struct ci13xxx_req *mReq = \
1612 list_entry(mEp->qh.queue.next,
1613 struct ci13xxx_req, queue);
1614 list_del_init(&mReq->queue);
1615 mReq->req.status = -ESHUTDOWN;
1616
1617 if (mReq->req.complete != NULL) {
1618 spin_unlock(mEp->lock);
1619 mReq->req.complete(&mEp->ep, &mReq->req);
1620 spin_lock(mEp->lock);
1621 }
1622 }
1623 return 0;
1624}
1625
1626
1627
1628
1629
1630
1631
1632static int _gadget_stop_activity(struct usb_gadget *gadget)
1633{
1634 struct usb_ep *ep;
1635 struct ci13xxx *udc = container_of(gadget, struct ci13xxx, gadget);
1636 unsigned long flags;
1637
1638 trace("%p", gadget);
1639
1640 if (gadget == NULL)
1641 return -EINVAL;
1642
1643 spin_lock_irqsave(udc->lock, flags);
1644 udc->gadget.speed = USB_SPEED_UNKNOWN;
1645 udc->remote_wakeup = 0;
1646 udc->suspended = 0;
1647 spin_unlock_irqrestore(udc->lock, flags);
1648
1649
1650 gadget_for_each_ep(ep, gadget) {
1651 usb_ep_fifo_flush(ep);
1652 }
1653 usb_ep_fifo_flush(&udc->ep0out.ep);
1654 usb_ep_fifo_flush(&udc->ep0in.ep);
1655
1656 udc->driver->disconnect(gadget);
1657
1658
1659 gadget_for_each_ep(ep, gadget) {
1660 usb_ep_disable(ep);
1661 }
1662
1663 if (udc->status != NULL) {
1664 usb_ep_free_request(&udc->ep0in.ep, udc->status);
1665 udc->status = NULL;
1666 }
1667
1668 return 0;
1669}
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
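/**
 * isr_reset_handler: USB bus reset interrupt handler
 * @udc: UDC descriptor
 *
 * Caller holds udc->lock; it is released while gadget activity is stopped
 * and the hardware is reset, then re-acquired
 */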
1680static void isr_reset_handler(struct ci13xxx *udc)
1681__releases(udc->lock)
1682__acquires(udc->lock)
1683{
1684 int retval;
1685
1686 trace("%p", udc);
1687
1688 if (udc == NULL) {
1689 err("EINVAL");
1690 return;
1691 }
1692
1693 dbg_event(0xFF, "BUS RST", 0);
1694
1695 spin_unlock(udc->lock);
1696 retval = _gadget_stop_activity(&udc->gadget);
1697 if (retval)
1698 goto done;
1699
1700 retval = hw_usb_reset();
1701 if (retval)
1702 goto done;
1703
1704 udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
1705 if (udc->status == NULL)
1706 retval = -ENOMEM;
1707
1708 spin_lock(udc->lock);
1709
1710 done:
1711 if (retval)
1712 err("error: %i", retval);
1713}
1714
1715
1716
1717
1718
1719
1720
1721
1722static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
1723{
1724 trace("%p, %p", ep, req);
1725
1726 if (ep == NULL || req == NULL) {
1727 err("EINVAL");
1728 return;
1729 }
1730
1731 kfree(req->buf);
1732 usb_ep_free_request(ep, req);
1733}
1734
1735
1736
1737
1738
1739
1740
1741
1742static int isr_get_status_response(struct ci13xxx *udc,
1743 struct usb_ctrlrequest *setup)
1744__releases(mEp->lock)
1745__acquires(mEp->lock)
1746{
1747 struct ci13xxx_ep *mEp = &udc->ep0in;
1748 struct usb_request *req = NULL;
1749 gfp_t gfp_flags = GFP_ATOMIC;
1750 int dir, num, retval;
1751
1752 trace("%p, %p", mEp, setup);
1753
1754 if (mEp == NULL || setup == NULL)
1755 return -EINVAL;
1756
1757 spin_unlock(mEp->lock);
1758 req = usb_ep_alloc_request(&mEp->ep, gfp_flags);
1759 spin_lock(mEp->lock);
1760 if (req == NULL)
1761 return -ENOMEM;
1762
1763 req->complete = isr_get_status_complete;
1764 req->length = 2;
1765 req->buf = kzalloc(req->length, gfp_flags);
1766 if (req->buf == NULL) {
1767 retval = -ENOMEM;
1768 goto err_free_req;
1769 }
1770
1771 if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
1772
1773 *((u16 *)req->buf) = _udc->remote_wakeup << 1;
1774 retval = 0;
1775 } else if ((setup->bRequestType & USB_RECIP_MASK) \
1776 == USB_RECIP_ENDPOINT) {
1777 dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
1778 TX : RX;
1779 num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
1780 *((u16 *)req->buf) = hw_ep_get_halt(num, dir);
1781 }
1782
1783
1784 spin_unlock(mEp->lock);
1785 retval = usb_ep_queue(&mEp->ep, req, gfp_flags);
1786 spin_lock(mEp->lock);
1787 if (retval)
1788 goto err_free_buf;
1789
1790 return 0;
1791
1792 err_free_buf:
1793 kfree(req->buf);
1794 err_free_req:
1795 spin_unlock(mEp->lock);
1796 usb_ep_free_request(&mEp->ep, req);
1797 spin_lock(mEp->lock);
1798 return retval;
1799}
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809static void
1810isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
1811{
1812 struct ci13xxx *udc = req->context;
1813 unsigned long flags;
1814
1815 trace("%p, %p", ep, req);
1816
1817 spin_lock_irqsave(udc->lock, flags);
1818 if (udc->test_mode)
1819 hw_port_test_set(udc->test_mode);
1820 spin_unlock_irqrestore(udc->lock, flags);
1821}
1822
1823
1824
1825
1826
1827
1828
1829static int isr_setup_status_phase(struct ci13xxx *udc)
1830__releases(mEp->lock)
1831__acquires(mEp->lock)
1832{
1833 int retval;
1834 struct ci13xxx_ep *mEp;
1835
1836 trace("%p", udc);
1837
1838 mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
1839 udc->status->context = udc;
1840 udc->status->complete = isr_setup_status_complete;
1841
1842 spin_unlock(mEp->lock);
1843 retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
1844 spin_lock(mEp->lock);
1845
1846 return retval;
1847}
1848
1849
1850
1851
1852
1853
1854
1855
1856static int isr_tr_complete_low(struct ci13xxx_ep *mEp)
1857__releases(mEp->lock)
1858__acquires(mEp->lock)
1859{
1860 struct ci13xxx_req *mReq, *mReqTemp;
1861 struct ci13xxx_ep *mEpTemp = mEp;
1862 int uninitialized_var(retval);
1863
1864 trace("%p", mEp);
1865
1866 if (list_empty(&mEp->qh.queue))
1867 return -EINVAL;
1868
1869 list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue,
1870 queue) {
1871 retval = _hardware_dequeue(mEp, mReq);
1872 if (retval < 0)
1873 break;
1874 list_del_init(&mReq->queue);
1875 dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
1876 if (mReq->req.complete != NULL) {
1877 spin_unlock(mEp->lock);
1878 if ((mEp->type == USB_ENDPOINT_XFER_CONTROL) &&
1879 mReq->req.length)
1880 mEpTemp = &_udc->ep0in;
1881 mReq->req.complete(&mEpTemp->ep, &mReq->req);
1882 spin_lock(mEp->lock);
1883 }
1884 }
1885
1886 if (retval == -EBUSY)
1887 retval = 0;
1888 if (retval < 0)
1889 dbg_event(_usb_addr(mEp), "DONE", retval);
1890
1891 return retval;
1892}
1893
1894
1895
1896
1897
1898
1899
1900static void isr_tr_complete_handler(struct ci13xxx *udc)
1901__releases(udc->lock)
1902__acquires(udc->lock)
1903{
1904 unsigned i;
1905 u8 tmode = 0;
1906
1907 trace("%p", udc);
1908
1909 if (udc == NULL) {
1910 err("EINVAL");
1911 return;
1912 }
1913
1914 for (i = 0; i < hw_ep_max; i++) {
1915 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
1916 int type, num, dir, err = -EINVAL;
1917 struct usb_ctrlrequest req;
1918
1919 if (mEp->desc == NULL)
1920 continue;
1921
1922 if (hw_test_and_clear_complete(i)) {
1923 err = isr_tr_complete_low(mEp);
1924 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
1925 if (err > 0)
1926 err = isr_setup_status_phase(udc);
1927 if (err < 0) {
1928 dbg_event(_usb_addr(mEp),
1929 "ERROR", err);
1930 spin_unlock(udc->lock);
1931 if (usb_ep_set_halt(&mEp->ep))
1932 err("error: ep_set_halt");
1933 spin_lock(udc->lock);
1934 }
1935 }
1936 }
1937
1938 if (mEp->type != USB_ENDPOINT_XFER_CONTROL ||
1939 !hw_test_and_clear_setup_status(i))
1940 continue;
1941
1942 if (i != 0) {
1943 warn("ctrl traffic received at endpoint");
1944 continue;
1945 }
1946
1947
1948
1949
1950
1951 _ep_nuke(&udc->ep0out);
1952 _ep_nuke(&udc->ep0in);
1953
1954
1955 do {
1956 hw_test_and_set_setup_guard();
1957 memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
1958 } while (!hw_test_and_clear_setup_guard());
1959
1960 type = req.bRequestType;
1961
1962 udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
1963
1964 dbg_setup(_usb_addr(mEp), &req);
1965
1966 switch (req.bRequest) {
1967 case USB_REQ_CLEAR_FEATURE:
1968 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
1969 le16_to_cpu(req.wValue) ==
1970 USB_ENDPOINT_HALT) {
1971 if (req.wLength != 0)
1972 break;
1973 num = le16_to_cpu(req.wIndex);
1974 dir = num & USB_ENDPOINT_DIR_MASK;
1975 num &= USB_ENDPOINT_NUMBER_MASK;
1976 if (dir)
1977 num += hw_ep_max/2;
1978 if (!udc->ci13xxx_ep[num].wedge) {
1979 spin_unlock(udc->lock);
1980 err = usb_ep_clear_halt(
1981 &udc->ci13xxx_ep[num].ep);
1982 spin_lock(udc->lock);
1983 if (err)
1984 break;
1985 }
1986 err = isr_setup_status_phase(udc);
1987 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
1988 le16_to_cpu(req.wValue) ==
1989 USB_DEVICE_REMOTE_WAKEUP) {
1990 if (req.wLength != 0)
1991 break;
1992 udc->remote_wakeup = 0;
1993 err = isr_setup_status_phase(udc);
1994 } else {
1995 goto delegate;
1996 }
1997 break;
1998 case USB_REQ_GET_STATUS:
1999 if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
2000 type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
2001 type != (USB_DIR_IN|USB_RECIP_INTERFACE))
2002 goto delegate;
2003 if (le16_to_cpu(req.wLength) != 2 ||
2004 le16_to_cpu(req.wValue) != 0)
2005 break;
2006 err = isr_get_status_response(udc, &req);
2007 break;
2008 case USB_REQ_SET_ADDRESS:
2009 if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
2010 goto delegate;
2011 if (le16_to_cpu(req.wLength) != 0 ||
2012 le16_to_cpu(req.wIndex) != 0)
2013 break;
2014 err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
2015 if (err)
2016 break;
2017 err = isr_setup_status_phase(udc);
2018 break;
2019 case USB_REQ_SET_FEATURE:
2020 if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
2021 le16_to_cpu(req.wValue) ==
2022 USB_ENDPOINT_HALT) {
2023 if (req.wLength != 0)
2024 break;
2025 num = le16_to_cpu(req.wIndex);
2026 dir = num & USB_ENDPOINT_DIR_MASK;
2027 num &= USB_ENDPOINT_NUMBER_MASK;
2028 if (dir)
2029 num += hw_ep_max/2;
2030
2031 spin_unlock(udc->lock);
2032 err = usb_ep_set_halt(&udc->ci13xxx_ep[num].ep);
2033 spin_lock(udc->lock);
2034 if (!err)
2035 isr_setup_status_phase(udc);
2036 } else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
2037 if (req.wLength != 0)
2038 break;
2039 switch (le16_to_cpu(req.wValue)) {
2040 case USB_DEVICE_REMOTE_WAKEUP:
2041 udc->remote_wakeup = 1;
2042 err = isr_setup_status_phase(udc);
2043 break;
2044 case USB_DEVICE_TEST_MODE:
2045 tmode = le16_to_cpu(req.wIndex) >> 8;
2046 switch (tmode) {
2047 case TEST_J:
2048 case TEST_K:
2049 case TEST_SE0_NAK:
2050 case TEST_PACKET:
2051 case TEST_FORCE_EN:
2052 udc->test_mode = tmode;
2053 err = isr_setup_status_phase(
2054 udc);
2055 break;
2056 default:
2057 break;
2058 }
2059 default:
2060 goto delegate;
2061 }
2062 } else {
2063 goto delegate;
2064 }
2065 break;
2066 default:
2067delegate:
2068 if (req.wLength == 0)
2069 udc->ep0_dir = TX;
2070
2071 spin_unlock(udc->lock);
2072 err = udc->driver->setup(&udc->gadget, &req);
2073 spin_lock(udc->lock);
2074 break;
2075 }
2076
2077 if (err < 0) {
2078 dbg_event(_usb_addr(mEp), "ERROR", err);
2079
2080 spin_unlock(udc->lock);
2081 if (usb_ep_set_halt(&mEp->ep))
2082 err("error: ep_set_halt");
2083 spin_lock(udc->lock);
2084 }
2085 }
2086}
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096static int ep_enable(struct usb_ep *ep,
2097 const struct usb_endpoint_descriptor *desc)
2098{
2099 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2100 int retval = 0;
2101 unsigned long flags;
2102
2103 trace("%p, %p", ep, desc);
2104
2105 if (ep == NULL || desc == NULL)
2106 return -EINVAL;
2107
2108 spin_lock_irqsave(mEp->lock, flags);
2109
2110
2111
2112 mEp->desc = desc;
2113
2114 if (!list_empty(&mEp->qh.queue))
2115 warn("enabling a non-empty endpoint!");
2116
2117 mEp->dir = usb_endpoint_dir_in(desc) ? TX : RX;
2118 mEp->num = usb_endpoint_num(desc);
2119 mEp->type = usb_endpoint_type(desc);
2120
2121 mEp->ep.maxpacket = usb_endpoint_maxp(desc);
2122
2123 dbg_event(_usb_addr(mEp), "ENABLE", 0);
2124
2125 mEp->qh.ptr->cap = 0;
2126
2127 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
2128 mEp->qh.ptr->cap |= QH_IOS;
2129 else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
2130 mEp->qh.ptr->cap &= ~QH_MULT;
2131 else
2132 mEp->qh.ptr->cap &= ~QH_ZLT;
2133
2134 mEp->qh.ptr->cap |=
2135 (mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
2136 mEp->qh.ptr->td.next |= TD_TERMINATE;
2137
2138
2139
2140
2141
2142 if (mEp->num)
2143 retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
2144
2145 spin_unlock_irqrestore(mEp->lock, flags);
2146 return retval;
2147}
2148
2149
2150
2151
2152
2153
2154static int ep_disable(struct usb_ep *ep)
2155{
2156 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2157 int direction, retval = 0;
2158 unsigned long flags;
2159
2160 trace("%p", ep);
2161
2162 if (ep == NULL)
2163 return -EINVAL;
2164 else if (mEp->desc == NULL)
2165 return -EBUSY;
2166
2167 spin_lock_irqsave(mEp->lock, flags);
2168
2169
2170
2171 direction = mEp->dir;
2172 do {
2173 dbg_event(_usb_addr(mEp), "DISABLE", 0);
2174
2175 retval |= _ep_nuke(mEp);
2176 retval |= hw_ep_disable(mEp->num, mEp->dir);
2177
2178 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
2179 mEp->dir = (mEp->dir == TX) ? RX : TX;
2180
2181 } while (mEp->dir != direction);
2182
2183 mEp->desc = NULL;
2184 mEp->ep.desc = NULL;
2185
2186 spin_unlock_irqrestore(mEp->lock, flags);
2187 return retval;
2188}
2189
2190
2191
2192
2193
2194
2195static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
2196{
2197 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2198 struct ci13xxx_req *mReq = NULL;
2199
2200 trace("%p, %i", ep, gfp_flags);
2201
2202 if (ep == NULL) {
2203 err("EINVAL");
2204 return NULL;
2205 }
2206
2207 mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
2208 if (mReq != NULL) {
2209 INIT_LIST_HEAD(&mReq->queue);
2210 mReq->req.dma = DMA_ADDR_INVALID;
2211
2212 mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
2213 &mReq->dma);
2214 if (mReq->ptr == NULL) {
2215 kfree(mReq);
2216 mReq = NULL;
2217 }
2218 }
2219
2220 dbg_event(_usb_addr(mEp), "ALLOC", mReq == NULL);
2221
2222 return (mReq == NULL) ? NULL : &mReq->req;
2223}
2224
2225
2226
2227
2228
2229
2230static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
2231{
2232 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2233 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
2234 unsigned long flags;
2235
2236 trace("%p, %p", ep, req);
2237
2238 if (ep == NULL || req == NULL) {
2239 err("EINVAL");
2240 return;
2241 } else if (!list_empty(&mReq->queue)) {
2242 err("EBUSY");
2243 return;
2244 }
2245
2246 spin_lock_irqsave(mEp->lock, flags);
2247
2248 if (mReq->ptr)
2249 dma_pool_free(mEp->td_pool, mReq->ptr, mReq->dma);
2250 kfree(mReq);
2251
2252 dbg_event(_usb_addr(mEp), "FREE", 0);
2253
2254 spin_unlock_irqrestore(mEp->lock, flags);
2255}
2256
2257
2258
2259
2260
2261
2262static int ep_queue(struct usb_ep *ep, struct usb_request *req,
2263 gfp_t __maybe_unused gfp_flags)
2264{
2265 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2266 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
2267 int retval = 0;
2268 unsigned long flags;
2269
2270 trace("%p, %p, %X", ep, req, gfp_flags);
2271
2272 if (ep == NULL || req == NULL || mEp->desc == NULL)
2273 return -EINVAL;
2274
2275 spin_lock_irqsave(mEp->lock, flags);
2276
2277 if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
2278 if (req->length)
2279 mEp = (_udc->ep0_dir == RX) ?
2280 &_udc->ep0out : &_udc->ep0in;
2281 if (!list_empty(&mEp->qh.queue)) {
2282 _ep_nuke(mEp);
2283 retval = -EOVERFLOW;
2284 warn("endpoint ctrl %X nuked", _usb_addr(mEp));
2285 }
2286 }
2287
2288
2289 if (!list_empty(&mReq->queue)) {
2290 retval = -EBUSY;
2291 err("request already in queue");
2292 goto done;
2293 }
2294
2295 if (req->length > (4 * CI13XXX_PAGE_SIZE)) {
2296 req->length = (4 * CI13XXX_PAGE_SIZE);
2297 retval = -EMSGSIZE;
2298 warn("request length truncated");
2299 }
2300
2301 dbg_queue(_usb_addr(mEp), req, retval);
2302
2303
2304 mReq->req.status = -EINPROGRESS;
2305 mReq->req.actual = 0;
2306
2307 retval = _hardware_enqueue(mEp, mReq);
2308
2309 if (retval == -EALREADY) {
2310 dbg_event(_usb_addr(mEp), "QUEUE", retval);
2311 retval = 0;
2312 }
2313 if (!retval)
2314 list_add_tail(&mReq->queue, &mEp->qh.queue);
2315
2316 done:
2317 spin_unlock_irqrestore(mEp->lock, flags);
2318 return retval;
2319}
2320
2321
2322
2323
2324
2325
2326static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
2327{
2328 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2329 struct ci13xxx_req *mReq = container_of(req, struct ci13xxx_req, req);
2330 unsigned long flags;
2331
2332 trace("%p, %p", ep, req);
2333
2334 if (ep == NULL || req == NULL || mReq->req.status != -EALREADY ||
2335 mEp->desc == NULL || list_empty(&mReq->queue) ||
2336 list_empty(&mEp->qh.queue))
2337 return -EINVAL;
2338
2339 spin_lock_irqsave(mEp->lock, flags);
2340
2341 dbg_event(_usb_addr(mEp), "DEQUEUE", 0);
2342
2343 hw_ep_flush(mEp->num, mEp->dir);
2344
2345
2346 list_del_init(&mReq->queue);
2347 if (mReq->map) {
2348 dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
2349 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
2350 mReq->req.dma = DMA_ADDR_INVALID;
2351 mReq->map = 0;
2352 }
2353 req->status = -ECONNRESET;
2354
2355 if (mReq->req.complete != NULL) {
2356 spin_unlock(mEp->lock);
2357 mReq->req.complete(&mEp->ep, &mReq->req);
2358 spin_lock(mEp->lock);
2359 }
2360
2361 spin_unlock_irqrestore(mEp->lock, flags);
2362 return 0;
2363}
2364
2365
2366
2367
2368
2369
2370static int ep_set_halt(struct usb_ep *ep, int value)
2371{
2372 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2373 int direction, retval = 0;
2374 unsigned long flags;
2375
2376 trace("%p, %i", ep, value);
2377
2378 if (ep == NULL || mEp->desc == NULL)
2379 return -EINVAL;
2380
2381 spin_lock_irqsave(mEp->lock, flags);
2382
2383#ifndef STALL_IN
2384
2385 if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
2386 !list_empty(&mEp->qh.queue)) {
2387 spin_unlock_irqrestore(mEp->lock, flags);
2388 return -EAGAIN;
2389 }
2390#endif
2391
2392 direction = mEp->dir;
2393 do {
2394 dbg_event(_usb_addr(mEp), "HALT", value);
2395 retval |= hw_ep_set_halt(mEp->num, mEp->dir, value);
2396
2397 if (!value)
2398 mEp->wedge = 0;
2399
2400 if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
2401 mEp->dir = (mEp->dir == TX) ? RX : TX;
2402
2403 } while (mEp->dir != direction);
2404
2405 spin_unlock_irqrestore(mEp->lock, flags);
2406 return retval;
2407}
2408
2409
2410
2411
2412
2413
2414static int ep_set_wedge(struct usb_ep *ep)
2415{
2416 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2417 unsigned long flags;
2418
2419 trace("%p", ep);
2420
2421 if (ep == NULL || mEp->desc == NULL)
2422 return -EINVAL;
2423
2424 spin_lock_irqsave(mEp->lock, flags);
2425
2426 dbg_event(_usb_addr(mEp), "WEDGE", 0);
2427 mEp->wedge = 1;
2428
2429 spin_unlock_irqrestore(mEp->lock, flags);
2430
2431 return usb_ep_set_halt(ep);
2432}
2433
2434
2435
2436
2437
2438
2439static void ep_fifo_flush(struct usb_ep *ep)
2440{
2441 struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
2442 unsigned long flags;
2443
2444 trace("%p", ep);
2445
2446 if (ep == NULL) {
2447 err("%02X: -EINVAL", _usb_addr(mEp));
2448 return;
2449 }
2450
2451 spin_lock_irqsave(mEp->lock, flags);
2452
2453 dbg_event(_usb_addr(mEp), "FFLUSH", 0);
2454 hw_ep_flush(mEp->num, mEp->dir);
2455
2456 spin_unlock_irqrestore(mEp->lock, flags);
2457}
2458
2459
2460
2461
2462
2463static const struct usb_ep_ops usb_ep_ops = {
2464 .enable = ep_enable,
2465 .disable = ep_disable,
2466 .alloc_request = ep_alloc_request,
2467 .free_request = ep_free_request,
2468 .queue = ep_queue,
2469 .dequeue = ep_dequeue,
2470 .set_halt = ep_set_halt,
2471 .set_wedge = ep_set_wedge,
2472 .fifo_flush = ep_fifo_flush,
2473};
2474
2475
2476
2477
2478static int ci13xxx_vbus_session(struct usb_gadget *_gadget, int is_active)
2479{
2480 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
2481 unsigned long flags;
2482 int gadget_ready = 0;
2483
2484 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS))
2485 return -EOPNOTSUPP;
2486
2487 spin_lock_irqsave(udc->lock, flags);
2488 udc->vbus_active = is_active;
2489 if (udc->driver)
2490 gadget_ready = 1;
2491 spin_unlock_irqrestore(udc->lock, flags);
2492
2493 if (gadget_ready) {
2494 if (is_active) {
2495 pm_runtime_get_sync(&_gadget->dev);
2496 hw_device_reset(udc);
2497 hw_device_state(udc->ep0out.qh.dma);
2498 } else {
2499 hw_device_state(0);
2500 if (udc->udc_driver->notify_event)
2501 udc->udc_driver->notify_event(udc,
2502 CI13XXX_CONTROLLER_STOPPED_EVENT);
2503 _gadget_stop_activity(&udc->gadget);
2504 pm_runtime_put_sync(&_gadget->dev);
2505 }
2506 }
2507
2508 return 0;
2509}
2510
2511static int ci13xxx_wakeup(struct usb_gadget *_gadget)
2512{
2513 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
2514 unsigned long flags;
2515 int ret = 0;
2516
2517 trace();
2518
2519 spin_lock_irqsave(udc->lock, flags);
2520 if (!udc->remote_wakeup) {
2521 ret = -EOPNOTSUPP;
2522 trace("remote wakeup feature is not enabled\n");
2523 goto out;
2524 }
2525 if (!hw_cread(CAP_PORTSC, PORTSC_SUSP)) {
2526 ret = -EINVAL;
2527 trace("port is not suspended\n");
2528 goto out;
2529 }
2530 hw_cwrite(CAP_PORTSC, PORTSC_FPR, PORTSC_FPR);
2531out:
2532 spin_unlock_irqrestore(udc->lock, flags);
2533 return ret;
2534}
2535
2536static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
2537{
2538 struct ci13xxx *udc = container_of(_gadget, struct ci13xxx, gadget);
2539
2540 if (udc->transceiver)
2541 return otg_set_power(udc->transceiver, mA);
2542 return -ENOTSUPP;
2543}
2544
2545static int ci13xxx_start(struct usb_gadget_driver *driver,
2546 int (*bind)(struct usb_gadget *));
2547static int ci13xxx_stop(struct usb_gadget_driver *driver);
2548
2549
2550
2551
2552
2553static const struct usb_gadget_ops usb_gadget_ops = {
2554 .vbus_session = ci13xxx_vbus_session,
2555 .wakeup = ci13xxx_wakeup,
2556 .vbus_draw = ci13xxx_vbus_draw,
2557 .start = ci13xxx_start,
2558 .stop = ci13xxx_stop,
2559};
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569static int ci13xxx_start(struct usb_gadget_driver *driver,
2570 int (*bind)(struct usb_gadget *))
2571{
2572 struct ci13xxx *udc = _udc;
2573 unsigned long flags;
2574 int i, j;
2575 int retval = -ENOMEM;
2576
2577 trace("%p", driver);
2578
2579 if (driver == NULL ||
2580 bind == NULL ||
2581 driver->setup == NULL ||
2582 driver->disconnect == NULL)
2583 return -EINVAL;
2584 else if (udc == NULL)
2585 return -ENODEV;
2586 else if (udc->driver != NULL)
2587 return -EBUSY;
2588
2589
2590 udc->qh_pool = dma_pool_create("ci13xxx_qh", &udc->gadget.dev,
2591 sizeof(struct ci13xxx_qh),
2592 64, CI13XXX_PAGE_SIZE);
2593 if (udc->qh_pool == NULL)
2594 return -ENOMEM;
2595
2596 udc->td_pool = dma_pool_create("ci13xxx_td", &udc->gadget.dev,
2597 sizeof(struct ci13xxx_td),
2598 64, CI13XXX_PAGE_SIZE);
2599 if (udc->td_pool == NULL) {
2600 dma_pool_destroy(udc->qh_pool);
2601 udc->qh_pool = NULL;
2602 return -ENOMEM;
2603 }
2604
2605 spin_lock_irqsave(udc->lock, flags);
2606
2607 info("hw_ep_max = %d", hw_ep_max);
2608
2609 udc->gadget.dev.driver = NULL;
2610
2611 retval = 0;
2612 for (i = 0; i < hw_ep_max/2; i++) {
2613 for (j = RX; j <= TX; j++) {
2614 int k = i + j * hw_ep_max/2;
2615 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
2616
2617 scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
2618 (j == TX) ? "in" : "out");
2619
2620 mEp->lock = udc->lock;
2621 mEp->device = &udc->gadget.dev;
2622 mEp->td_pool = udc->td_pool;
2623
2624 mEp->ep.name = mEp->name;
2625 mEp->ep.ops = &usb_ep_ops;
2626 mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
2627
2628 INIT_LIST_HEAD(&mEp->qh.queue);
2629 spin_unlock_irqrestore(udc->lock, flags);
2630 mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
2631 &mEp->qh.dma);
2632 spin_lock_irqsave(udc->lock, flags);
2633 if (mEp->qh.ptr == NULL)
2634 retval = -ENOMEM;
2635 else
2636 memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
2637
2638
2639 if (i == 0)
2640 continue;
2641
2642 list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
2643 }
2644 }
2645 if (retval)
2646 goto done;
2647 spin_unlock_irqrestore(udc->lock, flags);
2648 udc->ep0out.ep.desc = &ctrl_endpt_out_desc;
2649 retval = usb_ep_enable(&udc->ep0out.ep);
2650 if (retval)
2651 return retval;
2652
2653 udc->ep0in.ep.desc = &ctrl_endpt_in_desc;
2654 retval = usb_ep_enable(&udc->ep0in.ep);
2655 if (retval)
2656 return retval;
2657 spin_lock_irqsave(udc->lock, flags);
2658
2659 udc->gadget.ep0 = &udc->ep0in.ep;
2660
2661 driver->driver.bus = NULL;
2662 udc->gadget.dev.driver = &driver->driver;
2663
2664 spin_unlock_irqrestore(udc->lock, flags);
2665 retval = bind(&udc->gadget);
2666 spin_lock_irqsave(udc->lock, flags);
2667
2668 if (retval) {
2669 udc->gadget.dev.driver = NULL;
2670 goto done;
2671 }
2672
2673 udc->driver = driver;
2674 pm_runtime_get_sync(&udc->gadget.dev);
2675 if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
2676 if (udc->vbus_active) {
2677 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED)
2678 hw_device_reset(udc);
2679 } else {
2680 pm_runtime_put_sync(&udc->gadget.dev);
2681 goto done;
2682 }
2683 }
2684
2685 retval = hw_device_state(udc->ep0out.qh.dma);
2686 if (retval)
2687 pm_runtime_put_sync(&udc->gadget.dev);
2688
2689 done:
2690 spin_unlock_irqrestore(udc->lock, flags);
2691 return retval;
2692}
2693
2694
2695
2696
2697
2698
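/**
 * ci13xxx_stop: unregister a gadget driver from the controller
 *
 * Stops the hardware, unbinds the driver, frees the per-endpoint queue
 * heads and destroys the QH/TD DMA pools created by ci13xxx_start().
 */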
2699static int ci13xxx_stop(struct usb_gadget_driver *driver)
2700{
2701 struct ci13xxx *udc = _udc;
2702 unsigned long i, flags;
2703
2704 trace("%p", driver);
2705
2706 if (driver == NULL ||
2707 driver->unbind == NULL ||
2708 driver->setup == NULL ||
2709 driver->disconnect == NULL ||
2710 driver != udc->driver)
2711 return -EINVAL;
2712
2713 spin_lock_irqsave(udc->lock, flags);
2714
2715 if (!(udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) ||
2716 udc->vbus_active) {
2717 hw_device_state(0);
2718 if (udc->udc_driver->notify_event)
2719 udc->udc_driver->notify_event(udc,
2720 CI13XXX_CONTROLLER_STOPPED_EVENT);
2721 spin_unlock_irqrestore(udc->lock, flags);
2722 _gadget_stop_activity(&udc->gadget);
2723 spin_lock_irqsave(udc->lock, flags);
2724 pm_runtime_put(&udc->gadget.dev);
2725 }
2726
2727
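	/* unbind with the lock released: the gadget driver may sleep */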
2728 spin_unlock_irqrestore(udc->lock, flags);
2729 driver->unbind(&udc->gadget);
2730 spin_lock_irqsave(udc->lock, flags);
2731
2732 udc->gadget.dev.driver = NULL;
2733
2734
2735 for (i = 0; i < hw_ep_max; i++) {
2736 struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
2737
2738 if (!list_empty(&mEp->ep.ep_list))
2739 list_del_init(&mEp->ep.ep_list);
2740
2741 if (mEp->qh.ptr != NULL)
2742 dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
2743 }
2744
2745 udc->gadget.ep0 = NULL;
2746 udc->driver = NULL;
2747
2748 spin_unlock_irqrestore(udc->lock, flags);
2749
2750 if (udc->td_pool != NULL) {
2751 dma_pool_destroy(udc->td_pool);
2752 udc->td_pool = NULL;
2753 }
2754 if (udc->qh_pool != NULL) {
2755 dma_pool_destroy(udc->qh_pool);
2756 udc->qh_pool = NULL;
2757 }
2758
2759 return 0;
2760}
2761
2762
2763
2764
2765
2766
2767
2768
2769
2770
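/**
 * udc_irq: device controller interrupt handler
 *
 * Returns IRQ_HANDLED when a device interrupt has been serviced and
 * IRQ_NONE when nothing was pending or the shared registers are
 * currently in host mode. Register access is serialized by udc->lock.
 */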
2771static irqreturn_t udc_irq(void)
2772{
2773 struct ci13xxx *udc = _udc;
2774 irqreturn_t retval;
2775 u32 intr;
2776
2777 trace();
2778
2779 if (udc == NULL) {
2780 err("ENODEV");
2781 return IRQ_HANDLED;
2782 }
2783
2784 spin_lock(udc->lock);
2785
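	/* with shared registers, only service interrupts while in device mode */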
2786 if (udc->udc_driver->flags & CI13XXX_REGS_SHARED) {
2787 if (hw_cread(CAP_USBMODE, USBMODE_CM) !=
2788 USBMODE_CM_DEVICE) {
2789 spin_unlock(udc->lock);
2790 return IRQ_NONE;
2791 }
2792 }
2793 intr = hw_test_and_clear_intr_active();
2794 if (intr) {
2795 isr_statistics.hndl.buf[isr_statistics.hndl.idx++] = intr;
2796 isr_statistics.hndl.idx &= ISR_MASK;
2797 isr_statistics.hndl.cnt++;
2798
2799
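		/* handled in this order: bus reset, port change, error, transfer completion, suspend */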
2800 if (USBi_URI & intr) {
2801 isr_statistics.uri++;
2802 isr_reset_handler(udc);
2803 }
2804 if (USBi_PCI & intr) {
2805 isr_statistics.pci++;
2806 udc->gadget.speed = hw_port_is_high_speed() ?
2807 USB_SPEED_HIGH : USB_SPEED_FULL;
2808 if (udc->suspended && udc->driver->resume) {
2809 spin_unlock(udc->lock);
2810 udc->driver->resume(&udc->gadget);
2811 spin_lock(udc->lock);
2812 udc->suspended = 0;
2813 }
2814 }
2815 if (USBi_UEI & intr)
2816 isr_statistics.uei++;
2817 if (USBi_UI & intr) {
2818 isr_statistics.ui++;
2819 isr_tr_complete_handler(udc);
2820 }
2821 if (USBi_SLI & intr) {
2822 if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
2823 udc->driver->suspend) {
2824 udc->suspended = 1;
2825 spin_unlock(udc->lock);
2826 udc->driver->suspend(&udc->gadget);
2827 spin_lock(udc->lock);
2828 }
2829 isr_statistics.sli++;
2830 }
2831 retval = IRQ_HANDLED;
2832 } else {
2833 isr_statistics.none++;
2834 retval = IRQ_NONE;
2835 }
2836 spin_unlock(udc->lock);
2837
2838 return retval;
2839}
2840
2841
2842
2843
2844
2845
2846
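/**
 * udc_release: gadget device release callback
 *
 * Nothing to free here; the ci13xxx structure itself is freed in
 * udc_remove().
 */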
2847static void udc_release(struct device *dev)
2848{
2849 trace("%p", dev);
2850
2851 if (dev == NULL)
2852 err("EINVAL");
2853}
2854
2855
2856
2857
2858
2859
2860
2861
2862
2863
2864
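/**
 * udc_probe: initialize the device controller
 * @driver: platform glue (name, flags, notify_event callback)
 * @dev: parent device
 * @regs: mapped base address of the controller registers
 *
 * Allocates the ci13xxx structure, initializes and registers the gadget
 * device, resets the hardware unless its registers are shared with a
 * host controller, optionally binds to an OTG transceiver, adds the
 * gadget to the UDC core and enables runtime PM.
 */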
2865static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
2866 void __iomem *regs)
2867{
2868 struct ci13xxx *udc;
2869 int retval = 0;
2870
2871 if (dev == NULL || regs == NULL || driver == NULL ||
2872 driver->name == NULL)
2873 return -EINVAL;
2874
2875 trace("%p, %p, %p", dev, regs, driver->name);
2876
2877 udc = kzalloc(sizeof(struct ci13xxx), GFP_KERNEL);
2878 if (udc == NULL)
2879 return -ENOMEM;
2880
2881 udc->lock = &udc_lock;
2882 udc->regs = regs;
2883 udc->udc_driver = driver;
2884
2885 udc->gadget.ops = &usb_gadget_ops;
2886 udc->gadget.speed = USB_SPEED_UNKNOWN;
2887 udc->gadget.max_speed = USB_SPEED_HIGH;
2888 udc->gadget.is_otg = 0;
2889 udc->gadget.name = driver->name;
2890
2891 INIT_LIST_HEAD(&udc->gadget.ep_list);
2892 udc->gadget.ep0 = NULL;
2893
2894 dev_set_name(&udc->gadget.dev, "gadget");
2895 udc->gadget.dev.dma_mask = dev->dma_mask;
2896 udc->gadget.dev.coherent_dma_mask = dev->coherent_dma_mask;
2897 udc->gadget.dev.parent = dev;
2898 udc->gadget.dev.release = udc_release;
2899
2900 retval = hw_device_init(regs);
2901 if (retval < 0)
2902 goto free_udc;
2903
2904 udc->transceiver = otg_get_transceiver();
2905
2906 if (udc->udc_driver->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
2907 if (udc->transceiver == NULL) {
2908 retval = -ENODEV;
2909 goto free_udc;
2910 }
2911 }
2912
2913 if (!(udc->udc_driver->flags & CI13XXX_REGS_SHARED)) {
2914 retval = hw_device_reset(udc);
2915 if (retval)
2916 goto put_transceiver;
2917 }
2918
2919 retval = device_register(&udc->gadget.dev);
2920 if (retval) {
2921 put_device(&udc->gadget.dev);
2922 goto put_transceiver;
2923 }
2924
2925#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2926 retval = dbg_create_files(&udc->gadget.dev);
2927#endif
2928 if (retval)
2929 goto unreg_device;
2930
2931 if (udc->transceiver) {
2932 retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
2933 if (retval)
2934 goto remove_dbg;
2935 }
2936
2937 retval = usb_add_gadget_udc(dev, &udc->gadget);
2938 if (retval)
2939 goto remove_trans;
2940
2941 pm_runtime_no_callbacks(&udc->gadget.dev);
2942 pm_runtime_enable(&udc->gadget.dev);
2943
2944 _udc = udc;
2945 return retval;
2946
2947remove_trans:
2948 if (udc->transceiver) {
2949 otg_set_peripheral(udc->transceiver, NULL); /* detach before dropping the reference */
2950 otg_put_transceiver(udc->transceiver);
2951 }
2952
2953 err("error = %i", retval);
2954remove_dbg:
2955#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2956 dbg_remove_files(&udc->gadget.dev);
2957#endif
2958unreg_device:
2959 device_unregister(&udc->gadget.dev);
2960put_transceiver:
2961 if (udc->transceiver)
2962 otg_put_transceiver(udc->transceiver);
2963free_udc:
2964 kfree(udc);
2965 _udc = NULL;
2966 return retval;
2967}
2968
2969
2970
2971
2972
2973
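/**
 * udc_remove: undo everything done in udc_probe()
 *
 * Deletes the gadget from the UDC core, detaches from the OTG
 * transceiver, removes the debug files and frees the ci13xxx structure.
 */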
2974static void udc_remove(void)
2975{
2976 struct ci13xxx *udc = _udc;
2977
2978 if (udc == NULL) {
2979 err("EINVAL");
2980 return;
2981 }
2982 usb_del_gadget_udc(&udc->gadget);
2983
2984 if (udc->transceiver) {
2985 otg_set_peripheral(udc->transceiver, NULL); /* detach gadget from transceiver */
2986 otg_put_transceiver(udc->transceiver);
2987 }
2988#ifdef CONFIG_USB_GADGET_DEBUG_FILES
2989 dbg_remove_files(&udc->gadget.dev);
2990#endif
2991 device_unregister(&udc->gadget.dev);
2992
2993 kfree(udc);
2994 _udc = NULL;
2995}
2996