/*
 * Intel EG20T PCH / ML7213 and ML7831 IOH USB device controller (UDC) driver.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* Register address offsets */
#define UDC_EP_REG_SHIFT 0x20	/* size of one per-endpoint register block */

#define UDC_EPCTL_ADDR 0x00	/* endpoint control */
#define UDC_EPSTS_ADDR 0x04	/* endpoint status */
#define UDC_BUFIN_FRAMENUM_ADDR 0x08	/* IN buffer size / frame number */
#define UDC_BUFOUT_MAXPKT_ADDR 0x0C	/* OUT buffer size / max packet size */
#define UDC_SUBPTR_ADDR 0x10	/* setup buffer pointer */
#define UDC_DESPTR_ADDR 0x14	/* data descriptor pointer */
#define UDC_CONFIRM_ADDR 0x18	/* write/read confirmation */

#define UDC_DEVCFG_ADDR 0x400	/* device configuration */
#define UDC_DEVCTL_ADDR 0x404	/* device control */
#define UDC_DEVSTS_ADDR 0x408	/* device status */
#define UDC_DEVIRQSTS_ADDR 0x40C	/* device interrupt status */
#define UDC_DEVIRQMSK_ADDR 0x410	/* device interrupt mask */
#define UDC_EPIRQSTS_ADDR 0x414	/* endpoint interrupt status */
#define UDC_EPIRQMSK_ADDR 0x418	/* endpoint interrupt mask */
#define UDC_DEVLPM_ADDR 0x41C	/* LPM control / status */
#define UDC_CSR_BUSY_ADDR 0x4f0	/* UDC_CSR busy status */
#define UDC_SRST_ADDR 0x4fc	/* soft reset */
#define UDC_CSR_ADDR 0x500	/* USB device endpoint configuration registers */

/* Endpoint control register (UDC_EPCTL_ADDR) - bit positions */
#define UDC_EPCTL_MRXFLUSH (1 << 12)
#define UDC_EPCTL_RRDY (1 << 9)
#define UDC_EPCTL_CNAK (1 << 8)
#define UDC_EPCTL_SNAK (1 << 7)
#define UDC_EPCTL_NAK (1 << 6)
#define UDC_EPCTL_P (1 << 3)
#define UDC_EPCTL_F (1 << 1)
#define UDC_EPCTL_S (1 << 0)
#define UDC_EPCTL_ET_SHIFT 4
/* Mask patterns */
#define UDC_EPCTL_ET_MASK 0x00000030
/* Values for the ET (endpoint type) field */
#define UDC_EPCTL_ET_CONTROL 0
#define UDC_EPCTL_ET_ISO 1
#define UDC_EPCTL_ET_BULK 2
#define UDC_EPCTL_ET_INTERRUPT 3

/* Endpoint status register (UDC_EPSTS_ADDR) - bit positions */
#define UDC_EPSTS_XFERDONE (1 << 27)
#define UDC_EPSTS_RSS (1 << 26)
#define UDC_EPSTS_RCS (1 << 25)
#define UDC_EPSTS_TXEMPTY (1 << 24)
#define UDC_EPSTS_TDC (1 << 10)
#define UDC_EPSTS_HE (1 << 9)
#define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
#define UDC_EPSTS_BNA (1 << 7)
#define UDC_EPSTS_IN (1 << 6)
#define UDC_EPSTS_OUT_SHIFT 4
/* Mask patterns */
#define UDC_EPSTS_OUT_MASK 0x00000030
#define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
/* Values for the OUT field */
#define UDC_EPSTS_OUT_SETUP 2
#define UDC_EPSTS_OUT_DATA 1

/* Device configuration register (UDC_DEVCFG_ADDR) - bit positions */
#define UDC_DEVCFG_CSR_PRG (1 << 17)
#define UDC_DEVCFG_SP (1 << 3)
/* Values for the SPD field */
#define UDC_DEVCFG_SPD_HS 0x0
#define UDC_DEVCFG_SPD_FS 0x1
#define UDC_DEVCFG_SPD_LS 0x2

/* Device control register (UDC_DEVCTL_ADDR) - bit positions */
#define UDC_DEVCTL_THLEN_SHIFT 24
#define UDC_DEVCTL_BRLEN_SHIFT 16
#define UDC_DEVCTL_CSR_DONE (1 << 13)
#define UDC_DEVCTL_SD (1 << 10)
#define UDC_DEVCTL_MODE (1 << 9)
#define UDC_DEVCTL_BREN (1 << 8)
#define UDC_DEVCTL_THE (1 << 7)
#define UDC_DEVCTL_DU (1 << 4)
#define UDC_DEVCTL_TDE (1 << 3)
#define UDC_DEVCTL_RDE (1 << 2)
#define UDC_DEVCTL_RES (1 << 0)

/* Device status register (UDC_DEVSTS_ADDR) - field shifts */
#define UDC_DEVSTS_TS_SHIFT 18
#define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
#define UDC_DEVSTS_ALT_SHIFT 8
#define UDC_DEVSTS_INTF_SHIFT 4
#define UDC_DEVSTS_CFG_SHIFT 0
/* Mask patterns */
#define UDC_DEVSTS_TS_MASK 0xfffc0000
#define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
#define UDC_DEVSTS_ALT_MASK 0x00000f00
#define UDC_DEVSTS_INTF_MASK 0x000000f0
#define UDC_DEVSTS_CFG_MASK 0x0000000f
/* Values for the ENUM_SPEED field */
#define UDC_DEVSTS_ENUM_SPEED_FULL 1
#define UDC_DEVSTS_ENUM_SPEED_HIGH 0
#define UDC_DEVSTS_ENUM_SPEED_LOW 2
#define UDC_DEVSTS_ENUM_SPEED_FULLX 3

/* Device interrupt registers (UDC_DEVIRQSTS_ADDR / UDC_DEVIRQMSK_ADDR) - bits */
#define UDC_DEVINT_RWKP (1 << 7)
#define UDC_DEVINT_ENUM (1 << 6)
#define UDC_DEVINT_SOF (1 << 5)
#define UDC_DEVINT_US (1 << 4)
#define UDC_DEVINT_UR (1 << 3)
#define UDC_DEVINT_ES (1 << 2)
#define UDC_DEVINT_SI (1 << 1)
#define UDC_DEVINT_SC (1 << 0)
/* Mask patterns */
#define UDC_DEVINT_MSK 0x7f

/* Endpoint interrupt registers (UDC_EPIRQSTS_ADDR / UDC_EPIRQMSK_ADDR) - bits */
#define UDC_EPINT_IN_SHIFT 0
#define UDC_EPINT_OUT_SHIFT 16
#define UDC_EPINT_IN_EP0 (1 << 0)
#define UDC_EPINT_OUT_EP0 (1 << 16)
/* Mask patterns */
#define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff

/* UDC_CSR busy status register (UDC_CSR_BUSY_ADDR) - bit positions */
#define UDC_CSR_BUSY (1 << 0)

/* Soft reset register (UDC_SRST_ADDR) - bit positions */
#define UDC_PSRST (1 << 1)
#define UDC_SRST (1 << 0)

/* USB device endpoint configuration registers (UDC_CSR_ADDR) - field shifts */
#define UDC_CSR_NE_NUM_SHIFT 0
#define UDC_CSR_NE_DIR_SHIFT 4
#define UDC_CSR_NE_TYPE_SHIFT 5
#define UDC_CSR_NE_CFG_SHIFT 7
#define UDC_CSR_NE_INTF_SHIFT 11
#define UDC_CSR_NE_ALT_SHIFT 15
#define UDC_CSR_NE_MAX_PKT_SHIFT 19
/* Mask patterns */
#define UDC_CSR_NE_NUM_MASK 0x0000000f
#define UDC_CSR_NE_DIR_MASK 0x00000010
#define UDC_CSR_NE_TYPE_MASK 0x00000060
#define UDC_CSR_NE_CFG_MASK 0x00000780
#define UDC_CSR_NE_INTF_MASK 0x00007800
#define UDC_CSR_NE_ALT_MASK 0x00078000
#define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000

#define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + (ep) * 4)
#define PCH_UDC_EPINT(in, num)\
	(1 << ((num) + ((in) ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))

/* Index of endpoint */
#define UDC_EP0IN_IDX 0
#define UDC_EP0OUT_IDX 1
#define UDC_EPIN_IDX(ep) ((ep) * 2)
#define UDC_EPOUT_IDX(ep) ((ep) * 2 + 1)
#define PCH_UDC_EP0 0
#define PCH_UDC_EP1 1
#define PCH_UDC_EP2 2
#define PCH_UDC_EP3 3

/* Number of endpoints */
#define PCH_UDC_EP_NUM 32	/* total endpoints (16 IN, 16 OUT) */
#define PCH_UDC_USED_EP_NUM 4	/* endpoint numbers used by this driver */
/* DMA burst and threshold lengths */
#define PCH_UDC_BRLEN 0x0F	/* burst length */
#define PCH_UDC_THLEN 0x1F	/* threshold length */
/* Endpoint buffer sizes */
#define UDC_EP0IN_BUFF_SIZE 16
#define UDC_EPIN_BUFF_SIZE 256
#define UDC_EP0OUT_BUFF_SIZE 16
#define UDC_EPOUT_BUFF_SIZE 256
/* Endpoint max packet sizes */
#define UDC_EP0IN_MAX_PKT_SIZE 64
#define UDC_EP0OUT_MAX_PKT_SIZE 64
#define UDC_BULK_MAX_PKT_SIZE 512

/* DMA transfer directions */
#define DMA_DIR_RX 1	/* DMA for data receive (OUT) */
#define DMA_DIR_TX 2	/* DMA for data transmit (IN) */
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#define UDC_DMA_MAXPACKET 65536	/* maximum packet size for DMA */

/**
 * struct pch_udc_data_dma_desc - data DMA descriptor as seen by the hardware
 * @status:	status and byte count (PCH_UDC_BUFF_STS / PCH_UDC_RXTX_* bits)
 * @reserved:	reserved
 * @dataptr:	DMA address of the data buffer
 * @next:	DMA address of the next descriptor in the chain
 */
struct pch_udc_data_dma_desc {
	u32 status;
	u32 reserved;
	u32 dataptr;
	u32 next;
};

/**
 * struct pch_udc_stp_dma_desc - SETUP packet DMA descriptor
 * @status:	descriptor status (PCH_UDC_BUFF_STS bits)
 * @reserved:	reserved
 * @request:	SETUP packet written by the hardware
 */
struct pch_udc_stp_dma_desc {
	u32 status;
	u32 reserved;
	struct usb_ctrlrequest request;
} __attribute__((packed));

/* DMA descriptor status definitions */
/* Buffer status (BS) field */
#define PCH_UDC_BUFF_STS 0xC0000000	/* buffer status mask */
#define PCH_UDC_BS_HST_RDY 0x00000000	/* host ready */
#define PCH_UDC_BS_DMA_BSY 0x40000000	/* DMA busy */
#define PCH_UDC_BS_DMA_DONE 0x80000000	/* DMA done */
#define PCH_UDC_BS_HST_BSY 0xC0000000	/* host busy */
/* Rx/Tx status (RTS) field */
#define PCH_UDC_RXTX_STS 0x30000000	/* Rx/Tx status mask */
#define PCH_UDC_RTS_SUCC 0x00000000	/* success */
#define PCH_UDC_RTS_DESERR 0x10000000	/* descriptor error */
#define PCH_UDC_RTS_BUFERR 0x30000000	/* buffer error */
/* Last descriptor indication */
#define PCH_UDC_DMA_LAST 0x08000000
/* Transferred byte count */
#define PCH_UDC_RXTX_BYTES 0x0000ffff

/**
 * struct pch_udc_cfg_data - configuration, interface and alternate setting
 *			     values currently selected by the host
 * @cur_cfg:	current configuration
 * @cur_intf:	current interface
 * @cur_alt:	current alternate setting
 */
struct pch_udc_cfg_data {
	u16 cur_cfg;
	u16 cur_intf;
	u16 cur_alt;
};

/**
 * struct pch_udc_ep - per-endpoint state for this driver
 * @ep:			embedded usb_ep handed to the gadget layer
 * @td_stp_phys:	DMA address of the SETUP descriptor (EP0 OUT only)
 * @td_data_phys:	DMA address of the data descriptor
 * @td_stp:		virtual address of the SETUP descriptor
 * @td_data:		virtual address of the data descriptor
 * @dev:		owning device controller
 * @offset_addr:	offset of this endpoint's register block
 * @desc:		endpoint descriptor currently in use
 * @queue:		list of queued pch_udc_request structures
 * @num:		endpoint number (0..15)
 * @in:			1 for an IN endpoint, 0 for OUT
 * @halted:		endpoint is halted / not accepting requests
 * @epsts:		endpoint status last read by the interrupt handler
 */
struct pch_udc_ep {
	struct usb_ep ep;
	dma_addr_t td_stp_phys;
	dma_addr_t td_data_phys;
	struct pch_udc_stp_dma_desc *td_stp;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_dev *dev;
	unsigned long offset_addr;
	const struct usb_endpoint_descriptor *desc;
	struct list_head queue;
	unsigned num:5,
		 in:1,
		 halted:1;
	unsigned long epsts;
};

/**
 * struct pch_udc_dev - the device controller instance
 * @gadget:		embedded usb_gadget registered with the gadget core
 * @driver:		bound gadget (function) driver
 * @pdev:		underlying PCI device
 * @ep:			per-endpoint state, indexed via UDC_EPIN_IDX()/UDC_EPOUT_IDX()
 * @lock:		protects device and endpoint state
 * @active:		controller is active
 * @stall:		EP0 stall requested
 * @prot_stall:		protocol (functional) stall requested
 * @irq_registered:	IRQ handler has been registered
 * @mem_region:		PCI memory region has been reserved
 * @registered:		gadget driver has been registered
 * @suspended:		controller is suspended
 * @connected:		connected to the host (pull-up enabled)
 * @set_cfg_not_acked:	a SET_CONFIGURATION/SET_INTERFACE still needs CSR_DONE
 * @waiting_zlp_ack:	waiting for a zero-length packet acknowledgement on EP0
 * @data_requests:	DMA pool for data descriptors
 * @stp_requests:	DMA pool for SETUP descriptors
 * @dma_addr:		DMA address used for the EP0 OUT data descriptor
 * @ep0out_buf:		buffer for EP0 OUT data
 * @setup_data:		last SETUP packet received
 * @phys_addr:		physical address of the mapped register region
 * @base_addr:		ioremapped register base
 * @irq:		IRQ line
 * @cfg_data:		current configuration/interface/alternate setting
 */
struct pch_udc_dev {
	struct usb_gadget gadget;
	struct usb_gadget_driver *driver;
	struct pci_dev *pdev;
	struct pch_udc_ep ep[PCH_UDC_EP_NUM];
	spinlock_t lock;
	unsigned active:1,
		 stall:1,
		 prot_stall:1,
		 irq_registered:1,
		 mem_region:1,
		 registered:1,
		 suspended:1,
		 connected:1,
		 set_cfg_not_acked:1,
		 waiting_zlp_ack:1;
	struct pci_pool *data_requests;
	struct pci_pool *stp_requests;
	dma_addr_t dma_addr;
	void *ep0out_buf;
	struct usb_ctrlrequest setup_data;
	unsigned long phys_addr;
	void __iomem *base_addr;
	unsigned irq;
	struct pch_udc_cfg_data cfg_data;
};

#define PCH_UDC_PCI_BAR 1
#define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
#define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808

static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock);
struct pch_udc_dev *pch_udc;
static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");

/**
 * struct pch_udc_request - per-request state wrapping a usb_request
 * @req:		embedded usb_request
 * @td_data_phys:	DMA address of the first data descriptor
 * @td_data:		first data descriptor of the chain
 * @td_data_last:	last data descriptor of the chain
 * @queue:		link in the owning endpoint's queue
 * @dma_going:		DMA transfer for this request is in progress
 * @dma_mapped:		request buffer was DMA-mapped by this driver
 * @dma_done:		DMA transfer has completed
 * @chain_len:		number of descriptors in the chain
 * @buf:		bounce buffer used when the caller's buffer is unaligned
 * @dma:		DMA address of @buf
 */
struct pch_udc_request {
	struct usb_request req;
	dma_addr_t td_data_phys;
	struct pch_udc_data_dma_desc *td_data;
	struct pch_udc_data_dma_desc *td_data_last;
	struct list_head queue;
	unsigned dma_going:1,
		 dma_mapped:1,
		 dma_done:1;
	unsigned chain_len;
	void *buf;
	dma_addr_t dma;
};

static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
{
	return ioread32(dev->base_addr + reg);
}

static inline void pch_udc_writel(struct pch_udc_dev *dev,
				  unsigned long val, unsigned long reg)
{
	iowrite32(val, dev->base_addr + reg);
}

static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
}

static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
				   unsigned long reg,
				   unsigned long bitmask)
{
	pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
}

static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
{
	return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
}

static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
				     unsigned long val, unsigned long reg)
{
	iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
}

static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
}

static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
				      unsigned long reg,
				      unsigned long bitmask)
{
	pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
}
443
444
445
446
447
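/**
 * pch_udc_csr_busy() - Wait for the CSR busy flag to clear
 * @dev:	Reference to pch_udc_dev structure
 *
 * Spins, up to a bounded count, until UDC_CSR_BUSY is deasserted so the
 * endpoint configuration registers can be accessed safely.
 */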
448static void pch_udc_csr_busy(struct pch_udc_dev *dev)
449{
450 unsigned int count = 200;
451
452
453 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
454 && --count)
455 cpu_relax();
456 if (!count)
457 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
458}
459
460
461
462
463
464
465
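/**
 * pch_udc_write_csr() - Write an endpoint configuration (CSR) register
 * @dev:	Reference to pch_udc_dev structure
 * @val:	Value to write
 * @ep:	Endpoint index whose CSR entry is written
 */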
466static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
467 unsigned int ep)
468{
469 unsigned long reg = PCH_UDC_CSR(ep);
470
471 pch_udc_csr_busy(dev);
472 pch_udc_writel(dev, val, reg);
473 pch_udc_csr_busy(dev);
474}
475
476
477
478
479
480
481
482
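/**
 * pch_udc_read_csr() - Read an endpoint configuration (CSR) register
 * @dev:	Reference to pch_udc_dev structure
 * @ep:	Endpoint index whose CSR entry is read
 *
 * A dummy read is issued first; the value of the second read, taken after
 * the busy flag has cleared again, is returned.
 */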
483static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
484{
485 unsigned long reg = PCH_UDC_CSR(ep);
486
487 pch_udc_csr_busy(dev);
488 pch_udc_readl(dev, reg);
489 pch_udc_csr_busy(dev);
490 return pch_udc_readl(dev, reg);
491}
492
493
494
495
496
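/**
 * pch_udc_rmt_wakeup() - Initiate remote wakeup (resume) signalling
 * @dev:	Reference to pch_udc_dev structure
 */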
497static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
498{
499 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
500 mdelay(1);
501 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
502}
503
504
505
506
507
508
509static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
510{
511 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
512 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
513}
514
515
516
517
518
519static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
520{
521 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
522}
523
524
525
526
527
528static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
529{
530 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
531}
532
533
534
535
536
537static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
538{
539 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
540}
541
542
543
544
545
546static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
547{
548
549 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
550 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
551 mdelay(1);
552
553 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
554}
555
556
557
558
559
560
561
562
563static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
564 int is_active)
565{
566 if (is_active)
567 pch_udc_clear_disconnect(dev);
568 else
569 pch_udc_set_disconnect(dev);
570}
571
572
573
574
575
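/**
 * pch_udc_ep_set_stall() - Stall the endpoint
 * @ep:	Reference to the endpoint structure
 *
 * IN endpoints are flushed before the stall bit is set.
 */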
576static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
577{
578 if (ep->in) {
579 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
580 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
581 } else {
582 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
583 }
584}
585
586
587
588
589
590static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
591{
592
593 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
594
595 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
596}
597
598
599
600
601
602
603static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
604 u8 type)
605{
606 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
607 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
608}
609
610
611
612
613
614
615static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
616 u32 buf_size, u32 ep_in)
617{
618 u32 data;
619 if (ep_in) {
620 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
621 data = (data & 0xffff0000) | (buf_size & 0xffff);
622 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
623 } else {
624 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
625 data = (buf_size << 16) | (data & 0xffff);
626 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
627 }
628}
629
630
631
632
633
634
635static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
636{
637 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
638 data = (data & 0xffff0000) | (pkt_size & 0xffff);
639 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
640}
641
642
643
644
645
646
647static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
648{
649 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
650}
651
652
653
654
655
656
657static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
658{
659 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
660}
661
662
663
664
665
666static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
667{
668 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
669}
670
671
672
673
674
675static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
676{
677 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
678}
679
680
681
682
683
684static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
685{
686 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
687}
688
689
690
691
692
693
694
695
696
697static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
698{
699 if (dir == DMA_DIR_RX)
700 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
701 else if (dir == DMA_DIR_TX)
702 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
703}
704
705
706
707
708
709
710
711
712
713static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
714{
715 if (dir == DMA_DIR_RX)
716 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
717 else if (dir == DMA_DIR_TX)
718 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
719}
720
721
722
723
724
725
726static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
727{
728 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
729}
730
731
732
733
734
735
736static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
737 u32 mask)
738{
739 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
740}
741
742
743
744
745
746
747static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
748 u32 mask)
749{
750 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
751}
752
753
754
755
756
757
758static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
759 u32 mask)
760{
761 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
762}
763
764
765
766
767
768
769static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
770 u32 mask)
771{
772 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
773}
774
775
776
777
778
779
780static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
781{
782 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
783}
784
785
786
787
788
789
790static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
791 u32 val)
792{
793 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
794}
795
796
797
798
799
800
801static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
802{
803 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
804}
805
806
807
808
809
810
811static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
812 u32 val)
813{
814 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
815}
816
817
818
819
820
821
822static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
823{
824 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
825}
826
827
828
829
830
831
832static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
833{
834 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
835}
836
837
838
839
840
841
static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
{
	pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
}
846
847
848
849
850
851
852static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
853{
854 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
855}
856
857
858
859
860
861
static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
					   u32 stat)
{
	pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
}
867
868
869
870
871
872
873static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
874{
875 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
876}
877
878
879
880
881
882
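/**
 * pch_udc_ep_clear_nak() - Clear the NAK condition on an endpoint
 * @ep:	Reference to the endpoint structure
 *
 * For OUT endpoints the receive FIFO must drain first; the CNAK write is
 * retried until the NAK bit reads back clear or the loop count expires.
 */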
883static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
884{
885 unsigned int loopcnt = 0;
886 struct pch_udc_dev *dev = ep->dev;
887
888 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
889 return;
890 if (!ep->in) {
891 loopcnt = 10000;
892 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
893 --loopcnt)
894 udelay(5);
895 if (!loopcnt)
896 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
897 __func__);
898 }
899 loopcnt = 10000;
900 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
901 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
902 udelay(5);
903 }
904 if (!loopcnt)
905 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
906 __func__, ep->num, (ep->in ? "in" : "out"));
907}
908
909
910
911
912
913
914
915
916static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
917{
918 if (dir) {
919 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
920 return;
921 }
922}
923
924
925
926
927
928
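/**
 * pch_udc_ep_enable() - Configure an endpoint in hardware
 * @ep:	Reference to the endpoint structure
 * @cfg:	Current configuration/interface/alternate setting
 * @desc:	USB endpoint descriptor to program
 *
 * Programs the transfer type, buffer size and max packet size, then writes
 * the per-endpoint UDC CSR entry.
 */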
929static void pch_udc_ep_enable(struct pch_udc_ep *ep,
930 struct pch_udc_cfg_data *cfg,
931 const struct usb_endpoint_descriptor *desc)
932{
933 u32 val = 0;
934 u32 buff_size = 0;
935
936 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
937 if (ep->in)
938 buff_size = UDC_EPIN_BUFF_SIZE;
939 else
940 buff_size = UDC_EPOUT_BUFF_SIZE;
941 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
942 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
943 pch_udc_ep_set_nak(ep);
944 pch_udc_ep_fifo_flush(ep, ep->in);
945
946 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
947 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
948 UDC_CSR_NE_TYPE_SHIFT) |
949 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
950 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
951 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
952 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
953
954 if (ep->in)
955 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
956 else
957 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
958}
959
960
961
962
963
964static void pch_udc_ep_disable(struct pch_udc_ep *ep)
965{
966 if (ep->in) {
967
968 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
969
970 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
971 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
972 } else {
973
974 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
975 }
976
977 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
978}
979
980
981
982
983
984static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
985{
986 unsigned int count = 10000;
987
988
989 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
990 udelay(5);
991 if (!count)
992 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
993}
994
995
996
997
998
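/**
 * pch_udc_init() - Soft-reset and initialize the device controller
 * @dev:	Reference to pch_udc_dev structure
 *
 * Resets the controller, masks and clears all interrupts, selects the
 * device speed (full speed when the speed_fs module parameter is set,
 * high speed otherwise) and programs the burst/threshold parameters.
 */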
999static void pch_udc_init(struct pch_udc_dev *dev)
1000{
1001 if (NULL == dev) {
1002 pr_err("%s: Invalid address\n", __func__);
1003 return;
1004 }
1005
1006 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1007 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1008 mdelay(1);
1009 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1010 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1011 mdelay(1);
1012
1013 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1014 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1015
1016
1017 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1018 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1019
1020
1021 if (speed_fs)
1022 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1023 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1024 else
1025 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1026 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1027 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1028 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1029 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1030 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1031 UDC_DEVCTL_THE);
1032}
1033
1034
1035
1036
1037
1038static void pch_udc_exit(struct pch_udc_dev *dev)
1039{
1040
1041 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1042
1043 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1044
1045 pch_udc_set_disconnect(dev);
1046}
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1057{
1058 struct pch_udc_dev *dev;
1059
1060 if (!gadget)
1061 return -EINVAL;
1062 dev = container_of(gadget, struct pch_udc_dev, gadget);
1063 return pch_udc_get_frame(dev);
1064}
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1075{
1076 struct pch_udc_dev *dev;
1077 unsigned long flags;
1078
1079 if (!gadget)
1080 return -EINVAL;
1081 dev = container_of(gadget, struct pch_udc_dev, gadget);
1082 spin_lock_irqsave(&dev->lock, flags);
1083 pch_udc_rmt_wakeup(dev);
1084 spin_unlock_irqrestore(&dev->lock, flags);
1085 return 0;
1086}
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1099{
1100 struct pch_udc_dev *dev;
1101
1102 if (!gadget)
1103 return -EINVAL;
1104 dev = container_of(gadget, struct pch_udc_dev, gadget);
1105 if (value)
1106 pch_udc_set_selfpowered(dev);
1107 else
1108 pch_udc_clear_selfpowered(dev);
1109 return 0;
1110}
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1123{
1124 struct pch_udc_dev *dev;
1125
1126 if (!gadget)
1127 return -EINVAL;
1128 dev = container_of(gadget, struct pch_udc_dev, gadget);
1129 pch_udc_vbus_session(dev, is_on);
1130 return 0;
1131}
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1145{
1146 struct pch_udc_dev *dev;
1147
1148 if (!gadget)
1149 return -EINVAL;
1150 dev = container_of(gadget, struct pch_udc_dev, gadget);
1151 pch_udc_vbus_session(dev, is_active);
1152 return 0;
1153}
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1167{
1168 return -EOPNOTSUPP;
1169}
1170
1171static int pch_udc_start(struct usb_gadget_driver *driver,
1172 int (*bind)(struct usb_gadget *));
1173static int pch_udc_stop(struct usb_gadget_driver *driver);
1174static const struct usb_gadget_ops pch_udc_ops = {
1175 .get_frame = pch_udc_pcd_get_frame,
1176 .wakeup = pch_udc_pcd_wakeup,
1177 .set_selfpowered = pch_udc_pcd_selfpowered,
1178 .pullup = pch_udc_pcd_pullup,
1179 .vbus_session = pch_udc_pcd_vbus_session,
1180 .vbus_draw = pch_udc_pcd_vbus_draw,
1181 .start = pch_udc_start,
1182 .stop = pch_udc_stop,
1183};
1184
1185
1186
1187
1188
1189
1190
1191
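/**
 * complete_req() - Unmap, dequeue and complete a request
 * @ep:	Reference to the endpoint structure
 * @req:	Request to complete
 * @status:	Completion status to report (kept if already set)
 *
 * Called with the device lock held; the lock is dropped around the gadget
 * completion callback.
 */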
1192static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1193 int status)
1194{
1195 struct pch_udc_dev *dev;
1196 unsigned halted = ep->halted;
1197
1198 list_del_init(&req->queue);
1199
1200
1201 if (req->req.status == -EINPROGRESS)
1202 req->req.status = status;
1203 else
1204 status = req->req.status;
1205
1206 dev = ep->dev;
1207 if (req->dma_mapped) {
1208 if (req->dma == DMA_ADDR_INVALID) {
1209 if (ep->in)
1210 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1211 req->req.length,
1212 DMA_TO_DEVICE);
1213 else
1214 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1215 req->req.length,
1216 DMA_FROM_DEVICE);
1217 req->req.dma = DMA_ADDR_INVALID;
1218 } else {
1219 if (ep->in)
1220 dma_unmap_single(&dev->pdev->dev, req->dma,
1221 req->req.length,
1222 DMA_TO_DEVICE);
1223 else {
1224 dma_unmap_single(&dev->pdev->dev, req->dma,
1225 req->req.length,
1226 DMA_FROM_DEVICE);
1227 memcpy(req->req.buf, req->buf, req->req.length);
1228 }
1229 kfree(req->buf);
1230 req->dma = DMA_ADDR_INVALID;
1231 }
1232 req->dma_mapped = 0;
1233 }
1234 ep->halted = 1;
1235 spin_unlock(&dev->lock);
1236 if (!ep->in)
1237 pch_udc_ep_clear_rrdy(ep);
1238 req->req.complete(&ep->ep, &req->req);
1239 spin_lock(&dev->lock);
1240 ep->halted = halted;
1241}
1242
1243
1244
1245
1246
1247static void empty_req_queue(struct pch_udc_ep *ep)
1248{
1249 struct pch_udc_request *req;
1250
1251 ep->halted = 1;
1252 while (!list_empty(&ep->queue)) {
1253 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1254 complete_req(ep, req, -ESHUTDOWN);
1255 }
1256}
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1268 struct pch_udc_request *req)
1269{
1270 struct pch_udc_data_dma_desc *td = req->td_data;
1271 unsigned i = req->chain_len;
1272
1273 dma_addr_t addr2;
1274 dma_addr_t addr = (dma_addr_t)td->next;
1275 td->next = 0x00;
1276 for (; i > 1; --i) {
1277
1278 td = phys_to_virt(addr);
1279 addr2 = (dma_addr_t)td->next;
1280 pci_pool_free(dev->data_requests, td, addr);
1281 td->next = 0x00;
1282 addr = addr2;
1283 }
1284 req->chain_len = 1;
1285}
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
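/**
 * pch_udc_create_dma_chain() - Build a descriptor chain for a request
 * @ep:	Reference to the endpoint structure
 * @req:	Request whose buffer is to be described
 * @buf_len:	Maximum bytes covered by each descriptor
 * @gfp_flags:	Allocation flags for additional descriptors
 *
 * Splits the request into buf_len sized chunks with one DMA descriptor per
 * chunk; the last descriptor is marked PCH_UDC_DMA_LAST and linked back to
 * the first. Returns 0 on success or -ENOMEM.
 */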
1299static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1300 struct pch_udc_request *req,
1301 unsigned long buf_len,
1302 gfp_t gfp_flags)
1303{
1304 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1305 unsigned long bytes = req->req.length, i = 0;
1306 dma_addr_t dma_addr;
1307 unsigned len = 1;
1308
1309 if (req->chain_len > 1)
1310 pch_udc_free_dma_chain(ep->dev, req);
1311
1312 if (req->dma == DMA_ADDR_INVALID)
1313 td->dataptr = req->req.dma;
1314 else
1315 td->dataptr = req->dma;
1316
1317 td->status = PCH_UDC_BS_HST_BSY;
1318 for (; ; bytes -= buf_len, ++len) {
1319 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1320 if (bytes <= buf_len)
1321 break;
1322 last = td;
1323 td = pci_pool_alloc(ep->dev->data_requests, gfp_flags,
1324 &dma_addr);
1325 if (!td)
1326 goto nomem;
1327 i += buf_len;
1328 td->dataptr = req->td_data->dataptr + i;
1329 last->next = dma_addr;
1330 }
1331
1332 req->td_data_last = td;
1333 td->status |= PCH_UDC_DMA_LAST;
1334 td->next = req->td_data_phys;
1335 req->chain_len = len;
1336 return 0;
1337
1338nomem:
1339 if (len > 1) {
1340 req->chain_len = len;
1341 pch_udc_free_dma_chain(ep->dev, req);
1342 }
1343 req->chain_len = 1;
1344 return -ENOMEM;
1345}
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1359 gfp_t gfp)
1360{
1361 int retval;
1362
1363
1364 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1365 if (retval) {
1366 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1367 return retval;
1368 }
1369 if (ep->in)
1370 req->td_data->status = (req->td_data->status &
1371 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1372 return 0;
1373}
1374
1375
1376
1377
1378
1379
1380
1381static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1382{
1383 struct pch_udc_dev *dev = ep->dev;
1384
1385
1386 complete_req(ep, req, 0);
1387
1388
1389
1390
1391 if (dev->set_cfg_not_acked) {
1392 pch_udc_set_csr_done(dev);
1393 dev->set_cfg_not_acked = 0;
1394 }
1395
1396 if (!dev->stall && dev->waiting_zlp_ack) {
1397 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1398 dev->waiting_zlp_ack = 0;
1399 }
1400}
1401
1402
1403
1404
1405
1406
1407static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1408 struct pch_udc_request *req)
1409{
1410 struct pch_udc_data_dma_desc *td_data;
1411
1412 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1413 td_data = req->td_data;
1414
1415 while (1) {
1416 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1417 PCH_UDC_BS_HST_RDY;
1418 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1419 break;
1420 td_data = phys_to_virt(td_data->next);
1421 }
1422
1423 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1424 req->dma_going = 1;
1425 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1426 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1427 pch_udc_ep_clear_nak(ep);
1428 pch_udc_ep_set_rrdy(ep);
1429}
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1443 const struct usb_endpoint_descriptor *desc)
1444{
1445 struct pch_udc_ep *ep;
1446 struct pch_udc_dev *dev;
1447 unsigned long iflags;
1448
1449 if (!usbep || (usbep->name == ep0_string) || !desc ||
1450 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1451 return -EINVAL;
1452
1453 ep = container_of(usbep, struct pch_udc_ep, ep);
1454 dev = ep->dev;
1455 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1456 return -ESHUTDOWN;
1457 spin_lock_irqsave(&dev->lock, iflags);
1458 ep->desc = desc;
1459 ep->halted = 0;
1460 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1461 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1462 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1463 spin_unlock_irqrestore(&dev->lock, iflags);
1464 return 0;
1465}
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1477{
1478 struct pch_udc_ep *ep;
1479 struct pch_udc_dev *dev;
1480 unsigned long iflags;
1481
1482 if (!usbep)
1483 return -EINVAL;
1484
1485 ep = container_of(usbep, struct pch_udc_ep, ep);
1486 dev = ep->dev;
1487 if ((usbep->name == ep0_string) || !ep->desc)
1488 return -EINVAL;
1489
1490 spin_lock_irqsave(&ep->dev->lock, iflags);
1491 empty_req_queue(ep);
1492 ep->halted = 1;
1493 pch_udc_ep_disable(ep);
1494 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1495 ep->desc = NULL;
1496 ep->ep.desc = NULL;
1497 INIT_LIST_HEAD(&ep->queue);
1498 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1499 return 0;
1500}
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1513 gfp_t gfp)
1514{
1515 struct pch_udc_request *req;
1516 struct pch_udc_ep *ep;
1517 struct pch_udc_data_dma_desc *dma_desc;
1518 struct pch_udc_dev *dev;
1519
1520 if (!usbep)
1521 return NULL;
1522 ep = container_of(usbep, struct pch_udc_ep, ep);
1523 dev = ep->dev;
1524 req = kzalloc(sizeof *req, gfp);
1525 if (!req)
1526 return NULL;
1527 req->req.dma = DMA_ADDR_INVALID;
1528 req->dma = DMA_ADDR_INVALID;
1529 INIT_LIST_HEAD(&req->queue);
1530 if (!ep->dev->dma_addr)
1531 return &req->req;
1532
1533 dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
1534 &req->td_data_phys);
1535 if (NULL == dma_desc) {
1536 kfree(req);
1537 return NULL;
1538 }
1539
1540 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1541 dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
1542 req->td_data = dma_desc;
1543 req->td_data_last = dma_desc;
1544 req->chain_len = 1;
1545 return &req->req;
1546}
1547
1548
1549
1550
1551
1552
1553
1554static void pch_udc_free_request(struct usb_ep *usbep,
1555 struct usb_request *usbreq)
1556{
1557 struct pch_udc_ep *ep;
1558 struct pch_udc_request *req;
1559 struct pch_udc_dev *dev;
1560
1561 if (!usbep || !usbreq)
1562 return;
1563 ep = container_of(usbep, struct pch_udc_ep, ep);
1564 req = container_of(usbreq, struct pch_udc_request, req);
1565 dev = ep->dev;
1566 if (!list_empty(&req->queue))
1567 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1568 __func__, usbep->name, req);
1569 if (req->td_data != NULL) {
1570 if (req->chain_len > 1)
1571 pch_udc_free_dma_chain(ep->dev, req);
1572 pci_pool_free(ep->dev->data_requests, req->td_data,
1573 req->td_data_phys);
1574 }
1575 kfree(req);
1576}
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
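/**
 * pch_udc_pcd_queue() - usb_ep_ops.queue handler
 * @usbep:	Endpoint the request is queued on
 * @usbreq:	Request to queue
 * @gfp:	Allocation flags
 *
 * Maps the request buffer for DMA (copying through a bounce buffer when the
 * caller's buffer is not 4-byte aligned), builds the descriptor chain and,
 * if the endpoint queue was empty, starts the transfer immediately.
 */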
1589static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1590 gfp_t gfp)
1591{
1592 int retval = 0;
1593 struct pch_udc_ep *ep;
1594 struct pch_udc_dev *dev;
1595 struct pch_udc_request *req;
1596 unsigned long iflags;
1597
1598 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1599 return -EINVAL;
1600 ep = container_of(usbep, struct pch_udc_ep, ep);
1601 dev = ep->dev;
1602 if (!ep->desc && ep->num)
1603 return -EINVAL;
1604 req = container_of(usbreq, struct pch_udc_request, req);
1605 if (!list_empty(&req->queue))
1606 return -EINVAL;
1607 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1608 return -ESHUTDOWN;
1609 spin_lock_irqsave(&dev->lock, iflags);
1610
1611 if (usbreq->length &&
1612 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1613 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1614 if (ep->in)
1615 usbreq->dma = dma_map_single(&dev->pdev->dev,
1616 usbreq->buf,
1617 usbreq->length,
1618 DMA_TO_DEVICE);
1619 else
1620 usbreq->dma = dma_map_single(&dev->pdev->dev,
1621 usbreq->buf,
1622 usbreq->length,
1623 DMA_FROM_DEVICE);
1624 } else {
1625 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1626 if (!req->buf) {
1627 retval = -ENOMEM;
1628 goto probe_end;
1629 }
1630 if (ep->in) {
1631 memcpy(req->buf, usbreq->buf, usbreq->length);
1632 req->dma = dma_map_single(&dev->pdev->dev,
1633 req->buf,
1634 usbreq->length,
1635 DMA_TO_DEVICE);
1636 } else
1637 req->dma = dma_map_single(&dev->pdev->dev,
1638 req->buf,
1639 usbreq->length,
1640 DMA_FROM_DEVICE);
1641 }
1642 req->dma_mapped = 1;
1643 }
1644 if (usbreq->length > 0) {
1645 retval = prepare_dma(ep, req, GFP_ATOMIC);
1646 if (retval)
1647 goto probe_end;
1648 }
1649 usbreq->actual = 0;
1650 usbreq->status = -EINPROGRESS;
1651 req->dma_done = 0;
1652 if (list_empty(&ep->queue) && !ep->halted) {
1653
1654 if (!usbreq->length) {
1655 process_zlp(ep, req);
1656 retval = 0;
1657 goto probe_end;
1658 }
1659 if (!ep->in) {
1660 pch_udc_start_rxrequest(ep, req);
1661 } else {
1662
1663
1664
1665
1666
1667 pch_udc_wait_ep_stall(ep);
1668 pch_udc_ep_clear_nak(ep);
1669 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1670 }
1671 }
1672
1673 if (req != NULL)
1674 list_add_tail(&req->queue, &ep->queue);
1675
1676probe_end:
1677 spin_unlock_irqrestore(&dev->lock, iflags);
1678 return retval;
1679}
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1692 struct usb_request *usbreq)
1693{
	struct pch_udc_ep *ep;
	struct pch_udc_request *req;
	struct pch_udc_dev *dev;
	unsigned long flags;
	int ret = -EINVAL;

	if (!usbep || !usbreq)
		return ret;
	ep = container_of(usbep, struct pch_udc_ep, ep);
	dev = ep->dev;
	if (!ep->desc && ep->num)
		return ret;
	req = container_of(usbreq, struct pch_udc_request, req);
	spin_lock_irqsave(&ep->dev->lock, flags);
1706
1707 list_for_each_entry(req, &ep->queue, queue) {
1708 if (&req->req == usbreq) {
1709 pch_udc_ep_set_nak(ep);
1710 if (!list_empty(&req->queue))
1711 complete_req(ep, req, -ECONNRESET);
1712 ret = 0;
1713 break;
1714 }
1715 }
1716 spin_unlock_irqrestore(&ep->dev->lock, flags);
1717 return ret;
1718}
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
1731{
1732 struct pch_udc_ep *ep;
1733 struct pch_udc_dev *dev;
1734 unsigned long iflags;
1735 int ret;
1736
1737 if (!usbep)
1738 return -EINVAL;
1739 ep = container_of(usbep, struct pch_udc_ep, ep);
1740 dev = ep->dev;
1741 if (!ep->desc && !ep->num)
1742 return -EINVAL;
1743 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1744 return -ESHUTDOWN;
1745 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1746 if (list_empty(&ep->queue)) {
1747 if (halt) {
1748 if (ep->num == PCH_UDC_EP0)
1749 ep->dev->stall = 1;
1750 pch_udc_ep_set_stall(ep);
1751 pch_udc_enable_ep_interrupts(ep->dev,
1752 PCH_UDC_EPINT(ep->in,
1753 ep->num));
1754 } else {
1755 pch_udc_ep_clear_stall(ep);
1756 }
1757 ret = 0;
1758 } else {
1759 ret = -EAGAIN;
1760 }
1761 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1762 return ret;
1763}
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
1776{
1777 struct pch_udc_ep *ep;
1778 struct pch_udc_dev *dev;
1779 unsigned long iflags;
1780 int ret;
1781
1782 if (!usbep)
1783 return -EINVAL;
1784 ep = container_of(usbep, struct pch_udc_ep, ep);
1785 dev = ep->dev;
1786 if (!ep->desc && !ep->num)
1787 return -EINVAL;
1788 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
1789 return -ESHUTDOWN;
1790 spin_lock_irqsave(&udc_stall_spinlock, iflags);
1791 if (!list_empty(&ep->queue)) {
1792 ret = -EAGAIN;
1793 } else {
1794 if (ep->num == PCH_UDC_EP0)
1795 ep->dev->stall = 1;
1796 pch_udc_ep_set_stall(ep);
1797 pch_udc_enable_ep_interrupts(ep->dev,
1798 PCH_UDC_EPINT(ep->in, ep->num));
1799 ep->dev->prot_stall = 1;
1800 ret = 0;
1801 }
1802 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
1803 return ret;
1804}
1805
1806
1807
1808
1809
1810static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
1811{
1812 struct pch_udc_ep *ep;
1813
1814 if (!usbep)
1815 return;
1816
1817 ep = container_of(usbep, struct pch_udc_ep, ep);
1818 if (ep->desc || !ep->num)
1819 pch_udc_ep_fifo_flush(ep, ep->in);
1820}
1821
1822static const struct usb_ep_ops pch_udc_ep_ops = {
1823 .enable = pch_udc_pcd_ep_enable,
1824 .disable = pch_udc_pcd_ep_disable,
1825 .alloc_request = pch_udc_alloc_request,
1826 .free_request = pch_udc_free_request,
1827 .queue = pch_udc_pcd_queue,
1828 .dequeue = pch_udc_pcd_dequeue,
1829 .set_halt = pch_udc_pcd_set_halt,
1830 .set_wedge = pch_udc_pcd_set_wedge,
1831 .fifo_status = NULL,
1832 .fifo_flush = pch_udc_pcd_fifo_flush,
1833};
1834
1835
1836
1837
1838
1839static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
1840{
1841 static u32 pky_marker;
1842
1843 if (!td_stp)
1844 return;
1845 td_stp->reserved = ++pky_marker;
1846 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
1847 td_stp->status = PCH_UDC_BS_HST_RDY;
1848}
1849
1850
1851
1852
1853
1854
1855static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
1856{
1857 struct pch_udc_request *req;
1858 struct pch_udc_data_dma_desc *td_data;
1859
1860 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
1861 return;
1862
1863 if (list_empty(&ep->queue))
1864 return;
1865
1866
1867 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1868 if (req->dma_going)
1869 return;
1870 if (!req->td_data)
1871 return;
1872 pch_udc_wait_ep_stall(ep);
1873 req->dma_going = 1;
1874 pch_udc_ep_set_ddptr(ep, 0);
1875 td_data = req->td_data;
1876 while (1) {
1877 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1878 PCH_UDC_BS_HST_RDY;
1879 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1880 break;
1881 td_data = phys_to_virt(td_data->next);
1882 }
1883 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1884 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
1885 pch_udc_ep_set_pd(ep);
1886 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1887 pch_udc_ep_clear_nak(ep);
1888}
1889
1890
1891
1892
1893
1894static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
1895{
1896 struct pch_udc_request *req;
1897 struct pch_udc_dev *dev = ep->dev;
1898
1899 if (list_empty(&ep->queue))
1900 return;
1901 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1902 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
1903 PCH_UDC_BS_DMA_DONE)
1904 return;
1905 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
1906 PCH_UDC_RTS_SUCC) {
1907 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
1908 "epstatus=0x%08x\n",
1909 (req->td_data_last->status & PCH_UDC_RXTX_STS),
1910 (int)(ep->epsts));
1911 return;
1912 }
1913
1914 req->req.actual = req->req.length;
1915 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
1916 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
1917 complete_req(ep, req, 0);
1918 req->dma_going = 0;
1919 if (!list_empty(&ep->queue)) {
1920 pch_udc_wait_ep_stall(ep);
1921 pch_udc_ep_clear_nak(ep);
1922 pch_udc_enable_ep_interrupts(ep->dev,
1923 PCH_UDC_EPINT(ep->in, ep->num));
1924 } else {
1925 pch_udc_disable_ep_interrupts(ep->dev,
1926 PCH_UDC_EPINT(ep->in, ep->num));
1927 }
1928}
1929
1930
1931
1932
1933
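/**
 * pch_udc_complete_receiver() - Complete an OUT (receive) transfer
 * @ep:	Reference to the endpoint structure
 *
 * Walks the descriptor chain to find the received byte count, completes the
 * head request and starts the next queued receive, if any.
 */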
1934static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
1935{
1936 struct pch_udc_request *req;
1937 struct pch_udc_dev *dev = ep->dev;
1938 unsigned int count;
1939 struct pch_udc_data_dma_desc *td;
1940 dma_addr_t addr;
1941
1942 if (list_empty(&ep->queue))
1943 return;
1944
1945 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1946 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1947 pch_udc_ep_set_ddptr(ep, 0);
1948 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
1949 PCH_UDC_BS_DMA_DONE)
1950 td = req->td_data_last;
1951 else
1952 td = req->td_data;
1953
1954 while (1) {
1955 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
1956 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
1957 "epstatus=0x%08x\n",
1958 (req->td_data->status & PCH_UDC_RXTX_STS),
1959 (int)(ep->epsts));
1960 return;
1961 }
		if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
			/* take the byte count from the last descriptor */
			if (td->status & PCH_UDC_DMA_LAST) {
				count = td->status & PCH_UDC_RXTX_BYTES;
				break;
			}
1967 if (td == req->td_data_last) {
1968 dev_err(&dev->pdev->dev, "Not complete RX descriptor");
1969 return;
1970 }
1971 addr = (dma_addr_t)td->next;
1972 td = phys_to_virt(addr);
1973 }
1974
1975 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
1976 count = UDC_DMA_MAXPACKET;
1977 req->td_data->status |= PCH_UDC_DMA_LAST;
1978 td->status |= PCH_UDC_BS_HST_BSY;
1979
1980 req->dma_going = 0;
1981 req->req.actual = count;
1982 complete_req(ep, req, 0);
1983
1984 if (!list_empty(&ep->queue)) {
1985 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1986 pch_udc_start_rxrequest(ep, req);
1987 }
1988}
1989
1990
1991
1992
1993
1994
1995
1996static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
1997{
1998 u32 epsts;
1999 struct pch_udc_ep *ep;
2000
2001 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2002 epsts = ep->epsts;
2003 ep->epsts = 0;
2004
2005 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2006 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2007 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2008 return;
2009 if ((epsts & UDC_EPSTS_BNA))
2010 return;
2011 if (epsts & UDC_EPSTS_HE)
2012 return;
2013 if (epsts & UDC_EPSTS_RSS) {
2014 pch_udc_ep_set_stall(ep);
2015 pch_udc_enable_ep_interrupts(ep->dev,
2016 PCH_UDC_EPINT(ep->in, ep->num));
2017 }
2018 if (epsts & UDC_EPSTS_RCS) {
2019 if (!dev->prot_stall) {
2020 pch_udc_ep_clear_stall(ep);
2021 } else {
2022 pch_udc_ep_set_stall(ep);
2023 pch_udc_enable_ep_interrupts(ep->dev,
2024 PCH_UDC_EPINT(ep->in, ep->num));
2025 }
2026 }
2027 if (epsts & UDC_EPSTS_TDC)
2028 pch_udc_complete_transfer(ep);
2029
2030 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2031 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2032 pch_udc_start_next_txrequest(ep);
2033}
2034
2035
2036
2037
2038
2039
2040static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2041{
2042 u32 epsts;
2043 struct pch_udc_ep *ep;
2044 struct pch_udc_request *req = NULL;
2045
2046 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2047 epsts = ep->epsts;
2048 ep->epsts = 0;
2049
2050 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2051
2052 req = list_entry(ep->queue.next, struct pch_udc_request,
2053 queue);
2054 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2055 PCH_UDC_BS_DMA_DONE) {
2056 if (!req->dma_going)
2057 pch_udc_start_rxrequest(ep, req);
2058 return;
2059 }
2060 }
2061 if (epsts & UDC_EPSTS_HE)
2062 return;
2063 if (epsts & UDC_EPSTS_RSS) {
2064 pch_udc_ep_set_stall(ep);
2065 pch_udc_enable_ep_interrupts(ep->dev,
2066 PCH_UDC_EPINT(ep->in, ep->num));
2067 }
2068 if (epsts & UDC_EPSTS_RCS) {
2069 if (!dev->prot_stall) {
2070 pch_udc_ep_clear_stall(ep);
2071 } else {
2072 pch_udc_ep_set_stall(ep);
2073 pch_udc_enable_ep_interrupts(ep->dev,
2074 PCH_UDC_EPINT(ep->in, ep->num));
2075 }
2076 }
2077 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2078 UDC_EPSTS_OUT_DATA) {
2079 if (ep->dev->prot_stall == 1) {
2080 pch_udc_ep_set_stall(ep);
2081 pch_udc_enable_ep_interrupts(ep->dev,
2082 PCH_UDC_EPINT(ep->in, ep->num));
2083 } else {
2084 pch_udc_complete_receiver(ep);
2085 }
2086 }
2087 if (list_empty(&ep->queue))
2088 pch_udc_set_dma(dev, DMA_DIR_RX);
2089}
2090
2091
2092
2093
2094
2095static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2096{
2097 u32 epsts;
2098 struct pch_udc_ep *ep;
2099 struct pch_udc_ep *ep_out;
2100
2101 ep = &dev->ep[UDC_EP0IN_IDX];
2102 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2103 epsts = ep->epsts;
2104 ep->epsts = 0;
2105
2106 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2107 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2108 UDC_EPSTS_XFERDONE)))
2109 return;
2110 if ((epsts & UDC_EPSTS_BNA))
2111 return;
2112 if (epsts & UDC_EPSTS_HE)
2113 return;
2114 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2115 pch_udc_complete_transfer(ep);
2116 pch_udc_clear_dma(dev, DMA_DIR_RX);
2117 ep_out->td_data->status = (ep_out->td_data->status &
2118 ~PCH_UDC_BUFF_STS) |
2119 PCH_UDC_BS_HST_RDY;
2120 pch_udc_ep_clear_nak(ep_out);
2121 pch_udc_set_dma(dev, DMA_DIR_RX);
2122 pch_udc_ep_set_rrdy(ep_out);
2123 }
2124
2125 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2126 !(epsts & UDC_EPSTS_TXEMPTY))
2127 pch_udc_start_next_txrequest(ep);
2128}
2129
2130
2131
2132
2133
2134
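/**
 * pch_udc_svc_control_out() - Handle an interrupt on control OUT (EP0 OUT)
 * @dev:	Reference to pch_udc_dev structure
 *
 * Dispatches SETUP packets to the gadget driver's setup() callback and
 * forwards OUT data status to the EP0 data-out path.
 */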
2135static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2136{
2137 u32 stat;
2138 int setup_supported;
2139 struct pch_udc_ep *ep;
2140
2141 ep = &dev->ep[UDC_EP0OUT_IDX];
2142 stat = ep->epsts;
2143 ep->epsts = 0;
2144
2145
2146 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2147 UDC_EPSTS_OUT_SETUP) {
2148 dev->stall = 0;
2149 dev->ep[UDC_EP0IN_IDX].halted = 0;
2150 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2151 dev->setup_data = ep->td_stp->request;
2152 pch_udc_init_setup_buff(ep->td_stp);
2153 pch_udc_clear_dma(dev, DMA_DIR_RX);
2154 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2155 dev->ep[UDC_EP0IN_IDX].in);
2156 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2157 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2158 else
2159 dev->gadget.ep0 = &ep->ep;
2160 spin_unlock(&dev->lock);
2161
2162 if ((dev->setup_data.bRequestType == 0x21) &&
2163 (dev->setup_data.bRequest == 0xFF))
2164 dev->prot_stall = 0;
2165
2166 setup_supported = dev->driver->setup(&dev->gadget,
2167 &dev->setup_data);
2168 spin_lock(&dev->lock);
2169
2170 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2171 ep->td_data->status = (ep->td_data->status &
2172 ~PCH_UDC_BUFF_STS) |
2173 PCH_UDC_BS_HST_RDY;
2174 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2175 }
2176
2177 if (setup_supported >= 0 && setup_supported <
2178 UDC_EP0IN_MAX_PKT_SIZE) {
2179 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2180
2181
2182 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2183 pch_udc_set_dma(dev, DMA_DIR_RX);
2184 pch_udc_ep_clear_nak(ep);
2185 }
2186 } else if (setup_supported < 0) {
2187
2188 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2189 pch_udc_enable_ep_interrupts(ep->dev,
2190 PCH_UDC_EPINT(ep->in, ep->num));
2191 dev->stall = 0;
2192 pch_udc_set_dma(dev, DMA_DIR_RX);
2193 } else {
2194 dev->waiting_zlp_ack = 1;
2195 }
2196 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2197 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2198 pch_udc_clear_dma(dev, DMA_DIR_RX);
2199 pch_udc_ep_set_ddptr(ep, 0);
2200 if (!list_empty(&ep->queue)) {
2201 ep->epsts = stat;
2202 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2203 }
2204 pch_udc_set_dma(dev, DMA_DIR_RX);
2205 }
2206 pch_udc_ep_set_rrdy(ep);
2207}
2208
2209
2210
2211
2212
2213
2214
2215
2216static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2217{
2218 struct pch_udc_ep *ep;
2219 struct pch_udc_request *req;
2220
2221 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2222 if (!list_empty(&ep->queue)) {
2223 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2224 pch_udc_enable_ep_interrupts(ep->dev,
2225 PCH_UDC_EPINT(ep->in, ep->num));
2226 pch_udc_ep_clear_nak(ep);
2227 }
2228}
2229
2230
2231
2232
2233
2234
2235static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2236{
2237 int i;
2238 struct pch_udc_ep *ep;
2239
2240 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2241
2242 if (ep_intr & (0x1 << i)) {
2243 ep = &dev->ep[UDC_EPIN_IDX(i)];
2244 ep->epsts = pch_udc_read_ep_status(ep);
2245 pch_udc_clear_ep_status(ep, ep->epsts);
2246 }
2247
2248 if (ep_intr & (0x10000 << i)) {
2249 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2250 ep->epsts = pch_udc_read_ep_status(ep);
2251 pch_udc_clear_ep_status(ep, ep->epsts);
2252 }
2253 }
2254}
2255
2256
2257
2258
2259
2260
2261static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2262{
2263 struct pch_udc_ep *ep;
2264 u32 val;
2265
2266
2267 ep = &dev->ep[UDC_EP0IN_IDX];
2268 pch_udc_clear_ep_control(ep);
2269 pch_udc_ep_fifo_flush(ep, ep->in);
2270 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2271 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2272
2273 ep->td_data = NULL;
2274 ep->td_stp = NULL;
2275 ep->td_data_phys = 0;
2276 ep->td_stp_phys = 0;
2277
2278
2279 ep = &dev->ep[UDC_EP0OUT_IDX];
2280 pch_udc_clear_ep_control(ep);
2281 pch_udc_ep_fifo_flush(ep, ep->in);
2282 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2283 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2284 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2285 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2286
2287
2288 pch_udc_init_setup_buff(ep->td_stp);
2289
2290 pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2291
2292 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2293
2294
2295 ep->td_data->status = PCH_UDC_DMA_LAST;
2296 ep->td_data->dataptr = dev->dma_addr;
2297 ep->td_data->next = ep->td_data_phys;
2298
2299 pch_udc_ep_clear_nak(ep);
2300}
2301
2302
2303
2304
2305
2306
2307static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2308{
2309 struct pch_udc_ep *ep;
2310 int i;
2311
2312 pch_udc_clear_dma(dev, DMA_DIR_TX);
2313 pch_udc_clear_dma(dev, DMA_DIR_RX);
2314
2315 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2316
2317 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2318
2319 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2320 ep = &dev->ep[i];
2321 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2322 pch_udc_clear_ep_control(ep);
2323 pch_udc_ep_set_ddptr(ep, 0);
2324 pch_udc_write_csr(ep->dev, 0x00, i);
2325 }
2326 dev->stall = 0;
2327 dev->prot_stall = 0;
2328 dev->waiting_zlp_ack = 0;
2329 dev->set_cfg_not_acked = 0;
2330
2331
2332 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2333 ep = &dev->ep[i];
2334 pch_udc_ep_set_nak(ep);
2335 pch_udc_ep_fifo_flush(ep, ep->in);
2336
2337 empty_req_queue(ep);
2338 }
2339 if (dev->driver && dev->driver->disconnect)
2340 dev->driver->disconnect(&dev->gadget);
2341}
2342
2343
2344
2345
2346
2347
2348static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2349{
2350 u32 dev_stat, dev_speed;
2351 u32 speed = USB_SPEED_FULL;
2352
2353 dev_stat = pch_udc_read_device_status(dev);
2354 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2355 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2356 switch (dev_speed) {
2357 case UDC_DEVSTS_ENUM_SPEED_HIGH:
2358 speed = USB_SPEED_HIGH;
2359 break;
2360 case UDC_DEVSTS_ENUM_SPEED_FULL:
2361 speed = USB_SPEED_FULL;
2362 break;
2363 case UDC_DEVSTS_ENUM_SPEED_LOW:
2364 speed = USB_SPEED_LOW;
2365 break;
2366 default:
2367 BUG();
2368 }
2369 dev->gadget.speed = speed;
2370 pch_udc_activate_control_ep(dev);
2371 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2372 pch_udc_set_dma(dev, DMA_DIR_TX);
2373 pch_udc_set_dma(dev, DMA_DIR_RX);
2374 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2375}
2376
2377
2378
2379
2380
2381
2382static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2383{
2384 u32 reg, dev_stat = 0;
2385 int i, ret;
2386
2387 dev_stat = pch_udc_read_device_status(dev);
2388 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2389 UDC_DEVSTS_INTF_SHIFT;
2390 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2391 UDC_DEVSTS_ALT_SHIFT;
2392 dev->set_cfg_not_acked = 1;
2393
2394 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2395 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2396 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2397 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2398 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2399
2400
2401 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2402 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2403 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2404 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2405 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2406 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2407 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2408
2409 pch_udc_ep_clear_stall(&(dev->ep[i]));
2410 dev->ep[i].halted = 0;
2411 }
2412 dev->stall = 0;
2413 spin_unlock(&dev->lock);
2414 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2415 spin_lock(&dev->lock);
2416}
2417
2418
2419
2420
2421
2422
2423static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2424{
2425 int i, ret;
2426 u32 reg, dev_stat = 0;
2427
2428 dev_stat = pch_udc_read_device_status(dev);
2429 dev->set_cfg_not_acked = 1;
2430 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2431 UDC_DEVSTS_CFG_SHIFT;
2432
2433 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2434 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2435 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2436
2437
2438 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2439 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2440 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2441 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2442 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2443
2444 pch_udc_ep_clear_stall(&(dev->ep[i]));
2445 dev->ep[i].halted = 0;
2446 }
2447 dev->stall = 0;
2448
2449
2450 spin_unlock(&dev->lock);
2451 ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
2452 spin_lock(&dev->lock);
2453}
2454
2455
2456
2457
2458
2459
2460
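/**
 * pch_udc_dev_isr() - Handle device-level interrupts
 * @dev:	Reference to pch_udc_dev structure
 * @dev_intr:	Device interrupt status read from UDC_DEVIRQSTS_ADDR
 */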
2461static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2462{
2463
2464 if (dev_intr & UDC_DEVINT_UR)
2465 pch_udc_svc_ur_interrupt(dev);
2466
2467 if (dev_intr & UDC_DEVINT_ENUM)
2468 pch_udc_svc_enum_interrupt(dev);
2469
2470 if (dev_intr & UDC_DEVINT_SI)
2471 pch_udc_svc_intf_interrupt(dev);
2472
2473 if (dev_intr & UDC_DEVINT_SC)
2474 pch_udc_svc_cfg_interrupt(dev);
2475
2476 if (dev_intr & UDC_DEVINT_US)
2477 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2478
2479 if (dev_intr & UDC_DEVINT_SOF)
2480 dev_dbg(&dev->pdev->dev, "SOF\n");
2481
2482 if (dev_intr & UDC_DEVINT_ES)
2483 dev_dbg(&dev->pdev->dev, "ES\n");
2484
2485 if (dev_intr & UDC_DEVINT_RWKP)
2486 dev_dbg(&dev->pdev->dev, "RWKP\n");
2487}
2488
2489
2490
2491
2492
2493
2494static irqreturn_t pch_udc_isr(int irq, void *pdev)
2495{
2496 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2497 u32 dev_intr, ep_intr;
2498 int i;
2499
2500 dev_intr = pch_udc_read_device_interrupts(dev);
2501 ep_intr = pch_udc_read_ep_interrupts(dev);
2502
2503 if (dev_intr)
2504
2505 pch_udc_write_device_interrupts(dev, dev_intr);
2506 if (ep_intr)
2507
2508 pch_udc_write_ep_interrupts(dev, ep_intr);
2509 if (!dev_intr && !ep_intr)
2510 return IRQ_NONE;
2511 spin_lock(&dev->lock);
2512 if (dev_intr)
2513 pch_udc_dev_isr(dev, dev_intr);
2514 if (ep_intr) {
2515 pch_udc_read_all_epstatus(dev, ep_intr);
2516
2517 if (ep_intr & UDC_EPINT_IN_EP0) {
2518 pch_udc_svc_control_in(dev);
2519 pch_udc_postsvc_epinters(dev, 0);
2520 }
2521
2522 if (ep_intr & UDC_EPINT_OUT_EP0)
2523 pch_udc_svc_control_out(dev);
2524
2525 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2526 if (ep_intr & (1 << i)) {
2527 pch_udc_svc_data_in(dev, i);
2528 pch_udc_postsvc_epinters(dev, i);
2529 }
2530 }
		/* Service data-OUT interrupts for the remaining endpoints */
		for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
					PCH_UDC_USED_EP_NUM); i++)
2534 if (ep_intr & (1 << i))
2535 pch_udc_svc_data_out(dev, i -
2536 UDC_EPINT_OUT_SHIFT);
2537 }
2538 spin_unlock(&dev->lock);
2539 return IRQ_HANDLED;
2540}
2545
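/**
 * pch_udc_setup_ep0() - Enable the interrupts needed for ep0 traffic
 * @dev:	Reference to the device structure
 */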
2546static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2547{
	/* Enable ep0 IN and OUT interrupts */
	pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
						UDC_EPINT_OUT_EP0);
	/* Enable device-level interrupts */
	pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
					UDC_DEVINT_ES | UDC_DEVINT_ENUM |
					UDC_DEVINT_SI | UDC_DEVINT_SC);
2555}
2560
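/**
 * gadget_release() - Free the device structure when the gadget device goes away
 * @pdev:	Reference to the embedded struct device
 */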
2561static void gadget_release(struct device *pdev)
2562{
2563 struct pch_udc_dev *dev = dev_get_drvdata(pdev);
2564
2565 kfree(dev);
2566}
2571
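/**
 * pch_udc_pcd_reinit() - Reinitialize the gadget and endpoint structures
 * @dev:	Reference to the device structure
 */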
2572static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2573{
2574 const char *const ep_string[] = {
2575 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2576 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2577 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2578 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2579 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2580 "ep15in", "ep15out",
2581 };
2582 int i;
2583
2584 dev->gadget.speed = USB_SPEED_UNKNOWN;
2585 INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* Initialize the endpoint structures */
	memset(dev->ep, 0, sizeof(dev->ep));
2589 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2590 struct pch_udc_ep *ep = &dev->ep[i];
2591 ep->dev = dev;
2592 ep->halted = 1;
2593 ep->num = i / 2;
2594 ep->in = ~i & 1;
2595 ep->ep.name = ep_string[i];
2596 ep->ep.ops = &pch_udc_ep_ops;
2597 if (ep->in)
2598 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2599 else
2600 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2601 UDC_EP_REG_SHIFT;
2602
2603 ep->ep.maxpacket = UDC_BULK_MAX_PKT_SIZE;
2604 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2605 INIT_LIST_HEAD(&ep->queue);
2606 }
2607 dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
2608 dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;

	/* Remove ep0 IN and OUT from the generic endpoint list */
	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2613
2614 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2615 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2616}
2624
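/**
 * pch_udc_pcd_init() - Initialize the hardware and driver bookkeeping
 * @dev:	Reference to the device structure
 *
 * Return: 0 on success.
 */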
2625static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2626{
2627 pch_udc_init(dev);
2628 pch_udc_pcd_reinit(dev);
2629 return 0;
2630}
2635
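/**
 * init_dma_pools() - Create the DMA pools and the ep0 descriptors and buffer
 * @dev:	Reference to the device structure
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */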
2636static int init_dma_pools(struct pch_udc_dev *dev)
2637{
2638 struct pch_udc_stp_dma_desc *td_stp;
2639 struct pch_udc_data_dma_desc *td_data;

	/* DMA pool for the data descriptors */
	dev->data_requests = pci_pool_create("data_requests", dev->pdev,
		sizeof(struct pch_udc_data_dma_desc), 0, 0);
2644 if (!dev->data_requests) {
2645 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2646 __func__);
2647 return -ENOMEM;
2648 }

	/* DMA pool for the setup descriptors */
	dev->stp_requests = pci_pool_create("setup requests", dev->pdev,
		sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2653 if (!dev->stp_requests) {
2654 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2655 __func__);
2656 return -ENOMEM;
2657 }

	/* Allocate the ep0 OUT setup descriptor */
	td_stp = pci_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2661 if (!td_stp) {
2662 dev_err(&dev->pdev->dev,
2663 "%s: can't allocate setup dma descriptor\n", __func__);
2664 return -ENOMEM;
2665 }
2666 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;

	/* Allocate the ep0 OUT data descriptor */
	td_data = pci_pool_alloc(dev->data_requests, GFP_KERNEL,
				 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2671 if (!td_data) {
2672 dev_err(&dev->pdev->dev,
2673 "%s: can't allocate data dma descriptor\n", __func__);
2674 return -ENOMEM;
2675 }
2676 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2677 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2678 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2679 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2680 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
2681
2682 dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
2683 if (!dev->ep0out_buf)
2684 return -ENOMEM;
2685 dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
2686 UDC_EP0OUT_BUFF_SIZE * 4,
2687 DMA_FROM_DEVICE);
2688 return 0;
2689}
2690
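/**
 * pch_udc_start() - Bind a gadget driver to this controller
 * @driver:	Gadget driver to bind
 * @bind:	Bind callback supplied by the gadget layer
 *
 * Return: 0 on success, a negative errno otherwise.
 */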
2691static int pch_udc_start(struct usb_gadget_driver *driver,
2692 int (*bind)(struct usb_gadget *))
2693{
2694 struct pch_udc_dev *dev = pch_udc;
2695 int retval;
2696
	/* Check dev first: the error paths below dereference it */
	if (!dev)
		return -ENODEV;

	if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
	    !driver->setup || !driver->unbind || !driver->disconnect) {
		dev_err(&dev->pdev->dev,
			"%s: invalid driver parameter\n", __func__);
		return -EINVAL;
	}

	if (dev->driver) {
		dev_err(&dev->pdev->dev, "%s: already bound\n", __func__);
		return -EBUSY;
	}
2711 driver->driver.bus = NULL;
2712 dev->driver = driver;
2713 dev->gadget.dev.driver = &driver->driver;

	/* Invoke the bind routine of the gadget driver */
	retval = bind(&dev->gadget);
2718 if (retval) {
2719 dev_err(&dev->pdev->dev, "%s: binding to %s returning %d\n",
2720 __func__, driver->driver.name, retval);
2721 dev->driver = NULL;
2722 dev->gadget.dev.driver = NULL;
2723 return retval;
2724 }

	/* Get ready for ep0 traffic */
	pch_udc_setup_ep0(dev);

	/* Clear soft-disconnect so the host can enumerate the device */
	pch_udc_clear_disconnect(dev);
2730
2731 dev->connected = 1;
2732 return 0;
2733}
2734
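/**
 * pch_udc_stop() - Unbind the currently bound gadget driver
 * @driver:	Gadget driver being unbound
 *
 * Return: 0 on success, a negative errno otherwise.
 */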
2735static int pch_udc_stop(struct usb_gadget_driver *driver)
2736{
2737 struct pch_udc_dev *dev = pch_udc;
2738
2739 if (!dev)
2740 return -ENODEV;
2741
2742 if (!driver || (driver != dev->driver)) {
2743 dev_err(&dev->pdev->dev,
2744 "%s: invalid driver parameter\n", __func__);
2745 return -EINVAL;
2746 }
2747
2748 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);

	/* Ensure no requests remain pending with this driver */
	driver->disconnect(&dev->gadget);
2752 driver->unbind(&dev->gadget);
2753 dev->gadget.dev.driver = NULL;
2754 dev->driver = NULL;
2755 dev->connected = 0;

	/* Set the soft-disconnect bit so the host sees a detach */
	pch_udc_set_disconnect(dev);
2759 return 0;
2760}
2761
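/**
 * pch_udc_shutdown() - Quiesce the controller at system shutdown
 * @pdev:	Reference to the PCI device
 */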
2762static void pch_udc_shutdown(struct pci_dev *pdev)
2763{
2764 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2765
2766 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2767 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);

	/* Disable the pullup so the host sees the device disconnect */
	pch_udc_set_disconnect(dev);
2771}
2772
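/**
 * pch_udc_remove() - Tear down the controller and release its resources
 * @pdev:	Reference to the PCI device
 */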
2773static void pch_udc_remove(struct pci_dev *pdev)
2774{
2775 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2776
2777 usb_del_gadget_udc(&dev->gadget);

	/* The gadget driver should already have been unbound at this point */
	if (dev->driver)
		dev_err(&pdev->dev,
			"%s: gadget driver still bound!!!\n", __func__);
2783
2784 if (dev->data_requests)
2785 pci_pool_destroy(dev->data_requests);
2786
2787 if (dev->stp_requests) {
		/* Free the ep0 OUT setup and data descriptors */
		if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
2790 pci_pool_free(dev->stp_requests,
2791 dev->ep[UDC_EP0OUT_IDX].td_stp,
2792 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2793 }
2794 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
2795 pci_pool_free(dev->stp_requests,
2796 dev->ep[UDC_EP0OUT_IDX].td_data,
2797 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2798 }
2799 pci_pool_destroy(dev->stp_requests);
2800 }
2801
2802 if (dev->dma_addr)
2803 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
2804 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
2805 kfree(dev->ep0out_buf);
2806
2807 pch_udc_exit(dev);
2808
2809 if (dev->irq_registered)
2810 free_irq(pdev->irq, dev);
2811 if (dev->base_addr)
2812 iounmap(dev->base_addr);
2813 if (dev->mem_region)
2814 release_mem_region(dev->phys_addr,
2815 pci_resource_len(pdev, PCH_UDC_PCI_BAR));
2816 if (dev->active)
2817 pci_disable_device(pdev);
2818 if (dev->registered)
2819 device_unregister(&dev->gadget.dev);
2820 kfree(dev);
2821 pci_set_drvdata(pdev, NULL);
2822}
2823
2824#ifdef CONFIG_PM
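/**
 * pch_udc_suspend() - Disable interrupts and put the PCI device to sleep
 * @pdev:	Reference to the PCI device
 * @state:	Target power state
 *
 * Return: 0 on success, a negative errno otherwise.
 */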
2825static int pch_udc_suspend(struct pci_dev *pdev, pm_message_t state)
2826{
2827 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
2828
2829 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
2830 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2831
2832 pci_disable_device(pdev);
2833 pci_enable_wake(pdev, PCI_D3hot, 0);
2834
2835 if (pci_save_state(pdev)) {
2836 dev_err(&pdev->dev,
2837 "%s: could not save PCI config state\n", __func__);
2838 return -ENOMEM;
2839 }
2840 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2841 return 0;
2842}
2843
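/**
 * pch_udc_resume() - Restore PCI state and re-enable the device on resume
 * @pdev:	Reference to the PCI device
 *
 * Return: 0 on success, a negative errno otherwise.
 */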
2844static int pch_udc_resume(struct pci_dev *pdev)
2845{
2846 int ret;
2847
2848 pci_set_power_state(pdev, PCI_D0);
2849 pci_restore_state(pdev);
2850 ret = pci_enable_device(pdev);
2851 if (ret) {
2852 dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
2853 return ret;
2854 }
2855 pci_enable_wake(pdev, PCI_D3hot, 0);
2856 return 0;
2857}
2858#else
2859#define pch_udc_suspend NULL
2860#define pch_udc_resume NULL
2861#endif
2862
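/**
 * pch_udc_probe() - Set up the controller when the PCI device is bound
 * @pdev:	Reference to the PCI device
 * @id:		Matched entry from the PCI ID table
 *
 * Return: 0 on success, a negative errno otherwise.
 */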
2863static int pch_udc_probe(struct pci_dev *pdev,
2864 const struct pci_device_id *id)
2865{
2866 unsigned long resource;
2867 unsigned long len;
2868 int retval;
2869 struct pch_udc_dev *dev;
2870
	/* Only one UDC instance is supported */
	if (pch_udc) {
2873 pr_err("%s: already probed\n", __func__);
2874 return -EBUSY;
2875 }
2876
2877 dev = kzalloc(sizeof *dev, GFP_KERNEL);
2878 if (!dev) {
2879 pr_err("%s: no memory for device structure\n", __func__);
2880 return -ENOMEM;
2881 }
2882
2883 if (pci_enable_device(pdev) < 0) {
2884 kfree(dev);
2885 pr_err("%s: pci_enable_device failed\n", __func__);
2886 return -ENODEV;
2887 }
2888 dev->active = 1;
2889 pci_set_drvdata(pdev, dev);

	/* PCI resource allocation */
	resource = pci_resource_start(pdev, PCH_UDC_PCI_BAR);
	len = pci_resource_len(pdev, PCH_UDC_PCI_BAR);
2894
2895 if (!request_mem_region(resource, len, KBUILD_MODNAME)) {
2896 dev_err(&pdev->dev, "%s: pci device used already\n", __func__);
2897 retval = -EBUSY;
2898 goto finished;
2899 }
2900 dev->phys_addr = resource;
2901 dev->mem_region = 1;
2902
2903 dev->base_addr = ioremap_nocache(resource, len);
2904 if (!dev->base_addr) {
2905 pr_err("%s: device memory cannot be mapped\n", __func__);
2906 retval = -ENOMEM;
2907 goto finished;
2908 }
2909 if (!pdev->irq) {
2910 dev_err(&pdev->dev, "%s: irq not set\n", __func__);
2911 retval = -ENODEV;
2912 goto finished;
2913 }
2914 pch_udc = dev;

	/* Initialize the hardware and the endpoint bookkeeping */
	retval = pch_udc_pcd_init(dev);
	if (retval)
		goto finished;
2918 if (request_irq(pdev->irq, pch_udc_isr, IRQF_SHARED, KBUILD_MODNAME,
2919 dev)) {
2920 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
2921 pdev->irq);
2922 retval = -ENODEV;
2923 goto finished;
2924 }
2925 dev->irq = pdev->irq;
2926 dev->irq_registered = 1;
2927
2928 pci_set_master(pdev);
2929 pci_try_set_mwi(pdev);
2930
2931
2932 spin_lock_init(&dev->lock);
2933 dev->pdev = pdev;
2934 dev->gadget.ops = &pch_udc_ops;
2935
2936 retval = init_dma_pools(dev);
2937 if (retval)
2938 goto finished;
2939
2940 dev_set_name(&dev->gadget.dev, "gadget");
2941 dev->gadget.dev.parent = &pdev->dev;
2942 dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
2943 dev->gadget.dev.release = gadget_release;
2944 dev->gadget.name = KBUILD_MODNAME;
2945 dev->gadget.max_speed = USB_SPEED_HIGH;
2946
2947 retval = device_register(&dev->gadget.dev);
2948 if (retval)
2949 goto finished;
2950 dev->registered = 1;

	/* Keep the device disconnected until a gadget driver binds */
	pch_udc_set_disconnect(dev);
2954 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
2955 if (retval)
2956 goto finished;
2957 return 0;
2958
2959finished:
2960 pch_udc_remove(pdev);
2961 return retval;
2962}
2963
2964static DEFINE_PCI_DEVICE_TABLE(pch_udc_pcidev_id) = {
2965 {
2966 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
2967 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2968 .class_mask = 0xffffffff,
2969 },
2970 {
2971 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
2972 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2973 .class_mask = 0xffffffff,
2974 },
2975 {
2976 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
2977 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
2978 .class_mask = 0xffffffff,
2979 },
2980 { 0 },
2981};
2982
2983MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
2984
2985
2986static struct pci_driver pch_udc_driver = {
2987 .name = KBUILD_MODNAME,
2988 .id_table = pch_udc_pcidev_id,
2989 .probe = pch_udc_probe,
2990 .remove = pch_udc_remove,
2991 .suspend = pch_udc_suspend,
2992 .resume = pch_udc_resume,
2993 .shutdown = pch_udc_shutdown,
2994};
2995
2996static int __init pch_udc_pci_init(void)
2997{
2998 return pci_register_driver(&pch_udc_driver);
2999}
3000module_init(pch_udc_pci_init);
3001
3002static void __exit pch_udc_pci_exit(void)
3003{
3004 pci_unregister_driver(&pch_udc_driver);
3005}
3006module_exit(pch_udc_pci_exit);
3007
3008MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3009MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3010MODULE_LICENSE("GPL");
3011