/*
 * pata_scc.c - libata driver for the Toshiba SCC PATA controller.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME		"pata_scc"
#define DRV_VERSION		"0.3"

#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA	0x01b4

/* PCI BARs */
#define SCC_CTRL_BAR		0
#define SCC_BMID_BAR		1

/* offsets of the CTRL registers (BAR 0) */
#define SCC_CTL_PIOSHT		0x000
#define SCC_CTL_PIOCT		0x004
#define SCC_CTL_MDMACT		0x008
#define SCC_CTL_MCRCST		0x00C
#define SCC_CTL_SDMACT		0x010
#define SCC_CTL_SCRCST		0x014
#define SCC_CTL_UDENVT		0x018
#define SCC_CTL_TDVHSEL		0x020
#define SCC_CTL_MODEREG		0x024
#define SCC_CTL_ECMODE		0xF00
#define SCC_CTL_MAEA0		0xF50
#define SCC_CTL_MAEC0		0xF54
#define SCC_CTL_CCKCTRL		0xFF0

/* offsets of the BMID (bus-master DMA) registers (BAR 1) */
#define SCC_DMA_CMD		0x000
#define SCC_DMA_STATUS		0x004
#define SCC_DMA_TABLE_OFS	0x008
#define SCC_DMA_INTMASK		0x010
#define SCC_DMA_INTST		0x014
#define SCC_DMA_PTERADD		0x018
#define SCC_REG_CMD_ADDR	0x020

/* offsets of the ATA taskfile registers, relative to the command block base */
#define SCC_REG_DATA		0x000
#define SCC_REG_ERR		0x004
#define SCC_REG_FEATURE		0x004
#define SCC_REG_NSECT		0x008
#define SCC_REG_LBAL		0x00C
#define SCC_REG_LBAM		0x010
#define SCC_REG_LBAH		0x014
#define SCC_REG_DEVICE		0x018
#define SCC_REG_STATUS		0x01C
#define SCC_REG_CMD		0x01C
#define SCC_REG_ALTSTATUS	0x020

/* TDVHSEL register bits */
#define TDVHSEL_MASTER		0x00000001
#define TDVHSEL_SLAVE		0x00000004

/* MODEREG register bits */
#define MODE_JCUSFEN		0x00000080

/* ECMODE register value */
#define ECMODE_VALUE		0x01

/* CCKCTRL register bits */
#define CCKCTRL_ATARESET	0x00040000
#define CCKCTRL_BUFCNT		0x00020000
#define CCKCTRL_CRST		0x00010000
#define CCKCTRL_OCLKEN		0x00000100
#define CCKCTRL_ATACLKOEN	0x00000002
#define CCKCTRL_LCLKEN		0x00000001

#define QCHCD_IOS_SS		0x00000001

#define QCHSD_STPDIAG		0x00020000

/* interrupt mask and status bits */
#define INTMASK_MSK		0xD1000012
#define INTSTS_SERROR		0x80000000
#define INTSTS_PRERR		0x40000000
#define INTSTS_RERR		0x10000000
#define INTSTS_ICERR		0x01000000
#define INTSTS_BMSINT		0x00000010
#define INTSTS_BMHE		0x00000008
#define INTSTS_IOIRQS		0x00000004
#define INTSTS_INTRQ		0x00000002
#define INTSTS_ACTEINT		0x00000001

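/*
 * Timing parameter tables.
 *
 * Each table has two rows; scc_set_piomode() and scc_set_dmamode() select
 * row 1 when CCKCTRL_ATACLKOEN is set in the CCKCTRL register and row 0
 * otherwise.  The column index is the transfer mode: PIO0-PIO4 for the
 * JCHST/JCHHT/JCHCT tables and UDMA0-UDMA6 for the remaining tables.  The
 * values themselves are controller-specific timing parameters.
 */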
static const unsigned long JCHSTtbl[2][7] = {
        {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},
        {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}
};

static const unsigned long JCHHTtbl[2][7] = {
        {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},
        {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}
};

static const unsigned long JCHCTtbl[2][7] = {
        {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},
        {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}
};

static const unsigned long JCHDCTxtbl[2][7] = {
        {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},
        {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}
};

static const unsigned long JCSTWTxtbl[2][7] = {
        {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},
        {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}
};

static const unsigned long JCTSStbl[2][7] = {
        {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},
        {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}
};

static const unsigned long JCENVTtbl[2][7] = {
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},
        {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}
};

static const unsigned long JCACTSELtbl[2][7] = {
        {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}
};

static const struct pci_device_id scc_pci_tbl[] = {
        {PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA,
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        { }
};

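/**
 *	scc_set_piomode	-	Initialize host controller PATA PIO timings
 *	@ap: ATA port being configured
 *	@adev: drive being configured
 *
 *	Programs the PIOSHT and PIOCT timing registers for @adev's PIO mode,
 *	using the timing-table row selected by CCKCTRL_ATACLKOEN.
 */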
static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
        unsigned int pio = adev->pio_mode - XFER_PIO_0;
        void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
        void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
        void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
        void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
        unsigned long reg;
        int offset;

        reg = in_be32(cckctrl_port);
        if (reg & CCKCTRL_ATACLKOEN)
                offset = 1;
        else
                offset = 0;

        reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
        out_be32(piosht_port, reg);
        reg = JCHCTtbl[offset][pio];
        out_be32(pioct_port, reg);
}

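/**
 *	scc_set_dmamode	-	Initialize host controller PATA DMA timings
 *	@ap: ATA port being configured
 *	@adev: drive being configured
 *
 *	Programs the UDMA timing registers (the master or slave set, depending
 *	on @adev->devno) and the TDVHSEL selection bits for @adev's UDMA mode.
 *	Modes below UDMA0 are left untouched.
 */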
static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
        unsigned int udma = adev->dma_mode;
        unsigned int is_slave = (adev->devno != 0);
        u8 speed = udma;
        void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
        void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
        void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
        void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
        void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
        void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
        void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
        void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
        int offset, idx;

        if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
                offset = 1;
        else
                offset = 0;

        if (speed >= XFER_UDMA_0)
                idx = speed - XFER_UDMA_0;
        else
                return;

        if (is_slave) {
                out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
                out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
                out_be32(tdvhsel_port,
                         (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
        } else {
                out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
                out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
                out_be32(tdvhsel_port,
                         (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
        }
        out_be32(udenvt_port,
                 JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
}

unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
{
        /* limit ATAPI devices to UDMA4: mask off UDMA5-7 */
        if (adev->class == ATA_DEV_ATAPI &&
            (mask & (0xE0 << ATA_SHIFT_UDMA))) {
                printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
                mask &= ~(0xE0 << ATA_SHIFT_UDMA);
        }
        return ata_bmdma_mode_filter(adev, mask);
}

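/**
 *	scc_tf_load	-	send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Writes the device control and taskfile registers through out_be32()
 *	and waits for the port to go idle.
 *
 *	Note: MMIO analogue of ata_sff_tf_load().
 */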
static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

        if (tf->ctl != ap->last_ctl) {
                out_be32(ioaddr->ctl_addr, tf->ctl);
                ap->last_ctl = tf->ctl;
                ata_wait_idle(ap);
        }

        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
                out_be32(ioaddr->feature_addr, tf->hob_feature);
                out_be32(ioaddr->nsect_addr, tf->hob_nsect);
                out_be32(ioaddr->lbal_addr, tf->hob_lbal);
                out_be32(ioaddr->lbam_addr, tf->hob_lbam);
                out_be32(ioaddr->lbah_addr, tf->hob_lbah);
                VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
                        tf->hob_feature,
                        tf->hob_nsect,
                        tf->hob_lbal,
                        tf->hob_lbam,
                        tf->hob_lbah);
        }

        if (is_addr) {
                out_be32(ioaddr->feature_addr, tf->feature);
                out_be32(ioaddr->nsect_addr, tf->nsect);
                out_be32(ioaddr->lbal_addr, tf->lbal);
                out_be32(ioaddr->lbam_addr, tf->lbam);
                out_be32(ioaddr->lbah_addr, tf->lbah);
                VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
                        tf->feature,
                        tf->nsect,
                        tf->lbal,
                        tf->lbam,
                        tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE) {
                out_be32(ioaddr->device_addr, tf->device);
                VPRINTK("device 0x%X\n", tf->device);
        }

        ata_wait_idle(ap);
}

/* scc_check_status: read the ATA status register (MMIO) */
static u8 scc_check_status (struct ata_port *ap)
{
        return in_be32(ap->ioaddr.status_addr);
}

/* scc_tf_read: read the taskfile registers back into @tf, including the
 * HOB values for LBA48 commands. */
static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        tf->command = scc_check_status(ap);
        tf->feature = in_be32(ioaddr->error_addr);
        tf->nsect = in_be32(ioaddr->nsect_addr);
        tf->lbal = in_be32(ioaddr->lbal_addr);
        tf->lbam = in_be32(ioaddr->lbam_addr);
        tf->lbah = in_be32(ioaddr->lbah_addr);
        tf->device = in_be32(ioaddr->device_addr);

        if (tf->flags & ATA_TFLAG_LBA48) {
                out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
                tf->hob_feature = in_be32(ioaddr->error_addr);
                tf->hob_nsect = in_be32(ioaddr->nsect_addr);
                tf->hob_lbal = in_be32(ioaddr->lbal_addr);
                tf->hob_lbam = in_be32(ioaddr->lbam_addr);
                tf->hob_lbah = in_be32(ioaddr->lbah_addr);
                out_be32(ioaddr->ctl_addr, tf->ctl);
                ap->last_ctl = tf->ctl;
        }
}

/* scc_exec_command: issue the ATA command to the host controller */
static void scc_exec_command (struct ata_port *ap,
                              const struct ata_taskfile *tf)
{
        DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

        out_be32(ap->ioaddr.command_addr, tf->command);
        ata_sff_pause(ap);
}

/* scc_check_altstatus: read the alternate ATA status register (MMIO) */
static u8 scc_check_altstatus (struct ata_port *ap)
{
        return in_be32(ap->ioaddr.altstatus_addr);
}

/* scc_dev_select: select ATA device 0 or 1 on the port */
static void scc_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        out_be32(ap->ioaddr.device_addr, tmp);
        ata_sff_pause(ap);
}

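/**
 *	scc_bmdma_setup	-	Set up a BMDMA transaction
 *	@qc: Info associated with this ATA transaction
 *
 *	Loads the PRD table address, programs the transfer direction with the
 *	start bit cleared, and issues the ATA command.
 *
 *	Note: MMIO analogue of ata_bmdma_setup().
 */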
static void scc_bmdma_setup (struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
        u8 dmactl;
        void __iomem *mmio = ap->ioaddr.bmdma_addr;

        /* load PRD table addr */
        out_be32(mmio + SCC_DMA_TABLE_OFS, ap->prd_dma);

        /* specify data direction, make sure the start bit is clear */
        dmactl = in_be32(mmio + SCC_DMA_CMD);
        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
        if (!rw)
                dmactl |= ATA_DMA_WR;
        out_be32(mmio + SCC_DMA_CMD, dmactl);

        /* issue r/w command */
        ap->ops->sff_exec_command(ap, &qc->tf);
}

/* scc_bmdma_start: start the host DMA transaction */
static void scc_bmdma_start (struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        u8 dmactl;
        void __iomem *mmio = ap->ioaddr.bmdma_addr;

        dmactl = in_be32(mmio + SCC_DMA_CMD);
        out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
}

/* scc_devchk: probe for a device by writing a pattern to the nsect/lbal
 * registers and reading it back; a device that echoes the pattern is
 * assumed to be present. */
static unsigned int scc_devchk (struct ata_port *ap,
                                unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->sff_dev_select(ap, device);

        out_be32(ioaddr->nsect_addr, 0x55);
        out_be32(ioaddr->lbal_addr, 0xaa);

        out_be32(ioaddr->nsect_addr, 0xaa);
        out_be32(ioaddr->lbal_addr, 0x55);

        out_be32(ioaddr->nsect_addr, 0x55);
        out_be32(ioaddr->lbal_addr, 0xaa);

        nsect = in_be32(ioaddr->nsect_addr);
        lbal = in_be32(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;	/* found a device */

        return 0;		/* nothing found */
}

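/**
 *	scc_wait_after_reset	-	wait for devices to become ready after SRST
 *	@link: ATA link on which the reset was issued
 *	@devmask: mask of present devices (bit 0: device 0, bit 1: device 1)
 *	@deadline: deadline jiffies for the operation
 *
 *	Waits for the present devices to clear BSY and reselects device 0.
 *	Returns 0 if every present device became ready, otherwise an error
 *	code from ata_sff_wait_ready().
 */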
int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
                         unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int dev0 = devmask & (1 << 0);
        unsigned int dev1 = devmask & (1 << 1);
        int rc, ret = 0;

        /* give the drives time to settle after SRST before checking status */
        msleep(150);

        /* wait for the master device to become ready */
        rc = ata_sff_wait_ready(link, deadline);
        if (rc)
                return rc;

        /* if device 1 was found by scc_devchk(), give it a moment to update
         * its registers, then wait for it to become ready as well */
        if (dev1) {
                int i;

                ap->ops->sff_dev_select(ap, 1);

                /* poll nsect/lbal briefly; some devices are slow to set
                 * them after reset, and we wait for !BSY below anyway */
                for (i = 0; i < 2; i++) {
                        u8 nsect, lbal;

                        nsect = in_be32(ioaddr->nsect_addr);
                        lbal = in_be32(ioaddr->lbal_addr);
                        if ((nsect == 1) && (lbal == 1))
                                break;
                        msleep(50);
                }

                rc = ata_sff_wait_ready(link, deadline);
                if (rc) {
                        if (rc != -ENODEV)
                                return rc;
                        ret = rc;
                }
        }

        /* reselect device 0 (and device 1, if present) */
        ap->ops->sff_dev_select(ap, 0);
        if (dev1)
                ap->ops->sff_dev_select(ap, 1);
        if (dev0)
                ap->ops->sff_dev_select(ap, 0);

        return ret;
}

static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
                                      unsigned long deadline)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

        /* pulse SRST in the device control register */
        out_be32(ioaddr->ctl_addr, ap->ctl);
        udelay(20);
        out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
        udelay(20);
        out_be32(ioaddr->ctl_addr, ap->ctl);

        scc_wait_after_reset(&ap->link, devmask, deadline);

        return 0;
}

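/**
 *	scc_softreset	-	software reset the ATA bus
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Probes both devices, issues SRST via scc_bus_softreset() and
 *	classifies whatever responds.  Returns 0 on success, -EIO if the
 *	reset fails.
 */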
static int scc_softreset(struct ata_link *link, unsigned int *classes,
                         unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
        unsigned int devmask = 0, err_mask;
        u8 err;

        DPRINTK("ENTER\n");

        /* determine if device 0/1 are present */
        if (scc_devchk(ap, 0))
                devmask |= (1 << 0);
        if (slave_possible && scc_devchk(ap, 1))
                devmask |= (1 << 1);

        /* select device 0 again */
        ap->ops->sff_dev_select(ap, 0);

        /* issue bus reset */
        DPRINTK("about to softreset, devmask=%x\n", devmask);
        err_mask = scc_bus_softreset(ap, devmask, deadline);
        if (err_mask) {
                ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
                                err_mask);
                return -EIO;
        }

        /* determine by signature whether we have ATA or ATAPI devices */
        classes[0] = ata_sff_dev_classify(&ap->link.device[0],
                                          devmask & (1 << 0), &err);
        if (slave_possible && err != 0x81)
                classes[1] = ata_sff_dev_classify(&ap->link.device[1],
                                                  devmask & (1 << 1), &err);

        DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
        return 0;
}

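/**
 *	scc_bmdma_stop	-	DMA engine stop
 *	@qc: Command we are ending DMA for
 *
 *	Drains and acknowledges any error conditions reported in the DMA
 *	interrupt status register (issuing a software reset on an internal
 *	bus error), then clears the DMA start bit and pauses the engine.
 */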
static void scc_bmdma_stop (struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
        void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
        u32 reg;

        while (1) {
                reg = in_be32(bmid_base + SCC_DMA_INTST);

                if (reg & INTSTS_SERROR) {
                        printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
                        out_be32(bmid_base + SCC_DMA_CMD,
                                 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
                        continue;
                }

                if (reg & INTSTS_PRERR) {
                        u32 maea0, maec0;
                        maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
                        maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
                        printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
                        out_be32(bmid_base + SCC_DMA_CMD,
                                 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
                        continue;
                }

                if (reg & INTSTS_RERR) {
                        printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
                        out_be32(bmid_base + SCC_DMA_CMD,
                                 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
                        continue;
                }

                if (reg & INTSTS_ICERR) {
                        out_be32(bmid_base + SCC_DMA_CMD,
                                 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
                        printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
                        continue;
                }

                if (reg & INTSTS_BMSINT) {
                        unsigned int classes[2];	/* scc_softreset() fills both devices */
                        unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
                        printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
                        scc_softreset(&ap->link, classes, deadline);
                        continue;
                }

                if (reg & INTSTS_BMHE) {
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
                        continue;
                }

                if (reg & INTSTS_ACTEINT) {
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
                        continue;
                }

                if (reg & INTSTS_IOIRQS) {
                        out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
                        continue;
                }
                break;
        }

        /* clear the start bit */
        out_be32(bmid_base + SCC_DMA_CMD,
                 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);

        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
        ata_sff_dma_pause(ap);	/* dummy read */
}

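/**
 *	scc_bmdma_status	-	read the BMDMA status register
 *	@ap: Port associated with this ATA transaction
 *
 *	Returns the host DMA status byte, additionally flagging ATA_DMA_INTR
 *	(and, on suspected data loss at modes above UDMA4, ATA_DMA_ERR) based
 *	on the controller's interrupt status register.
 */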
static u8 scc_bmdma_status (struct ata_port *ap)
{
        void __iomem *mmio = ap->ioaddr.bmdma_addr;
        u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
        u32 int_status = in_be32(mmio + SCC_DMA_INTST);
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
        static int retry = 0;

        /* nothing to do if the DMA engine is not running */
        if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
                return host_stat;

        /* the device raised an error together with INTRQ: report the
         * interrupt so that libata completes the command */
        if ((scc_check_altstatus(ap) & ATA_ERR)
                                        && (int_status & INTSTS_INTRQ))
                return (host_stat | ATA_DMA_INTR);

        /* an I/O interrupt is pending */
        if (int_status & INTSTS_IOIRQS) {
                host_stat |= ATA_DMA_INTR;

                /* ATAPI is limited to UDMA4, so only ATA DMA above UDMA4
                 * needs the data-loss check */
                if ((qc->tf.protocol == ATA_PROT_DMA &&
                     qc->dev->xfer_mode > XFER_UDMA_4)) {
                        if (!(int_status & INTSTS_ACTEINT)) {
                                printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
                                       ap->print_id);
                                host_stat |= ATA_DMA_ERR;
                                /* on a repeated failure, drop the offending
                                 * UDMA mode from the port's mask */
                                if (retry++)
                                        ap->udma_mask &= ~(1 << (qc->dev->xfer_mode - XFER_UDMA_0));
                        } else
                                retry = 0;
                }
        }

        return host_stat;
}

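/**
 *	scc_data_xfer	-	Transfer data by PIO
 *	@dev: device for this I/O
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: READ or WRITE
 *
 *	Moves the data two bytes at a time through the 32-bit data register
 *	and pads a trailing odd byte.  Returns the number of bytes consumed,
 *	rounded up to a multiple of two.
 *
 *	Note: MMIO analogue of ata_sff_data_xfer().
 */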
static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
                                   unsigned int buflen, int rw)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int words = buflen >> 1;
        unsigned int i;
        __le16 *buf16 = (__le16 *) buf;
        void __iomem *mmio = ap->ioaddr.data_addr;

        /* transfer multiple of 2 bytes */
        if (rw == READ)
                for (i = 0; i < words; i++)
                        buf16[i] = cpu_to_le16(in_be32(mmio));
        else
                for (i = 0; i < words; i++)
                        out_be32(mmio, le16_to_cpu(buf16[i]));

        /* transfer trailing byte, if any */
        if (unlikely(buflen & 0x01)) {
                __le16 align_buf[1] = { 0 };
                unsigned char *trailing_buf = buf + buflen - 1;

                if (rw == READ) {
                        align_buf[0] = cpu_to_le16(in_be32(mmio));
                        memcpy(trailing_buf, align_buf, 1);
                } else {
                        memcpy(align_buf, trailing_buf, 1);
                        out_be32(mmio, le16_to_cpu(align_buf[0]));
                }
                words++;
        }

        return words << 1;
}

/* scc_irq_on: enable interrupts on the port by clearing ATA_NIEN and
 * acknowledging anything pending */
static u8 scc_irq_on (struct ata_port *ap)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 tmp;

        ap->ctl &= ~ATA_NIEN;
        ap->last_ctl = ap->ctl;

        out_be32(ioaddr->ctl_addr, ap->ctl);
        tmp = ata_wait_idle(ap);

        ap->ops->sff_irq_clear(ap);

        return tmp;
}

/* scc_freeze: mask port interrupts with ATA_NIEN and clear anything pending */
static void scc_freeze (struct ata_port *ap)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        ap->ctl |= ATA_NIEN;
        ap->last_ctl = ap->ctl;

        out_be32(ioaddr->ctl_addr, ap->ctl);

        /* Some controllers raise an IRQ on ATA_NIEN manipulation and many
         * fail to mask a previously pending IRQ properly; read the status
         * register and clear the interrupt. */
        ap->ops->sff_check_status(ap);

        ap->ops->sff_irq_clear(ap);
}

/* scc_pata_prereset: report an 80-wire cable and defer to the generic
 * SFF prereset */
static int scc_pata_prereset(struct ata_link *link, unsigned long deadline)
{
        link->ap->cbl = ATA_CBL_PATA80;
        return ata_sff_prereset(link, deadline);
}

/* scc_postreset: standard postreset callback (MMIO analogue of
 * ata_sff_postreset) */
static void scc_postreset(struct ata_link *link, unsigned int *classes)
{
        struct ata_port *ap = link->ap;

        DPRINTK("ENTER\n");

        /* is double-select really necessary? */
        if (classes[0] != ATA_DEV_NONE)
                ap->ops->sff_dev_select(ap, 1);
        if (classes[1] != ATA_DEV_NONE)
                ap->ops->sff_dev_select(ap, 0);

        /* bail out if no device is present */
        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
                DPRINTK("EXIT, no device\n");
                return;
        }

        /* set up device control */
        if (ap->ioaddr.ctl_addr)
                out_be32(ap->ioaddr.ctl_addr, ap->ctl);

        DPRINTK("EXIT\n");
}

/* scc_irq_clear: acknowledge pending interrupts by writing the DMA status
 * register back to itself */
static void scc_irq_clear (struct ata_port *ap)
{
        void __iomem *mmio = ap->ioaddr.bmdma_addr;

        if (!mmio)
                return;

        out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
}

/* scc_port_start: allocate the PRD table via ata_port_start() and program
 * its address into the controller */
static int scc_port_start (struct ata_port *ap)
{
        void __iomem *mmio = ap->ioaddr.bmdma_addr;
        int rc;

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        out_be32(mmio + SCC_DMA_PTERADD, ap->prd_dma);
        return 0;
}

/* scc_port_stop: undo scc_port_start() by clearing the PRD table address */
static void scc_port_stop (struct ata_port *ap)
{
        void __iomem *mmio = ap->ioaddr.bmdma_addr;

        out_be32(mmio + SCC_DMA_PTERADD, 0);
}

static struct scsi_host_template scc_sht = {
        ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations scc_pata_ops = {
        .inherits		= &ata_bmdma_port_ops,

        .set_piomode		= scc_set_piomode,
        .set_dmamode		= scc_set_dmamode,
        .mode_filter		= scc_mode_filter,

        .sff_tf_load		= scc_tf_load,
        .sff_tf_read		= scc_tf_read,
        .sff_exec_command	= scc_exec_command,
        .sff_check_status	= scc_check_status,
        .sff_check_altstatus	= scc_check_altstatus,
        .sff_dev_select		= scc_dev_select,

        .bmdma_setup		= scc_bmdma_setup,
        .bmdma_start		= scc_bmdma_start,
        .bmdma_stop		= scc_bmdma_stop,
        .bmdma_status		= scc_bmdma_status,
        .sff_data_xfer		= scc_data_xfer,

        .freeze			= scc_freeze,
        .prereset		= scc_pata_prereset,
        .softreset		= scc_softreset,
        .postreset		= scc_postreset,
        .post_internal_cmd	= scc_bmdma_stop,

        .sff_irq_clear		= scc_irq_clear,
        .sff_irq_on		= scc_irq_on,

        .port_start		= scc_port_start,
        .port_stop		= scc_port_stop,
};

static struct ata_port_info scc_port_info[] = {
        {
                .flags		= ATA_FLAG_SLAVE_POSS | ATA_FLAG_MMIO | ATA_FLAG_NO_LEGACY,
                .pio_mask	= ATA_PIO4,
                /* No MWDMA */
                .udma_mask	= ATA_UDMA6,
                .port_ops	= &scc_pata_ops,
        },
};

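/**
 *	scc_reset_controller	-	initialize the SCC PATA controller
 *	@host: ATA host to initialize
 *
 *	Programs the CCKCTRL clock/reset bits, waits for CCKCTRL_CRST to read
 *	back as set, sets up the EC mode, mode and interrupt-mask registers,
 *	and then checks PDIAG# to verify that an 80-conductor cable is
 *	present.  Returns 0 on success, -EIO if the cable check fails.
 */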
static int scc_reset_controller(struct ata_host *host)
{
        void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR];
        void __iomem *bmid_base = host->iomap[SCC_BMID_BAR];
        void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
        void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
        void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
        void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
        void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
        u32 reg = 0;

        out_be32(cckctrl_port, reg);
        reg |= CCKCTRL_ATACLKOEN;
        out_be32(cckctrl_port, reg);
        reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
        out_be32(cckctrl_port, reg);
        reg |= CCKCTRL_CRST;
        out_be32(cckctrl_port, reg);

        /* wait until the CRST bit reads back as set */
        for (;;) {
                reg = in_be32(cckctrl_port);
                if (reg & CCKCTRL_CRST)
                        break;
                udelay(5000);
        }

        reg |= CCKCTRL_ATARESET;
        out_be32(cckctrl_port, reg);
        out_be32(ecmode_port, ECMODE_VALUE);
        out_be32(mode_port, MODE_JCUSFEN);
        out_be32(intmask_port, INTMASK_MSK);

        /* PDIAG# high means no 80-conductor cable: refuse to drive it */
        if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
                printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
                return -EIO;
        }

        return 0;
}

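/**
 *	scc_setup_ports	-	initialize ioaddr with ATA port offsets
 *	@ioaddr: IO address structure to be initialized
 *	@base: base address of the BMID region
 *
 *	The taskfile registers live at @base + SCC_REG_CMD_ADDR and the BMDMA
 *	registers at @base itself.  Note that the alternate-status and device
 *	control registers share the SCC_REG_ALTSTATUS offset.
 */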
static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
{
        ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
        ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
        ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
        ioaddr->bmdma_addr = base;
        ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
        ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
        ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
        ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
        ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
        ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
        ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
        ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
        ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
        ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
}

static int scc_host_init(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        int rc;

        rc = scc_reset_controller(host);
        if (rc)
                return rc;

        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                return rc;
        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                return rc;

        scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]);

        pci_set_master(pdev);

        return 0;
}

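/**
 *	scc_init_one	-	Register the SCC PATA device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in scc_pci_tbl matching this device
 *
 *	Enables the PCI device, maps the CTRL and BMID BARs, initializes the
 *	controller via scc_host_init() and activates the ATA host.
 *	Returns 0 on success or a negative errno on failure.
 */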
static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        unsigned int board_idx = (unsigned int) ent->driver_data;
        const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
        struct ata_host *host;
        int rc;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev,
                           "version " DRV_VERSION "\n");

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
        if (!host)
                return -ENOMEM;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);

        ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
        ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");

        rc = scc_host_init(host);
        if (rc)
                return rc;

        return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
                                 IRQF_SHARED, &scc_sht);
}

static struct pci_driver scc_pci_driver = {
        .name			= DRV_NAME,
        .id_table		= scc_pci_tbl,
        .probe			= scc_init_one,
        .remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
        .suspend		= ata_pci_device_suspend,
        .resume			= ata_pci_device_resume,
#endif
};

static int __init scc_init (void)
{
        int rc;

        DPRINTK("pci_register_driver\n");
        rc = pci_register_driver(&scc_pci_driver);
        if (rc)
                return rc;

        DPRINTK("done\n");
        return 0;
}

static void __exit scc_exit (void)
{
        pci_unregister_driver(&scc_pci_driver);
}

module_init(scc_init);
module_exit(scc_exit);

MODULE_AUTHOR("Toshiba corp");
MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
MODULE_VERSION(DRV_VERSION);