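/*
 *  libata-core.c - helper library for ATA
 *
 *  (The original file header comment did not survive extraction; this
 *  placeholder names the file per the MODULE_DESCRIPTION below.)
 */
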
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off, 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

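/**
 *	__ata_port_next_link - helper for link iteration
 *	@ap: ATA port the links belong to
 *	@link: the previous link, NULL to begin
 *	@dev_only: if set, iterate only over links with attached devices
 *
 *	Return the link after @link, or the first link when @link is
 *	NULL.  The host link, the optional slave link and any PMP
 *	fan-out links are walked in order.
 */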
struct ata_link *__ata_port_next_link(struct ata_port *ap,
				      struct ata_link *link, bool dev_only)
{
	/* NULL link indicates start of iteration */
	if (!link) {
		if (dev_only && sata_pmp_attached(ap))
			return ap->pmp_link;
		return &ap->link;
	}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link) {
		if (!sata_pmp_attached(ap)) {
			if (unlikely(ap->slave_link) && !dev_only)
				return ap->slave_link;
			return NULL;
		}
		return ap->pmp_link;
	}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;
	return NULL;
}

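/**
 *	ata_dev_phys_link - find the physical link for a device
 *	@dev: ATA device to look up the physical link for
 *
 *	Look up the physical link which @dev is attached to.  This is
 *	different from @dev->link only when @dev is on slave link.  For
 *	all other cases, it's the same as @dev->link.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */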
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

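/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has a matching port number is used, so it
 *	can be specified as part of device force parameters.
 */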
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	/* scan backwards so that the last matching entry wins */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

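/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  Device number 15 always points to the
 *	host link whether PMP is attached or not.
 */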
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

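/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 */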
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}

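/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 */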
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15 for the first device attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	/* scan forwards so that later entries can override earlier ones */
	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}

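/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */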
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall through */
	default:
		return ATAPI_MISC;
	}
}

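/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 */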
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

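/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 */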
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

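/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.  The index
 *	into ata_rw_cmds[] combines the FUA, LBA48 and write bits with
 *	a base offset selected by the transfer mode (multi/PIO/DMA).
 */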
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	Decode the block address from the taskfile, for LBA28, LBA48
 *	and CHS addressing.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

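/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */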
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

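/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */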
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

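/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */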
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;	/* step to the matching ATA_DEV_*_UNSUP class */
	}
}

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set ATA_FLAG_IPM.
	 * When DIPM is enabled, PHY-ready state changes show up in the
	 * interrupt status, which some drivers would misinterpret as
	 * errors; such drivers also need to disable hotplug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * DIPM is only enabled for the MIN_POWER setting; for
	 * MEDIUM_POWER the host-side IPM limits are adjusted instead.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}

/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev: device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return;
}

#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, and call driver specific callbacks for disabling Host
 *	Initiated Power management.
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

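/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section of ATA/PI
 *	spec (volume 1, sect 5.14).
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */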
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers, so
	 * we check only those; it's sufficient for uniqueness.
	 *
	 * 0x69/0x96 identifies a port multiplier and 0x3c/0xc3 a SEMB
	 * device, per the Serial ATA II: Port Multiplier spec.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

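/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */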
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

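/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and
 *	resize it if required to the full size of the media.  The caller
 *	must check the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */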
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

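/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	RETURNS:
 *	Computed xfermask
 */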
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* Only the speeds, not the modes, are reported here;
		 * drivers using the timing API will handle the IORDY
		 * details correctly anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data value to pass to the port_task function
 *	@delay: delay time in msecs for workqueue function
 *
 *	Schedule execution of port_task after @delay msecs, passing
 *	@data via ap->port_task_data.  There is one port_task per port
 *	and it is the low level driver's responsibility to make sure
 *	that only one task is active at any given time.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

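/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */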
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

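/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes a simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */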
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY.  Probably a pointless
	   check as the caller should know this. */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest PIO mode mask possible if we are not using
 *	IORDY.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver.
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

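/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */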
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
		return 0;

	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

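/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */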
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_printk(dev, KERN_INFO,
			       "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_WARNING,
			"WARNING: ATAPI is %s, device ignored.\n",
			atapi_enabled ? "not supported with this driver"
				      : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
			       "firmware update to be fully functional.\n");
		ata_dev_printk(dev, KERN_WARNING, "         contact the vendor "
			       "or visit http://ata.wiki.kernel.org.\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}

/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}

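/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */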
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */
	ata_link_for_each_dev_reverse(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
static void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_phys_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}

/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned.
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}

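/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */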
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}

static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
static int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}

/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}

static const struct ata_timing ata_timing[] = {
/*	{ XFER_MODE,   setup act8b rec8b cyc8b  act  rec  cyc  udma } */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },

	/* for UDMA modes only the cycle time (last column) matters */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	{ 0xFF }	/* sentinel */
};

/* ENOUGH() rounds a duration up to the next whole clock period;
 * EZ() does the same but leaves zero (unspecified) values untouched. */
#define ENOUGH(v, unit)	(((v)-1)/(unit)+1)
#define EZ(v, unit)	((v)?ENOUGH(v, unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	/* quantize timings given in ns to multiples of the clock
	 * periods T (PIO/DMA) and UT (UDMA) supplied by the caller */
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	/* merge two sets of timings, keeping the slower (larger) value
	 * of each field selected by @what */
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
{
	const struct ata_timing *t = ata_timing;

	/* the table is sorted by mode; stop at the first entry that
	 * is not below the requested mode */
	while (xfer_mode > t->mode)
		t++;

	if (xfer_mode == t->mode)
		return t;
	return NULL;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */
	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * timings.  In this case we use the drive parameters from words
	 * 64-70 of the IDENTIFY data instead of the timing table alone.
	 */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */
	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower/equal to the PIO timing.
	 */
	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */
	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/*
	 * In a few cases quantisation may produce enough errors to
	 * leave t->cycle too low for the sum of active and recovery;
	 * if so we must correct this.
	 */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}

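/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */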
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}

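/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */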
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* PIO must always stay available and something must have changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}

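/**
 *	ata_dev_set_mode - Program timings and issue SET FEATURES - XFER
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev and
 *	revalidate the device afterwards.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */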
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
		    dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set if the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}

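/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */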
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_link_for_each_dev(dev, link) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		if (!ata_dev_enabled(dev))
			continue;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		/* honor the "dma" module parameter for this device class */
		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		if (dev->pio_mode == 0xff) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_link_for_each_dev(dev, link) {
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

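/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */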
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
	int warned = 0;

	/* Slave readiness can't be tested separately from master.  On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset, so wait for
		 * ATA_TMOUT_FF_WAIT on -ENODEV as long as the link
		 * isn't offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_printk(link, KERN_WARNING,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		msleep(50);
	}
}

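/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */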
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	msleep(ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}

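/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state because DET gets stabilized
 *	separately.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */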
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}

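/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */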
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}

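/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */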
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}

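/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait themselves after
 *	this function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */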
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

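/**
 *	sata_std_hardreset - COMRESET w/o waiting or classification
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	Standard SATA COMRESET w/o waiting or classification.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 if link offline, -EAGAIN if link online, -errno on errors.
 */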
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
	return online ? -EAGAIN : rc;
}

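/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */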
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}

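/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */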
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}

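/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */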
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}

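/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */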
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}

struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840", NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A",	NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31500341AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST31000333AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640623AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640623AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640323AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3640323AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320813AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320813AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320613AS",	"SD15",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD16",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD17",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD18",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },
	{ "ST3320613AS",	"SD19",		ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",	NULL,		ATA_HORKAGE_BRIDGE_OK, },

	/* End Marker */
	{ }
};

static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;
	int len;

	/* check for trailing wildcard: *\0 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		len = p - patt;
	else {
		len = strlen(name);
		if (!len) {
			if (!*patt)
				return 0;
			return -1;
		}
	}

	return strncmp(patt, name, len);
}

static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.  Deny DMA to those ATAPI
	 * devices with CDB interrupts so they are driven by PIO.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}

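/**
 *	ata_is_40wire		-	check drive side detection
 *	@dev: device
 *
 *	Perform drive side detection decoding, allowing for device vendors
 *	who can't follow the documentation.
 */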
static int ata_is_40wire(struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}

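/**
 *	cable_is_40wire		-	40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management
 *	in one place.  At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	Return 1 if the cable appears to be 40 wire.
 */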
static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;

	/* If the controller thinks we are 80 wire, we are. */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;

	/* If the system is known to be 40 wire short cable (eg
	 * laptop), then we allow 80 wire modes even if the drive
	 * isn't sure.
	 */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;

	/* If the controller doesn't know, we scan.
	 *
	 * Note: We look for all 40 wire detects at this point.  Any
	 *	 80 wire detect is taken to be 80 wire cable because
	 * - in many setups only the one drive (slave if present) will
	 *   give a valid detect
	 * - if you have a non detect capable drive you don't want it
	 *   to colour the choice
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			if (ata_dev_enabled(dev) && !ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}

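/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */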
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last; UDMA on a 40 wire cable is forbidden
	 * territory.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}

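/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */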
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

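/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count set to the feature value.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */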
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

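/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */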
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

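/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */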
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}

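/**
 *	atapi_check_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */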
int atapi_check_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
	    unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}

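/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.  This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */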
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

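/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */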
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}

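/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */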
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);

	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}

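/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */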
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

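/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: target port
 *
 *	LOCKING:
 *	None.
 */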
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}

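/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */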
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

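/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */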
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

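/**
 *	__ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */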
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error
	 * handler is called.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}