/*
 *  libata-scsi.c - helper library for ATA: SCSI <-> ATA translation layer
 *
 *  Translates SCSI commands coming from the SCSI midlayer into ATA
 *  taskfiles, simulates the SCSI commands that have no direct ATA
 *  equivalent (INQUIRY, MODE SENSE, READ CAPACITY, ...) and provides
 *  the libata-specific sysfs attributes and HDIO_* ioctls.
 */
36#include <linux/kernel.h>
37#include <linux/blkdev.h>
38#include <linux/spinlock.h>
39#include <scsi/scsi.h>
40#include <scsi/scsi_host.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_eh.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_tcq.h>
45#include <scsi/scsi_transport.h>
46#include <linux/libata.h>
47#include <linux/hdreg.h>
48#include <linux/uaccess.h>
49#include <linux/suspend.h>
50
51#include "libata.h"
52
53#define SECTOR_SIZE 512
54#define ATA_SCSI_RBUF_SIZE 4096
55
56static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
57static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
58
59typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
60
61static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
62 const struct scsi_device *scsidev);
63static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
64 const struct scsi_device *scsidev);
65static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
66 unsigned int id, unsigned int lun);
67
68
69#define RW_RECOVERY_MPAGE 0x1
70#define RW_RECOVERY_MPAGE_LEN 12
71#define CACHE_MPAGE 0x8
72#define CACHE_MPAGE_LEN 20
73#define CONTROL_MPAGE 0xa
74#define CONTROL_MPAGE_LEN 12
75#define ALL_MPAGES 0x3f
76#define ALL_SUB_MPAGES 0xff
77
78
79static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
80 RW_RECOVERY_MPAGE,
81 RW_RECOVERY_MPAGE_LEN - 2,
82 (1 << 7),
83 0,
84 0, 0, 0, 0,
85 0,
86 0, 0, 0
87};
88
89static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
90 CACHE_MPAGE,
91 CACHE_MPAGE_LEN - 2,
92 0,
93 0, 0, 0, 0, 0, 0, 0, 0, 0,
94 0,
95 0, 0, 0, 0, 0, 0, 0
96};
97
98static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
99 CONTROL_MPAGE,
100 CONTROL_MPAGE_LEN - 2,
101 2,
102 0,
103 0, 0, 0, 0, 0xff, 0xff,
104 0, 30
105};
111static struct scsi_transport_template ata_scsi_transport_template = {
112 .eh_strategy_handler = ata_scsi_error,
113 .eh_timed_out = ata_scsi_timed_out,
114 .user_scan = ata_scsi_user_scan,
115};
116
117
118static const struct {
119 enum link_pm value;
120 const char *name;
121} link_pm_policy[] = {
122 { NOT_AVAILABLE, "max_performance" },
123 { MIN_POWER, "min_power" },
124 { MAX_PERFORMANCE, "max_performance" },
125 { MEDIUM_POWER, "medium_power" },
126};
127
128static const char *ata_scsi_lpm_get(enum link_pm policy)
129{
130 int i;
131
132 for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
133 if (link_pm_policy[i].value == policy)
134 return link_pm_policy[i].name;
135
136 return NULL;
137}
138
139static ssize_t ata_scsi_lpm_put(struct device *dev,
140 struct device_attribute *attr,
141 const char *buf, size_t count)
142{
143 struct Scsi_Host *shost = class_to_shost(dev);
144 struct ata_port *ap = ata_shost_to_port(shost);
145 enum link_pm policy = 0;
146 int i;

        /* link_pm_policy[0] (NOT_AVAILABLE) is not user selectable; skip it */
155 for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
156 const int len = strlen(link_pm_policy[i].name);
157 if (strncmp(link_pm_policy[i].name, buf, len) == 0 &&
158 buf[len] == '\n') {
159 policy = link_pm_policy[i].value;
160 break;
161 }
162 }
163 if (!policy)
164 return -EINVAL;
165
166 ata_lpm_schedule(ap, policy);
167 return count;
168}
169
170static ssize_t
171ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf)
172{
173 struct Scsi_Host *shost = class_to_shost(dev);
174 struct ata_port *ap = ata_shost_to_port(shost);
175 const char *policy =
176 ata_scsi_lpm_get(ap->pm_policy);
177
178 if (!policy)
179 return -EINVAL;
180
181 return snprintf(buf, 23, "%s\n", policy);
182}
183DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
184 ata_scsi_lpm_show, ata_scsi_lpm_put);
185EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
186
187static ssize_t ata_scsi_park_show(struct device *device,
188 struct device_attribute *attr, char *buf)
189{
190 struct scsi_device *sdev = to_scsi_device(device);
191 struct ata_port *ap;
192 struct ata_link *link;
193 struct ata_device *dev;
194 unsigned long flags, now;
195 unsigned int uninitialized_var(msecs);
196 int rc = 0;
197
198 ap = ata_shost_to_port(sdev->host);
199
200 spin_lock_irqsave(ap->lock, flags);
201 dev = ata_scsi_find_dev(ap, sdev);
202 if (!dev) {
203 rc = -ENODEV;
204 goto unlock;
205 }
206 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
207 rc = -EOPNOTSUPP;
208 goto unlock;
209 }
210
211 link = dev->link;
212 now = jiffies;
213 if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS &&
214 link->eh_context.unloaded_mask & (1 << dev->devno) &&
215 time_after(dev->unpark_deadline, now))
216 msecs = jiffies_to_msecs(dev->unpark_deadline - now);
217 else
218 msecs = 0;
219
220unlock:
        spin_unlock_irqrestore(ap->lock, flags);
222
223 return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
224}
225
226static ssize_t ata_scsi_park_store(struct device *device,
227 struct device_attribute *attr,
228 const char *buf, size_t len)
229{
230 struct scsi_device *sdev = to_scsi_device(device);
231 struct ata_port *ap;
232 struct ata_device *dev;
233 long int input;
234 unsigned long flags;
235 int rc;
236
237 rc = strict_strtol(buf, 10, &input);
238 if (rc || input < -2)
239 return -EINVAL;
240 if (input > ATA_TMOUT_MAX_PARK) {
241 rc = -EOVERFLOW;
242 input = ATA_TMOUT_MAX_PARK;
243 }
244
245 ap = ata_shost_to_port(sdev->host);
246
247 spin_lock_irqsave(ap->lock, flags);
248 dev = ata_scsi_find_dev(ap, sdev);
249 if (unlikely(!dev)) {
250 rc = -ENODEV;
251 goto unlock;
252 }
253 if (dev->class != ATA_DEV_ATA) {
254 rc = -EOPNOTSUPP;
255 goto unlock;
256 }
257
258 if (input >= 0) {
259 if (dev->flags & ATA_DFLAG_NO_UNLOAD) {
260 rc = -EOPNOTSUPP;
261 goto unlock;
262 }
263
264 dev->unpark_deadline = ata_deadline(jiffies, input);
265 dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK;
266 ata_port_schedule_eh(ap);
267 complete(&ap->park_req_pending);
268 } else {
269 switch (input) {
270 case -1:
271 dev->flags &= ~ATA_DFLAG_NO_UNLOAD;
272 break;
273 case -2:
274 dev->flags |= ATA_DFLAG_NO_UNLOAD;
275 break;
276 }
277 }
278unlock:
279 spin_unlock_irqrestore(ap->lock, flags);
280
281 return rc ? rc : len;
282}
283DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
284 ata_scsi_park_show, ata_scsi_park_store);
285EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
286
287static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
288{
289 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
290
291 scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
292}
293
294static ssize_t
295ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
296 const char *buf, size_t count)
297{
298 struct Scsi_Host *shost = class_to_shost(dev);
299 struct ata_port *ap = ata_shost_to_port(shost);
300 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM))
301 return ap->ops->em_store(ap, buf, count);
302 return -EINVAL;
303}
304
305static ssize_t
306ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr,
307 char *buf)
308{
309 struct Scsi_Host *shost = class_to_shost(dev);
310 struct ata_port *ap = ata_shost_to_port(shost);
311
312 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM))
313 return ap->ops->em_show(ap, buf);
314 return -EINVAL;
315}
316DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR,
317 ata_scsi_em_message_show, ata_scsi_em_message_store);
318EXPORT_SYMBOL_GPL(dev_attr_em_message);
319
320static ssize_t
321ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
322 char *buf)
323{
324 struct Scsi_Host *shost = class_to_shost(dev);
325 struct ata_port *ap = ata_shost_to_port(shost);
326
327 return snprintf(buf, 23, "%d\n", ap->em_message_type);
328}
329DEVICE_ATTR(em_message_type, S_IRUGO,
330 ata_scsi_em_message_type_show, NULL);
331EXPORT_SYMBOL_GPL(dev_attr_em_message_type);
332
333static ssize_t
334ata_scsi_activity_show(struct device *dev, struct device_attribute *attr,
335 char *buf)
336{
337 struct scsi_device *sdev = to_scsi_device(dev);
338 struct ata_port *ap = ata_shost_to_port(sdev->host);
339 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
340
341 if (ap->ops->sw_activity_show && (ap->flags & ATA_FLAG_SW_ACTIVITY))
342 return ap->ops->sw_activity_show(atadev, buf);
343 return -EINVAL;
344}
345
346static ssize_t
347ata_scsi_activity_store(struct device *dev, struct device_attribute *attr,
348 const char *buf, size_t count)
349{
350 struct scsi_device *sdev = to_scsi_device(dev);
351 struct ata_port *ap = ata_shost_to_port(sdev->host);
352 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev);
353 enum sw_activity val;
354 int rc;
355
356 if (ap->ops->sw_activity_store && (ap->flags & ATA_FLAG_SW_ACTIVITY)) {
357 val = simple_strtoul(buf, NULL, 0);
358 switch (val) {
359 case OFF: case BLINK_ON: case BLINK_OFF:
360 rc = ap->ops->sw_activity_store(atadev, val);
361 if (!rc)
362 return count;
363 else
364 return rc;
365 }
366 }
367 return -EINVAL;
368}
369DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show,
370 ata_scsi_activity_store);
371EXPORT_SYMBOL_GPL(dev_attr_sw_activity);
372
373struct device_attribute *ata_common_sdev_attrs[] = {
374 &dev_attr_unload_heads,
375 NULL
376};
377EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
378
379static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
380 void (*done)(struct scsi_cmnd *))
381{
382 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
383
384 done(cmd);
385}
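
/**
 * ata_std_bios_param - generic BIOS geometry helper
 * @sdev: SCSI device
 * @bdev: block device associated with @sdev
 * @capacity: device capacity in 512-byte sectors
 * @geom: output array for heads / sectors / cylinders
 *
 * Reports a fixed 255-head, 63-sector geometry and derives the
 * cylinder count from @capacity.
 */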
405int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
406 sector_t capacity, int geom[])
407{
408 geom[0] = 255;
409 geom[1] = 63;
410 sector_div(capacity, 255*63);
411 geom[2] = capacity;
412
413 return 0;
414}
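
/**
 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
 * @ap: port the device is attached to
 * @sdev: SCSI device to get identify data for
 * @arg: user buffer to receive the IDENTIFY data
 *
 * Copies the device's IDENTIFY page to user space, with the
 * product, firmware revision and serial number words converted
 * into readable string order.
 */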
428static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev,
429 void __user *arg)
430{
431 struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
432 u16 __user *dst = arg;
433 char buf[40];
434
435 if (!dev)
436 return -ENOMSG;
437
438 if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
439 return -EFAULT;
440
441 ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
442 if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
443 return -EFAULT;
444
445 ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
446 if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
447 return -EFAULT;
448
449 ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
450 if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
451 return -EFAULT;
452
453 return 0;
454}
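
/**
 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
 * @scsidev: device to which the command is addressed
 * @arg: user supplied command arguments
 *
 * Wraps the HDIO_DRIVE_CMD arguments in an ATA_16 pass-through
 * CDB, issues it synchronously and copies any returned sector
 * data and result registers back to user space.
 */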
467int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
468{
469 int rc = 0;
470 u8 scsi_cmd[MAX_COMMAND_SIZE];
471 u8 args[4], *argbuf = NULL, *sensebuf = NULL;
472 int argsize = 0;
473 enum dma_data_direction data_dir;
474 int cmd_result;
475
476 if (arg == NULL)
477 return -EINVAL;
478
479 if (copy_from_user(args, arg, sizeof(args)))
480 return -EFAULT;
481
482 sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
483 if (!sensebuf)
484 return -ENOMEM;
485
486 memset(scsi_cmd, 0, sizeof(scsi_cmd));
487
488 if (args[3]) {
489 argsize = SECTOR_SIZE * args[3];
490 argbuf = kmalloc(argsize, GFP_KERNEL);
491 if (argbuf == NULL) {
492 rc = -ENOMEM;
493 goto error;
494 }
495
496 scsi_cmd[1] = (4 << 1);
497 scsi_cmd[2] = 0x0e;
498
499 data_dir = DMA_FROM_DEVICE;
500 } else {
501 scsi_cmd[1] = (3 << 1);
502 scsi_cmd[2] = 0x20;
503 data_dir = DMA_NONE;
504 }
505
506 scsi_cmd[0] = ATA_16;
507
508 scsi_cmd[4] = args[2];
509 if (args[0] == ATA_CMD_SMART) {
510 scsi_cmd[6] = args[3];
511 scsi_cmd[8] = args[1];
512 scsi_cmd[10] = 0x4f;
513 scsi_cmd[12] = 0xc2;
514 } else {
515 scsi_cmd[6] = args[1];
516 }
517 scsi_cmd[14] = args[0];
518
519
520
521 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
522 sensebuf, (10*HZ), 5, 0, NULL);
523
524 if (driver_byte(cmd_result) == DRIVER_SENSE) {
525 u8 *desc = sensebuf + 8;
526 cmd_result &= ~(0xFF<<24);
527
528
529
530 if (cmd_result & SAM_STAT_CHECK_CONDITION) {
531 struct scsi_sense_hdr sshdr;
532 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
533 &sshdr);
534 if (sshdr.sense_key == 0 &&
535 sshdr.asc == 0 && sshdr.ascq == 0)
536 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
537 }
538
539
540 if (sensebuf[0] == 0x72 &&
541 desc[0] == 0x09) {
542 args[0] = desc[13];
543 args[1] = desc[3];
544 args[2] = desc[5];
545 if (copy_to_user(arg, args, sizeof(args)))
546 rc = -EFAULT;
547 }
548 }
549
550
551 if (cmd_result) {
552 rc = -EIO;
553 goto error;
554 }
555
556 if ((argbuf)
557 && copy_to_user(arg + sizeof(args), argbuf, argsize))
558 rc = -EFAULT;
559error:
560 kfree(sensebuf);
561 kfree(argbuf);
562 return rc;
563}
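
/**
 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
 * @scsidev: device to which the command is addressed
 * @arg: user supplied taskfile registers
 *
 * Issues a non-data ATA_16 pass-through command built from the
 * user supplied registers and copies the result registers back
 * to user space.
 */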
576int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
577{
578 int rc = 0;
579 u8 scsi_cmd[MAX_COMMAND_SIZE];
580 u8 args[7], *sensebuf = NULL;
581 int cmd_result;
582
583 if (arg == NULL)
584 return -EINVAL;
585
586 if (copy_from_user(args, arg, sizeof(args)))
587 return -EFAULT;
588
589 sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
590 if (!sensebuf)
591 return -ENOMEM;
592
593 memset(scsi_cmd, 0, sizeof(scsi_cmd));
594 scsi_cmd[0] = ATA_16;
595 scsi_cmd[1] = (3 << 1);
596 scsi_cmd[2] = 0x20;
597 scsi_cmd[4] = args[1];
598 scsi_cmd[6] = args[2];
599 scsi_cmd[8] = args[3];
600 scsi_cmd[10] = args[4];
601 scsi_cmd[12] = args[5];
602 scsi_cmd[13] = args[6] & 0x4f;
603 scsi_cmd[14] = args[0];
604
605
606
607 cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
608 sensebuf, (10*HZ), 5, 0, NULL);
609
610 if (driver_byte(cmd_result) == DRIVER_SENSE) {
611 u8 *desc = sensebuf + 8;
612 cmd_result &= ~(0xFF<<24);
613
614
615
616 if (cmd_result & SAM_STAT_CHECK_CONDITION) {
617 struct scsi_sense_hdr sshdr;
618 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
619 &sshdr);
620 if (sshdr.sense_key == 0 &&
621 sshdr.asc == 0 && sshdr.ascq == 0)
622 cmd_result &= ~SAM_STAT_CHECK_CONDITION;
623 }
624
625
626 if (sensebuf[0] == 0x72 &&
627 desc[0] == 0x09) {
628 args[0] = desc[13];
629 args[1] = desc[3];
630 args[2] = desc[5];
631 args[3] = desc[7];
632 args[4] = desc[9];
633 args[5] = desc[11];
634 args[6] = desc[12];
635 if (copy_to_user(arg, args, sizeof(args)))
636 rc = -EFAULT;
637 }
638 }
639
640 if (cmd_result) {
641 rc = -EIO;
642 goto error;
643 }
644
645 error:
646 kfree(sensebuf);
647 return rc;
648}
649
650static int ata_ioc32(struct ata_port *ap)
651{
652 if (ap->flags & ATA_FLAG_PIO_DMA)
653 return 1;
654 if (ap->pflags & ATA_PFLAG_PIO32)
655 return 1;
656 return 0;
657}
658
659int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
660 int cmd, void __user *arg)
661{
662 int val = -EINVAL, rc = -EINVAL;
663 unsigned long flags;
664
665 switch (cmd) {
666 case ATA_IOC_GET_IO32:
667 spin_lock_irqsave(ap->lock, flags);
668 val = ata_ioc32(ap);
669 spin_unlock_irqrestore(ap->lock, flags);
670 if (copy_to_user(arg, &val, 1))
671 return -EFAULT;
672 return 0;
673
674 case ATA_IOC_SET_IO32:
675 val = (unsigned long) arg;
676 rc = 0;
677 spin_lock_irqsave(ap->lock, flags);
678 if (ap->pflags & ATA_PFLAG_PIO32CHANGE) {
679 if (val)
680 ap->pflags |= ATA_PFLAG_PIO32;
681 else
682 ap->pflags &= ~ATA_PFLAG_PIO32;
683 } else {
684 if (val != ata_ioc32(ap))
685 rc = -EINVAL;
686 }
687 spin_unlock_irqrestore(ap->lock, flags);
688 return rc;
689
690 case HDIO_GET_IDENTITY:
691 return ata_get_identity(ap, scsidev, arg);
692
693 case HDIO_DRIVE_CMD:
694 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
695 return -EACCES;
696 return ata_cmd_ioctl(scsidev, arg);
697
698 case HDIO_DRIVE_TASK:
699 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
700 return -EACCES;
701 return ata_task_ioctl(scsidev, arg);
702
703 default:
704 rc = -ENOTTY;
705 break;
706 }
707
708 return rc;
709}
710EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl);
711
712int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
713{
714 return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host),
715 scsidev, cmd, arg);
716}
717EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
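
/**
 * ata_scsi_qc_new - acquire a new ata_queued_cmd reference
 * @dev: ATA device to which the new command is attached
 * @cmd: SCSI command that originated this ATA command
 * @done: SCSI command completion function
 *
 * Obtains an unused ata_queued_cmd and attaches the SCSI command
 * and its scatter/gather list to it.  If no command slot is
 * available, @cmd is completed with QUEUE_FULL status and %NULL
 * is returned.
 */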
739static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
740 struct scsi_cmnd *cmd,
741 void (*done)(struct scsi_cmnd *))
742{
743 struct ata_queued_cmd *qc;
744
745 qc = ata_qc_new_init(dev);
746 if (qc) {
747 qc->scsicmd = cmd;
748 qc->scsidone = done;
749
750 qc->sg = scsi_sglist(cmd);
751 qc->n_elem = scsi_sg_count(cmd);
752 } else {
753 cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
754 done(cmd);
755 }
756
757 return qc;
758}
759
760static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
761{
762 struct scsi_cmnd *scmd = qc->scsicmd;
763
764 qc->extrabytes = scmd->request->extra_len;
765 qc->nbytes = scsi_bufflen(scmd) + qc->extrabytes;
766}
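
/**
 * ata_dump_status - user friendly display of error info
 * @id: id of the port in question
 * @tf: ptr to filled out taskfile
 *
 * Decodes the ATA status and error registers from @tf and prints
 * them in a human readable form.
 */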
780static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
781{
782 u8 stat = tf->command, err = tf->feature;
783
784 printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
785 if (stat & ATA_BUSY) {
786 printk("Busy }\n");
787 } else {
788 if (stat & 0x40) printk("DriveReady ");
789 if (stat & 0x20) printk("DeviceFault ");
790 if (stat & 0x10) printk("SeekComplete ");
791 if (stat & 0x08) printk("DataRequest ");
792 if (stat & 0x04) printk("CorrectedError ");
793 if (stat & 0x02) printk("Index ");
794 if (stat & 0x01) printk("Error ");
795 printk("}\n");
796
797 if (err) {
798 printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
799 if (err & 0x04) printk("DriveStatusError ");
800 if (err & 0x80) {
801 if (err & 0x04) printk("BadCRC ");
802 else printk("Sector ");
803 }
804 if (err & 0x40) printk("UncorrectableError ");
805 if (err & 0x10) printk("SectorIdNotFound ");
806 if (err & 0x02) printk("TrackZeroNotFound ");
807 if (err & 0x01) printk("AddrMarkNotFound ");
808 printk("}\n");
809 }
810 }
811}
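
/**
 * ata_to_sense_error - convert ATA error to SCSI error
 * @id: ATA device number
 * @drv_stat: value of drive status register
 * @drv_err: value of drive error register
 * @sk: sense key to fill out
 * @asc: additional sense code to fill out
 * @ascq: additional sense code qualifier to fill out
 * @verbose: print the translation on the console
 *
 * Converts an ATA status/error register pair to a SCSI sense
 * key / ASC / ASCQ triple using the lookup tables below, falling
 * back to ABORTED COMMAND when no translation is found.
 */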
830static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
831 u8 *asc, u8 *ascq, int verbose)
832{
833 int i;
834
835
836 static const unsigned char sense_table[][4] = {
837
838 {0xd1, ABORTED_COMMAND, 0x00, 0x00},
839
840 {0xd0, ABORTED_COMMAND, 0x00, 0x00},
841
842 {0x61, HARDWARE_ERROR, 0x00, 0x00},
843
844 {0x84, ABORTED_COMMAND, 0x47, 0x00},
845
846 {0x37, NOT_READY, 0x04, 0x00},
847
848 {0x09, NOT_READY, 0x04, 0x00},
849
850 {0x01, MEDIUM_ERROR, 0x13, 0x00},
851
852 {0x02, HARDWARE_ERROR, 0x00, 0x00},
853
854 {0x04, ABORTED_COMMAND, 0x00, 0x00},
855
856 {0x08, NOT_READY, 0x04, 0x00},
857
858 {0x10, ABORTED_COMMAND, 0x14, 0x00},
859
860 {0x08, NOT_READY, 0x04, 0x00},
861
862 {0x40, MEDIUM_ERROR, 0x11, 0x04},
863
864 {0x80, MEDIUM_ERROR, 0x11, 0x04},
865 {0xFF, 0xFF, 0xFF, 0xFF},
866 };
867 static const unsigned char stat_table[][4] = {
868
869 {0x80, ABORTED_COMMAND, 0x47, 0x00},
870 {0x20, HARDWARE_ERROR, 0x00, 0x00},
871 {0x08, ABORTED_COMMAND, 0x47, 0x00},
872 {0x04, RECOVERED_ERROR, 0x11, 0x00},
873 {0xFF, 0xFF, 0xFF, 0xFF},
874 };
875
876
877
878
879 if (drv_stat & ATA_BUSY) {
880 drv_err = 0;
881 }
882
883 if (drv_err) {
884
885 for (i = 0; sense_table[i][0] != 0xFF; i++) {
886
887 if ((sense_table[i][0] & drv_err) ==
888 sense_table[i][0]) {
889 *sk = sense_table[i][1];
890 *asc = sense_table[i][2];
891 *ascq = sense_table[i][3];
892 goto translate_done;
893 }
894 }
895
896 if (verbose)
897 printk(KERN_WARNING "ata%u: no sense translation for "
898 "error 0x%02x\n", id, drv_err);
899 }
900
901
902 for (i = 0; stat_table[i][0] != 0xFF; i++) {
903 if (stat_table[i][0] & drv_stat) {
904 *sk = stat_table[i][1];
905 *asc = stat_table[i][2];
906 *ascq = stat_table[i][3];
907 goto translate_done;
908 }
909 }
910
911 if (verbose)
912 printk(KERN_WARNING "ata%u: no sense translation for "
913 "status: 0x%02x\n", id, drv_stat);
914
915
916
917 *sk = ABORTED_COMMAND;
918 *asc = 0x00;
919 *ascq = 0x00;
920
921 translate_done:
922 if (verbose)
923 printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
924 "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
925 id, drv_stat, drv_err, *sk, *asc, *ascq);
926 return;
927}
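
/**
 * ata_gen_passthru_sense - generate sense data for a pass-through command
 * @qc: command that completed
 *
 * Builds descriptor format sense data containing an ATA status
 * return descriptor (code 0x09) so that the caller can recover
 * the result taskfile registers of @qc.
 */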
942static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
943{
944 struct scsi_cmnd *cmd = qc->scsicmd;
945 struct ata_taskfile *tf = &qc->result_tf;
946 unsigned char *sb = cmd->sense_buffer;
947 unsigned char *desc = sb + 8;
948 int verbose = qc->ap->ops->error_handler == NULL;
949
950 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
951
952 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
953
954
955
956
957
958 if (qc->err_mask ||
959 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
960 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
961 &sb[1], &sb[2], &sb[3], verbose);
962 sb[1] &= 0x0f;
963 }
964
965
966
967
968 sb[0] = 0x72;
969
970 desc[0] = 0x09;
971
972
973 sb[7] = 14;
974 desc[1] = 12;
975
976
977
978
979 desc[2] = 0x00;
980 desc[3] = tf->feature;
981 desc[5] = tf->nsect;
982 desc[7] = tf->lbal;
983 desc[9] = tf->lbam;
984 desc[11] = tf->lbah;
985 desc[12] = tf->device;
986 desc[13] = tf->command;
987
988
989
990
991
992 if (tf->flags & ATA_TFLAG_LBA48) {
993 desc[2] |= 0x01;
994 desc[4] = tf->hob_nsect;
995 desc[6] = tf->hob_lbal;
996 desc[8] = tf->hob_lbam;
997 desc[10] = tf->hob_lbah;
998 }
999}
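
/**
 * ata_gen_ata_sense - generate sense data for a failed ATA command
 * @qc: command that failed
 *
 * Builds descriptor format sense data; the sense key / ASC / ASCQ
 * are derived from the ATA status and error registers and the
 * failing LBA is reported in an information descriptor.
 */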
1011static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
1012{
1013 struct ata_device *dev = qc->dev;
1014 struct scsi_cmnd *cmd = qc->scsicmd;
1015 struct ata_taskfile *tf = &qc->result_tf;
1016 unsigned char *sb = cmd->sense_buffer;
1017 unsigned char *desc = sb + 8;
1018 int verbose = qc->ap->ops->error_handler == NULL;
1019 u64 block;
1020
1021 memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
1022
1023 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1024
1025
1026 sb[0] = 0x72;
1027
1028
1029
1030
1031 if (qc->err_mask ||
1032 tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
1033 ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
1034 &sb[1], &sb[2], &sb[3], verbose);
1035 sb[1] &= 0x0f;
1036 }
1037
1038 block = ata_tf_read_block(&qc->result_tf, dev);
1039
1040
1041 sb[7] = 12;
1042 desc[0] = 0x00;
1043 desc[1] = 10;
1044
1045 desc[2] |= 0x80;
1046 desc[6] = block >> 40;
1047 desc[7] = block >> 32;
1048 desc[8] = block >> 24;
1049 desc[9] = block >> 16;
1050 desc[10] = block >> 8;
1051 desc[11] = block;
1052}
1053
1054static void ata_scsi_sdev_config(struct scsi_device *sdev)
1055{
1056 sdev->use_10_for_rw = 1;
1057 sdev->use_10_for_ms = 1;
1058
1059
1060
1061
1062
1063
1064 sdev->max_device_blocked = 1;
1065}
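
/**
 * atapi_drain_needed - check whether a request may need draining
 * @rq: request to be checked
 *
 * Some ATAPI commands transfer a variable amount of data and a
 * misbehaving device may return more bytes than the host asked
 * for.  Returns non-zero for non-write packet requests of the
 * ATAPI_MISC class, whose excess data should be drained.
 */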
1082static int atapi_drain_needed(struct request *rq)
1083{
1084 if (likely(!blk_pc_request(rq)))
1085 return 0;
1086
1087 if (!rq->data_len || (rq->cmd_flags & REQ_RW))
1088 return 0;
1089
1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
1091}
1092
1093static int ata_scsi_dev_config(struct scsi_device *sdev,
1094 struct ata_device *dev)
1095{
1096 if (!ata_id_has_unload(dev->id))
1097 dev->flags |= ATA_DFLAG_NO_UNLOAD;
1098
1099
1100 blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
1101
1102 if (dev->class == ATA_DEV_ATAPI) {
1103 struct request_queue *q = sdev->request_queue;
1104 void *buf;
1105
1106
1107 blk_queue_update_dma_alignment(sdev->request_queue,
1108 ATA_DMA_PAD_SZ - 1);
1109 blk_queue_update_dma_pad(sdev->request_queue,
1110 ATA_DMA_PAD_SZ - 1);
1111
1112
1113 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
1114 if (!buf) {
1115 ata_dev_printk(dev, KERN_ERR,
1116 "drain buffer allocation failed\n");
1117 return -ENOMEM;
1118 }
1119
1120 blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
1121 } else {
1122 if (ata_id_is_ssd(dev->id))
1123 queue_flag_set_unlocked(QUEUE_FLAG_NONROT,
1124 sdev->request_queue);
1125
1126
1127 blk_queue_update_dma_alignment(sdev->request_queue,
1128 ATA_SECT_SIZE - 1);
1129 sdev->manage_start_stop = 1;
1130 }
1131
1132 if (dev->flags & ATA_DFLAG_AN)
1133 set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
1134
1135 if (dev->flags & ATA_DFLAG_NCQ) {
1136 int depth;
1137
1138 depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
1139 depth = min(ATA_MAX_QUEUE - 1, depth);
1140 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
1141 }
1142
1143 return 0;
1144}
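
/**
 * ata_scsi_slave_config - configure a newly attached SCSI device
 * @sdev: SCSI device to examine
 *
 * Called by the SCSI midlayer when @sdev is attached; applies the
 * generic libata settings and, if an ATA device is associated
 * with @sdev, the device specific queue limits.
 */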
1158int ata_scsi_slave_config(struct scsi_device *sdev)
1159{
1160 struct ata_port *ap = ata_shost_to_port(sdev->host);
1161 struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
1162 int rc = 0;
1163
1164 ata_scsi_sdev_config(sdev);
1165
1166 if (dev)
1167 rc = ata_scsi_dev_config(sdev, dev);
1168
1169 return rc;
1170}
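
/**
 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
 * @sdev: SCSI device to be destroyed
 *
 * Clears the backpointer from the attached ATA device, marks the
 * device for detach and schedules EH, then releases the request
 * queue's DMA drain buffer.
 */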
1186void ata_scsi_slave_destroy(struct scsi_device *sdev)
1187{
1188 struct ata_port *ap = ata_shost_to_port(sdev->host);
1189 struct request_queue *q = sdev->request_queue;
1190 unsigned long flags;
1191 struct ata_device *dev;
1192
1193 if (!ap->ops->error_handler)
1194 return;
1195
1196 spin_lock_irqsave(ap->lock, flags);
1197 dev = __ata_scsi_find_dev(ap, sdev);
1198 if (dev && dev->sdev) {
1199
1200 dev->sdev = NULL;
1201 dev->flags |= ATA_DFLAG_DETACH;
1202 ata_port_schedule_eh(ap);
1203 }
1204 spin_unlock_irqrestore(ap->lock, flags);
1205
1206 kfree(q->dma_drain_buffer);
1207 q->dma_drain_buffer = NULL;
1208 q->dma_drain_size = 0;
1209}
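
/**
 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * Standard ->change_queue_depth callback: clamps the requested
 * depth to what the host and device support, turns NCQ off when a
 * depth of one is requested, and returns the newly configured
 * depth.
 */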
1226int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
1227{
1228 struct ata_port *ap = ata_shost_to_port(sdev->host);
1229 struct ata_device *dev;
1230 unsigned long flags;
1231
1232 if (queue_depth < 1 || queue_depth == sdev->queue_depth)
1233 return sdev->queue_depth;
1234
1235 dev = ata_scsi_find_dev(ap, sdev);
1236 if (!dev || !ata_dev_enabled(dev))
1237 return sdev->queue_depth;
1238
1239
1240 spin_lock_irqsave(ap->lock, flags);
1241 dev->flags &= ~ATA_DFLAG_NCQ_OFF;
1242 if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
1243 dev->flags |= ATA_DFLAG_NCQ_OFF;
1244 queue_depth = 1;
1245 }
1246 spin_unlock_irqrestore(ap->lock, flags);
1247
1248
1249 queue_depth = min(queue_depth, sdev->host->can_queue);
1250 queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
1251 queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
1252
1253 if (sdev->queue_depth == queue_depth)
1254 return -EINVAL;
1255
1256 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
1257 return queue_depth;
1258}
1259
1260
1261static void ata_delayed_done_timerfn(unsigned long arg)
1262{
1263 struct scsi_cmnd *scmd = (void *)arg;
1264
1265 scmd->scsi_done(scmd);
1266}
1267
1268
1269static void ata_delayed_done(struct scsi_cmnd *scmd)
1270{
1271 static struct timer_list timer;
1272
1273 setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
1274 mod_timer(&timer, jiffies + 5 * HZ);
1275}
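
/**
 * ata_scsi_start_stop_xlat - translate SCSI START STOP UNIT
 * @qc: storage for the translated ATA taskfile
 *
 * Translates "start" into a one-sector ATA VERIFY and "stop" into
 * STANDBY IMMEDIATE.  Spin-down is skipped during shutdown or
 * hibernation when the platform or userspace is expected to power
 * the drive down itself.
 */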
1292static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
1293{
1294 struct scsi_cmnd *scmd = qc->scsicmd;
1295 struct ata_taskfile *tf = &qc->tf;
1296 const u8 *cdb = scmd->cmnd;
1297
1298 if (scmd->cmd_len < 5)
1299 goto invalid_fld;
1300
1301 tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1302 tf->protocol = ATA_PROT_NODATA;
        if (cdb[1] & 0x1) {
                ;       /* the IMMED bit is deliberately ignored */
        }
1306 if (cdb[4] & 0x2)
1307 goto invalid_fld;
1308 if (((cdb[4] >> 4) & 0xf) != 0)
1309 goto invalid_fld;
1310
1311 if (cdb[4] & 0x1) {
1312 tf->nsect = 1;
1313
1314 if (qc->dev->flags & ATA_DFLAG_LBA) {
1315 tf->flags |= ATA_TFLAG_LBA;
1316
1317 tf->lbah = 0x0;
1318 tf->lbam = 0x0;
1319 tf->lbal = 0x0;
1320 tf->device |= ATA_LBA;
1321 } else {
1322
1323 tf->lbal = 0x1;
1324 tf->lbam = 0x0;
1325 tf->lbah = 0x0;
1326 }
1327
1328 tf->command = ATA_CMD_VERIFY;
1329 } else {
1330
1331
1332
1333 if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
1334 system_state == SYSTEM_POWER_OFF)
1335 goto skip;
1336
1337 if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
1338 system_entering_hibernation())
1339 goto skip;
1340
1341
1342
1343
1344
1345 if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
1346 (system_state == SYSTEM_HALT ||
1347 system_state == SYSTEM_POWER_OFF)) {
1348 static unsigned long warned;
1349
1350 if (!test_and_set_bit(0, &warned)) {
1351 ata_dev_printk(qc->dev, KERN_WARNING,
1352 "DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
1353 "UPDATE SHUTDOWN UTILITY\n");
1354 ata_dev_printk(qc->dev, KERN_WARNING,
1355 "For more info, visit "
1356 "http://linux-ata.org/shutdown.html\n");
1357
1358
1359
1360
1361 scmd->scsi_done = qc->scsidone;
1362 qc->scsidone = ata_delayed_done;
1363 }
1364 goto skip;
1365 }
1366
1367
1368 tf->command = ATA_CMD_STANDBYNOW1;
1369 }
1370
1371
1372
1373
1374
1375
1376
1377
1378 return 0;
1379
1380 invalid_fld:
1381 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
1382
1383 return 1;
1384 skip:
1385 scmd->result = SAM_STAT_GOOD;
1386 return 1;
1387}
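
/**
 * ata_scsi_flush_xlat - translate SCSI SYNCHRONIZE CACHE
 * @qc: storage for the translated ATA taskfile
 *
 * Sets up FLUSH CACHE EXT when the device supports it, FLUSH
 * CACHE otherwise.
 */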
1403static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
1404{
1405 struct ata_taskfile *tf = &qc->tf;
1406
1407 tf->flags |= ATA_TFLAG_DEVICE;
1408 tf->protocol = ATA_PROT_NODATA;
1409
1410 if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
1411 tf->command = ATA_CMD_FLUSH_EXT;
1412 else
1413 tf->command = ATA_CMD_FLUSH;
1414
1415
1416 qc->flags |= ATA_QCFLAG_IO;
1417
1418 return 0;
1419}
1431static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1432{
1433 u64 lba = 0;
1434 u32 len;
1435
1436 VPRINTK("six-byte command\n");
1437
1438 lba |= ((u64)(cdb[1] & 0x1f)) << 16;
1439 lba |= ((u64)cdb[2]) << 8;
1440 lba |= ((u64)cdb[3]);
1441
1442 len = cdb[4];
1443
1444 *plba = lba;
1445 *plen = len;
1446}
1458static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1459{
1460 u64 lba = 0;
1461 u32 len = 0;
1462
1463 VPRINTK("ten-byte command\n");
1464
1465 lba |= ((u64)cdb[2]) << 24;
1466 lba |= ((u64)cdb[3]) << 16;
1467 lba |= ((u64)cdb[4]) << 8;
1468 lba |= ((u64)cdb[5]);
1469
1470 len |= ((u32)cdb[7]) << 8;
1471 len |= ((u32)cdb[8]);
1472
1473 *plba = lba;
1474 *plen = len;
1475}
1487static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
1488{
1489 u64 lba = 0;
1490 u32 len = 0;
1491
1492 VPRINTK("sixteen-byte command\n");
1493
1494 lba |= ((u64)cdb[2]) << 56;
1495 lba |= ((u64)cdb[3]) << 48;
1496 lba |= ((u64)cdb[4]) << 40;
1497 lba |= ((u64)cdb[5]) << 32;
1498 lba |= ((u64)cdb[6]) << 24;
1499 lba |= ((u64)cdb[7]) << 16;
1500 lba |= ((u64)cdb[8]) << 8;
1501 lba |= ((u64)cdb[9]);
1502
1503 len |= ((u32)cdb[10]) << 24;
1504 len |= ((u32)cdb[11]) << 16;
1505 len |= ((u32)cdb[12]) << 8;
1506 len |= ((u32)cdb[13]);
1507
1508 *plba = lba;
1509 *plen = len;
1510}
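
/**
 * ata_scsi_verify_xlat - translate SCSI VERIFY into an ATA command
 * @qc: storage for the translated ATA taskfile
 *
 * Converts VERIFY(10) / VERIFY(16) into ATA VERIFY or VERIFY EXT,
 * checking the requested range against the device capacity and
 * handling both LBA and CHS addressing.
 */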
1524static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
1525{
1526 struct scsi_cmnd *scmd = qc->scsicmd;
1527 struct ata_taskfile *tf = &qc->tf;
1528 struct ata_device *dev = qc->dev;
1529 u64 dev_sectors = qc->dev->n_sectors;
1530 const u8 *cdb = scmd->cmnd;
1531 u64 block;
1532 u32 n_block;
1533
1534 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1535 tf->protocol = ATA_PROT_NODATA;
1536
1537 if (cdb[0] == VERIFY) {
1538 if (scmd->cmd_len < 10)
1539 goto invalid_fld;
1540 scsi_10_lba_len(cdb, &block, &n_block);
1541 } else if (cdb[0] == VERIFY_16) {
1542 if (scmd->cmd_len < 16)
1543 goto invalid_fld;
1544 scsi_16_lba_len(cdb, &block, &n_block);
1545 } else
1546 goto invalid_fld;
1547
1548 if (!n_block)
1549 goto nothing_to_do;
1550 if (block >= dev_sectors)
1551 goto out_of_range;
1552 if ((block + n_block) > dev_sectors)
1553 goto out_of_range;
1554
1555 if (dev->flags & ATA_DFLAG_LBA) {
1556 tf->flags |= ATA_TFLAG_LBA;
1557
1558 if (lba_28_ok(block, n_block)) {
1559
1560 tf->command = ATA_CMD_VERIFY;
1561 tf->device |= (block >> 24) & 0xf;
1562 } else if (lba_48_ok(block, n_block)) {
1563 if (!(dev->flags & ATA_DFLAG_LBA48))
1564 goto out_of_range;
1565
1566
1567 tf->flags |= ATA_TFLAG_LBA48;
1568 tf->command = ATA_CMD_VERIFY_EXT;
1569
1570 tf->hob_nsect = (n_block >> 8) & 0xff;
1571
1572 tf->hob_lbah = (block >> 40) & 0xff;
1573 tf->hob_lbam = (block >> 32) & 0xff;
1574 tf->hob_lbal = (block >> 24) & 0xff;
1575 } else
1576
1577 goto out_of_range;
1578
1579 tf->nsect = n_block & 0xff;
1580
1581 tf->lbah = (block >> 16) & 0xff;
1582 tf->lbam = (block >> 8) & 0xff;
1583 tf->lbal = block & 0xff;
1584
1585 tf->device |= ATA_LBA;
1586 } else {
1587
1588 u32 sect, head, cyl, track;
1589
1590 if (!lba_28_ok(block, n_block))
1591 goto out_of_range;
1592
1593
1594 track = (u32)block / dev->sectors;
1595 cyl = track / dev->heads;
1596 head = track % dev->heads;
1597 sect = (u32)block % dev->sectors + 1;
1598
1599 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
1600 (u32)block, track, cyl, head, sect);
1601
1602
1603
1604
1605
1606 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
1607 goto out_of_range;
1608
1609 tf->command = ATA_CMD_VERIFY;
1610 tf->nsect = n_block & 0xff;
1611 tf->lbal = sect;
1612 tf->lbam = cyl;
1613 tf->lbah = cyl >> 8;
1614 tf->device |= head;
1615 }
1616
1617 return 0;
1618
1619invalid_fld:
1620 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
1621
1622 return 1;
1623
1624out_of_range:
1625 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
1626
1627 return 1;
1628
1629nothing_to_do:
1630 scmd->result = SAM_STAT_GOOD;
1631 return 1;
1632}
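
/**
 * ata_scsi_rw_xlat - translate SCSI read/write into an ATA command
 * @qc: storage for the translated ATA taskfile
 *
 * Converts READ/WRITE (6, 10 and 16 byte) CDBs into an ATA
 * read/write taskfile, honouring the FUA bit where present and
 * the READ(6)/WRITE(6) convention that a transfer length of zero
 * means 256 blocks.
 */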
1652static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
1653{
1654 struct scsi_cmnd *scmd = qc->scsicmd;
1655 const u8 *cdb = scmd->cmnd;
1656 unsigned int tf_flags = 0;
1657 u64 block;
1658 u32 n_block;
1659 int rc;
1660
1661 if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
1662 tf_flags |= ATA_TFLAG_WRITE;
1663
1664
1665 switch (cdb[0]) {
1666 case READ_10:
1667 case WRITE_10:
1668 if (unlikely(scmd->cmd_len < 10))
1669 goto invalid_fld;
1670 scsi_10_lba_len(cdb, &block, &n_block);
1671 if (unlikely(cdb[1] & (1 << 3)))
1672 tf_flags |= ATA_TFLAG_FUA;
1673 break;
1674 case READ_6:
1675 case WRITE_6:
1676 if (unlikely(scmd->cmd_len < 6))
1677 goto invalid_fld;
1678 scsi_6_lba_len(cdb, &block, &n_block);
1679
1680
1681
1682
1683 if (!n_block)
1684 n_block = 256;
1685 break;
1686 case READ_16:
1687 case WRITE_16:
1688 if (unlikely(scmd->cmd_len < 16))
1689 goto invalid_fld;
1690 scsi_16_lba_len(cdb, &block, &n_block);
1691 if (unlikely(cdb[1] & (1 << 3)))
1692 tf_flags |= ATA_TFLAG_FUA;
1693 break;
1694 default:
1695 DPRINTK("no-byte command\n");
1696 goto invalid_fld;
1697 }
1698
1699
1700 if (!n_block)
1701
1702
1703
1704
1705
1706
1707
1708 goto nothing_to_do;
1709
1710 qc->flags |= ATA_QCFLAG_IO;
1711 qc->nbytes = n_block * ATA_SECT_SIZE;
1712
1713 rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
1714 qc->tag);
1715 if (likely(rc == 0))
1716 return 0;
1717
1718 if (rc == -ERANGE)
1719 goto out_of_range;
1720
1721invalid_fld:
1722 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
1723
1724 return 1;
1725
1726out_of_range:
1727 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
1728
1729 return 1;
1730
1731nothing_to_do:
1732 scmd->result = SAM_STAT_GOOD;
1733 return 1;
1734}
1735
1736static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
1737{
1738 struct ata_port *ap = qc->ap;
1739 struct scsi_cmnd *cmd = qc->scsicmd;
1740 u8 *cdb = cmd->cmnd;
1741 int need_sense = (qc->err_mask != 0);
1742
1743
1744
1745
1746
1747
1748
1749
1750 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
1751 ((cdb[2] & 0x20) || need_sense)) {
1752 ata_gen_passthru_sense(qc);
1753 } else {
1754 if (!need_sense) {
1755 cmd->result = SAM_STAT_GOOD;
1756 } else {
1757
1758
1759
1760
1761
1762
1763 ata_gen_ata_sense(qc);
1764 }
1765 }
1766
1767
1768 if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
1769 qc->tf.command == ATA_CMD_STANDBYNOW1))
1770 qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
1771 else if (likely(system_state != SYSTEM_HALT &&
1772 system_state != SYSTEM_POWER_OFF))
1773 qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;
1774
1775 if (need_sense && !ap->ops->error_handler)
1776 ata_dump_status(ap->print_id, &qc->result_tf);
1777
1778 qc->scsidone(cmd);
1779
1780 ata_qc_free(qc);
1781}
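
/**
 * ata_scsi_translate - translate then issue a SCSI command
 * @dev: ATA device the command is addressed to
 * @cmd: SCSI command to execute
 * @done: SCSI command completion function
 * @xlat_func: actor which translates @cmd into an ATA taskfile
 *
 * Allocates a qc, maps the command's data buffer, runs @xlat_func
 * and issues the result to the hardware.  Returns zero on success
 * (including early completion) or an SCSI_MLQUEUE_* code when the
 * command has to be deferred.
 */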
1810static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
1811 void (*done)(struct scsi_cmnd *),
1812 ata_xlat_func_t xlat_func)
1813{
1814 struct ata_port *ap = dev->link->ap;
1815 struct ata_queued_cmd *qc;
1816 int rc;
1817
1818 VPRINTK("ENTER\n");
1819
1820 qc = ata_scsi_qc_new(dev, cmd, done);
1821 if (!qc)
1822 goto err_mem;
1823
1824
1825 if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1826 cmd->sc_data_direction == DMA_TO_DEVICE) {
1827 if (unlikely(scsi_bufflen(cmd) < 1)) {
1828 ata_dev_printk(dev, KERN_WARNING,
1829 "WARNING: zero len r/w req\n");
1830 goto err_did;
1831 }
1832
1833 ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));
1834
1835 qc->dma_dir = cmd->sc_data_direction;
1836 }
1837
1838 qc->complete_fn = ata_scsi_qc_complete;
1839
1840 if (xlat_func(qc))
1841 goto early_finish;
1842
1843 if (ap->ops->qc_defer) {
1844 if ((rc = ap->ops->qc_defer(qc)))
1845 goto defer;
1846 }
1847
1848
1849 ata_qc_issue(qc);
1850
1851 VPRINTK("EXIT\n");
1852 return 0;
1853
1854early_finish:
1855 ata_qc_free(qc);
1856 qc->scsidone(cmd);
1857 DPRINTK("EXIT - early finish (good or error)\n");
1858 return 0;
1859
1860err_did:
1861 ata_qc_free(qc);
1862 cmd->result = (DID_ERROR << 16);
1863 qc->scsidone(cmd);
1864err_mem:
1865 DPRINTK("EXIT - internal\n");
1866 return 0;
1867
1868defer:
1869 ata_qc_free(qc);
1870 DPRINTK("EXIT - defer\n");
1871 if (rc == ATA_DEFER_LINK)
1872 return SCSI_MLQUEUE_DEVICE_BUSY;
1873 else
1874 return SCSI_MLQUEUE_HOST_BUSY;
1875}
1891static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
1892 unsigned long *flags)
1893{
1894 spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
1895
1896 memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
1897 if (copy_in)
1898 sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
1899 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
1900 return ata_scsi_rbuf;
1901}
1915static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
1916 unsigned long *flags)
1917{
1918 if (copy_out)
1919 sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
1920 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
1921 spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
1922}
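
/**
 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 * @args: device IDENTIFY data / SCSI command of interest
 * @actor: callback implementing the simulated SCSI command
 *
 * Clears the shared response buffer, hands it to @actor and, on
 * success, copies the result into the SCSI command's data buffer
 * and completes the command.
 */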
1939static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1940 unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
1941{
1942 u8 *rbuf;
1943 unsigned int rc;
1944 struct scsi_cmnd *cmd = args->cmd;
1945 unsigned long flags;
1946
1947 rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
1948 rc = actor(args, rbuf);
1949 ata_scsi_rbuf_put(cmd, rc == 0, &flags);
1950
1951 if (rc == 0)
1952 cmd->result = SAM_STAT_GOOD;
1953 args->done(cmd);
1954}
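
/**
 * ata_scsiop_inq_std - simulate the standard INQUIRY command
 * @args: device IDENTIFY data / SCSI command of interest
 * @rbuf: response buffer
 *
 * Returns the non-VPD INQUIRY data: "ATA" as the vendor string
 * and the product and firmware revision strings taken from the
 * IDENTIFY data.
 */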
1967static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1968{
1969 const u8 versions[] = {
1970 0x60,
1971
1972 0x03,
1973 0x20,
1974
1975 0x02,
1976 0x60
1977 };
1978 u8 hdr[] = {
1979 TYPE_DISK,
1980 0,
1981 0x5,
1982 2,
1983 95 - 4
1984 };
1985
1986 VPRINTK("ENTER\n");
1987
1988
1989 if (ata_id_removeable(args->id))
1990 hdr[1] |= (1 << 7);
1991
1992 memcpy(rbuf, hdr, sizeof(hdr));
        memcpy(&rbuf[8], "ATA     ", 8);
1994 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
1995 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
1996
1997 if (rbuf[32] == 0 || rbuf[32] == ' ')
1998 memcpy(&rbuf[32], "n/a ", 4);
1999
2000 memcpy(rbuf + 59, versions, sizeof(versions));
2001
2002 return 0;
2003}
2015static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
2016{
2017 const u8 pages[] = {
2018 0x00,
2019 0x80,
2020 0x83,
2021 0x89,
2022 0xb1,
2023 };
2024
2025 rbuf[3] = sizeof(pages);
2026 memcpy(rbuf + 4, pages, sizeof(pages));
2027 return 0;
2028}
2040static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
2041{
2042 const u8 hdr[] = {
2043 0,
2044 0x80,
2045 0,
2046 ATA_ID_SERNO_LEN,
2047 };
2048
2049 memcpy(rbuf, hdr, sizeof(hdr));
2050 ata_id_string(args->id, (unsigned char *) &rbuf[4],
2051 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
2052 return 0;
2053}
2068static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
2069{
2070 const int sat_model_serial_desc_len = 68;
2071 int num;
2072
2073 rbuf[1] = 0x83;
2074 num = 4;
2075
2076
2077 rbuf[num + 0] = 2;
2078 rbuf[num + 3] = ATA_ID_SERNO_LEN;
2079 num += 4;
2080 ata_id_string(args->id, (unsigned char *) rbuf + num,
2081 ATA_ID_SERNO, ATA_ID_SERNO_LEN);
2082 num += ATA_ID_SERNO_LEN;
2083
2084
2085
2086 rbuf[num + 0] = 2;
2087 rbuf[num + 1] = 1;
2088 rbuf[num + 3] = sat_model_serial_desc_len;
2089 num += 4;
        memcpy(rbuf + num, "ATA     ", 8);
2091 num += 8;
2092 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
2093 ATA_ID_PROD_LEN);
2094 num += ATA_ID_PROD_LEN;
2095 ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
2096 ATA_ID_SERNO_LEN);
2097 num += ATA_ID_SERNO_LEN;
2098
2099 rbuf[3] = num - 4;
2100 return 0;
2101}
2113static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
2114{
2115 struct ata_taskfile tf;
2116
2117 memset(&tf, 0, sizeof(tf));
2118
2119 rbuf[1] = 0x89;
2120 rbuf[2] = (0x238 >> 8);
2121 rbuf[3] = (0x238 & 0xff);
2122
        memcpy(&rbuf[8], "linux   ", 8);
        memcpy(&rbuf[16], "libata          ", 16);
2125 memcpy(&rbuf[32], DRV_VERSION, 4);
2126 ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
2127
2128
2129
2130 tf.command = ATA_DRDY;
2131 tf.lbal = 0x1;
2132 tf.nsect = 0x1;
2133
2134 ata_tf_to_fis(&tf, 0, 1, &rbuf[36]);
2135 rbuf[36] = 0x34;
2136
2137 rbuf[56] = ATA_CMD_ID_ATA;
2138
2139 memcpy(&rbuf[60], &args->id[0], 512);
2140 return 0;
2141}
2142
2143static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
2144{
2145 int form_factor = ata_id_form_factor(args->id);
2146 int media_rotation_rate = ata_id_rotation_rate(args->id);
2147
2148 rbuf[1] = 0xb1;
2149 rbuf[3] = 0x3c;
2150 rbuf[4] = media_rotation_rate >> 8;
2151 rbuf[5] = media_rotation_rate;
2152 rbuf[7] = form_factor;
2153
2154 return 0;
2155}
2168static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
2169{
2170 VPRINTK("ENTER\n");
2171 return 0;
2172}
2186static unsigned int ata_msense_caching(u16 *id, u8 *buf)
2187{
2188 memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
2189 if (ata_id_wcache_enabled(id))
2190 buf[2] |= (1 << 2);
2191 if (!ata_id_rahead_enabled(id))
2192 buf[12] |= (1 << 5);
2193 return sizeof(def_cache_mpage);
2194}
2205static unsigned int ata_msense_ctl_mode(u8 *buf)
2206{
2207 memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
2208 return sizeof(def_control_mpage);
2209}
2220static unsigned int ata_msense_rw_recovery(u8 *buf)
2221{
2222 memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
2223 return sizeof(def_rw_recovery_mpage);
2224}
2230static int ata_dev_supports_fua(u16 *id)
2231{
2232 unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
2233
2234 if (!libata_fua)
2235 return 0;
2236 if (!ata_id_has_fua(id))
2237 return 0;
2238
2239 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
2240 ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
2241
2242 if (strcmp(model, "Maxtor"))
2243 return 1;
2244 if (strcmp(fw, "BANC1G10"))
2245 return 1;
2246
2247 return 0;
2248}
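
/**
 * ata_scsiop_mode_sense - simulate MODE SENSE(6) and MODE SENSE(10)
 * @args: device IDENTIFY data / SCSI command of interest
 * @rbuf: response buffer
 *
 * Builds the read-write error recovery, caching and control mode
 * pages from the defaults above and the device's IDENTIFY data,
 * reporting DPOFUA when the device can honour FUA writes.
 */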
2262static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
2263{
2264 struct ata_device *dev = args->dev;
2265 u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
2266 const u8 sat_blk_desc[] = {
2267 0, 0, 0, 0,
2268 0,
2269 0, 0x2, 0x0
2270 };
2271 u8 pg, spg;
2272 unsigned int ebd, page_control, six_byte;
2273 u8 dpofua;
2274
2275 VPRINTK("ENTER\n");
2276
2277 six_byte = (scsicmd[0] == MODE_SENSE);
2278 ebd = !(scsicmd[1] & 0x8);
2279
2280
2281
2282
2283 page_control = scsicmd[2] >> 6;
2284 switch (page_control) {
2285 case 0:
2286 break;
2287 case 3:
2288 goto saving_not_supp;
2289 case 1:
2290 case 2:
2291 default:
2292 goto invalid_fld;
2293 }
2294
2295 if (six_byte)
2296 p += 4 + (ebd ? 8 : 0);
2297 else
2298 p += 8 + (ebd ? 8 : 0);
2299
2300 pg = scsicmd[2] & 0x3f;
2301 spg = scsicmd[3];
2302
2303
2304
2305
2306 if (spg && (spg != ALL_SUB_MPAGES))
2307 goto invalid_fld;
2308
2309 switch(pg) {
2310 case RW_RECOVERY_MPAGE:
2311 p += ata_msense_rw_recovery(p);
2312 break;
2313
2314 case CACHE_MPAGE:
2315 p += ata_msense_caching(args->id, p);
2316 break;
2317
2318 case CONTROL_MPAGE:
2319 p += ata_msense_ctl_mode(p);
2320 break;
2321
2322 case ALL_MPAGES:
2323 p += ata_msense_rw_recovery(p);
2324 p += ata_msense_caching(args->id, p);
2325 p += ata_msense_ctl_mode(p);
2326 break;
2327
2328 default:
2329 goto invalid_fld;
2330 }
2331
2332 dpofua = 0;
2333 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
2334 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
2335 dpofua = 1 << 4;
2336
2337 if (six_byte) {
2338 rbuf[0] = p - rbuf - 1;
2339 rbuf[2] |= dpofua;
2340 if (ebd) {
2341 rbuf[3] = sizeof(sat_blk_desc);
2342 memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
2343 }
2344 } else {
2345 unsigned int output_len = p - rbuf - 2;
2346
2347 rbuf[0] = output_len >> 8;
2348 rbuf[1] = output_len;
2349 rbuf[3] |= dpofua;
2350 if (ebd) {
2351 rbuf[7] = sizeof(sat_blk_desc);
2352 memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
2353 }
2354 }
2355 return 0;
2356
2357invalid_fld:
2358 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0);
2359
2360 return 1;
2361
2362saving_not_supp:
2363 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0);
2364
2365 return 1;
2366}
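
/**
 * ata_scsiop_read_cap - simulate READ CAPACITY(10) and (16)
 * @args: device IDENTIFY data / SCSI command of interest
 * @rbuf: response buffer
 *
 * Reports the last LBA and a 512-byte logical sector size.  For
 * READ CAPACITY(16) the logical-to-physical sector ratio and the
 * lowest aligned LBA (IDENTIFY words 106 and 209) are included.
 */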
2378static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2379{
2380 struct ata_device *dev = args->dev;
2381 u64 last_lba = dev->n_sectors - 1;
2382 u8 log_per_phys = 0;
2383 u16 lowest_aligned = 0;
2384 u16 word_106 = dev->id[106];
2385 u16 word_209 = dev->id[209];
2386
2387 if ((word_106 & 0xc000) == 0x4000) {
2388
2389 if (word_106 & (1 << 13))
2390 log_per_phys = word_106 & 0xf;
2391 if ((word_209 & 0xc000) == 0x4000) {
2392 u16 first = dev->id[209] & 0x3fff;
2393 if (first > 0)
2394 lowest_aligned = (1 << log_per_phys) - first;
2395 }
2396 }
2397
2398 VPRINTK("ENTER\n");
2399
2400 if (args->cmd->cmnd[0] == READ_CAPACITY) {
2401 if (last_lba >= 0xffffffffULL)
2402 last_lba = 0xffffffff;
2403
2404
2405 rbuf[0] = last_lba >> (8 * 3);
2406 rbuf[1] = last_lba >> (8 * 2);
2407 rbuf[2] = last_lba >> (8 * 1);
2408 rbuf[3] = last_lba;
2409
2410
2411 rbuf[6] = ATA_SECT_SIZE >> 8;
2412 rbuf[7] = ATA_SECT_SIZE & 0xff;
2413 } else {
2414
2415 rbuf[0] = last_lba >> (8 * 7);
2416 rbuf[1] = last_lba >> (8 * 6);
2417 rbuf[2] = last_lba >> (8 * 5);
2418 rbuf[3] = last_lba >> (8 * 4);
2419 rbuf[4] = last_lba >> (8 * 3);
2420 rbuf[5] = last_lba >> (8 * 2);
2421 rbuf[6] = last_lba >> (8 * 1);
2422 rbuf[7] = last_lba;
2423
2424
2425 rbuf[10] = ATA_SECT_SIZE >> 8;
2426 rbuf[11] = ATA_SECT_SIZE & 0xff;
2427
2428 rbuf[12] = 0;
2429 rbuf[13] = log_per_phys;
2430 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2431 rbuf[15] = lowest_aligned;
2432 }
2433
2434 return 0;
2435}
2447static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
2448{
2449 VPRINTK("ENTER\n");
2450 rbuf[3] = 8;
2451
2452 return 0;
2453}
2454
2455static void atapi_sense_complete(struct ata_queued_cmd *qc)
2456{
2457 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
2458
2459
2460
2461
2462
2463 ata_gen_passthru_sense(qc);
2464 }
2465
2466 qc->scsidone(qc->scsicmd);
2467 ata_qc_free(qc);
2468}
2469
2470
2471static inline int ata_pio_use_silly(struct ata_port *ap)
2472{
2473 return (ap->flags & ATA_FLAG_PIO_DMA);
2474}
2475
2476static void atapi_request_sense(struct ata_queued_cmd *qc)
2477{
2478 struct ata_port *ap = qc->ap;
2479 struct scsi_cmnd *cmd = qc->scsicmd;
2480
2481 DPRINTK("ATAPI request sense\n");
2482
2483
2484 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2485
2486#ifdef CONFIG_ATA_SFF
2487 if (ap->ops->sff_tf_read)
2488 ap->ops->sff_tf_read(ap, &qc->tf);
2489#endif
2490
2491
2492 cmd->sense_buffer[0] = 0x70;
2493 cmd->sense_buffer[2] = qc->tf.feature >> 4;
2494
2495 ata_qc_reinit(qc);
2496
2497
2498 sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
2499 ata_sg_init(qc, &qc->sgent, 1);
2500 qc->dma_dir = DMA_FROM_DEVICE;
2501
2502 memset(&qc->cdb, 0, qc->dev->cdb_len);
2503 qc->cdb[0] = REQUEST_SENSE;
2504 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2505
2506 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2507 qc->tf.command = ATA_CMD_PACKET;
2508
2509 if (ata_pio_use_silly(ap)) {
2510 qc->tf.protocol = ATAPI_PROT_DMA;
2511 qc->tf.feature |= ATAPI_PKT_DMA;
2512 } else {
2513 qc->tf.protocol = ATAPI_PROT_PIO;
2514 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
2515 qc->tf.lbah = 0;
2516 }
2517 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2518
2519 qc->complete_fn = atapi_sense_complete;
2520
2521 ata_qc_issue(qc);
2522
2523 DPRINTK("EXIT\n");
2524}
2525
2526static void atapi_qc_complete(struct ata_queued_cmd *qc)
2527{
2528 struct scsi_cmnd *cmd = qc->scsicmd;
2529 unsigned int err_mask = qc->err_mask;
2530
2531 VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
2532
2533
2534 if (unlikely(qc->ap->ops->error_handler &&
2535 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
2536
2537 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
2538
2539
2540
2541
2542
2543 ata_gen_passthru_sense(qc);
2544 }
2545
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555
2556 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
2557 qc->dev->sdev->locked = 0;
2558
2559 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
2560 qc->scsidone(cmd);
2561 ata_qc_free(qc);
2562 return;
2563 }
2564
2565
2566 if (unlikely(err_mask & AC_ERR_DEV)) {
2567 cmd->result = SAM_STAT_CHECK_CONDITION;
2568 atapi_request_sense(qc);
2569 return;
2570 } else if (unlikely(err_mask)) {
2571
2572
2573
2574
2575
2576 ata_gen_passthru_sense(qc);
2577 } else {
2578 u8 *scsicmd = cmd->cmnd;
2579
2580 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
2581 unsigned long flags;
2582 u8 *buf;
2583
2584 buf = ata_scsi_rbuf_get(cmd, true, &flags);
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594 if (buf[2] == 0) {
2595 buf[2] = 0x5;
2596 buf[3] = 0x32;
2597 }
2598
2599 ata_scsi_rbuf_put(cmd, true, &flags);
2600 }
2601
2602 cmd->result = SAM_STAT_GOOD;
2603 }
2604
2605 qc->scsidone(cmd);
2606 ata_qc_free(qc);
2607}
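
/**
 * atapi_xlat - initialize an ATA PACKET taskfile
 * @qc: command structure to be initialized
 *
 * Copies the SCSI CDB into the ATAPI packet command and selects a
 * DMA, PIO or non-data protocol based on the transfer direction
 * and the device's capabilities.
 */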
2618static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
2619{
2620 struct scsi_cmnd *scmd = qc->scsicmd;
2621 struct ata_device *dev = qc->dev;
2622 int nodata = (scmd->sc_data_direction == DMA_NONE);
2623 int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
2624 unsigned int nbytes;
2625
2626 memset(qc->cdb, 0, dev->cdb_len);
2627 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len);
2628
2629 qc->complete_fn = atapi_qc_complete;
2630
2631 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2632 if (scmd->sc_data_direction == DMA_TO_DEVICE) {
2633 qc->tf.flags |= ATA_TFLAG_WRITE;
2634 DPRINTK("direction: write\n");
2635 }
2636
2637 qc->tf.command = ATA_CMD_PACKET;
2638 ata_qc_set_pc_nbytes(qc);
2639
2640
2641 if (!nodata && !using_pio && atapi_check_dma(qc))
2642 using_pio = 1;
2643
2644
2645
2646
2647
2648
2649 nbytes = min(ata_qc_raw_nbytes(qc), (unsigned int)63 * 1024);
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675 if (nbytes & 0x1)
2676 nbytes++;
2677
2678 qc->tf.lbam = (nbytes & 0xFF);
2679 qc->tf.lbah = (nbytes >> 8);
2680
2681 if (nodata)
2682 qc->tf.protocol = ATAPI_PROT_NODATA;
2683 else if (using_pio)
2684 qc->tf.protocol = ATAPI_PROT_PIO;
2685 else {
2686
2687 qc->tf.protocol = ATAPI_PROT_DMA;
2688 qc->tf.feature |= ATAPI_PKT_DMA;
2689
2690 if ((dev->flags & ATA_DFLAG_DMADIR) &&
2691 (scmd->sc_data_direction != DMA_TO_DEVICE))
2692
2693 qc->tf.feature |= ATAPI_DMADIR;
2694 }
2695
2696
2697
2698
2699 return 0;
2700}
2701
2702static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
2703{
2704 if (!sata_pmp_attached(ap)) {
2705 if (likely(devno < ata_link_max_devices(&ap->link)))
2706 return &ap->link.device[devno];
2707 } else {
2708 if (likely(devno < ap->nr_pmp_links))
2709 return &ap->pmp_link[devno].device[0];
2710 }
2711
2712 return NULL;
2713}
2714
2715static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
2716 const struct scsi_device *scsidev)
2717{
2718 int devno;
2719
2720
2721 if (!sata_pmp_attached(ap)) {
2722 if (unlikely(scsidev->channel || scsidev->lun))
2723 return NULL;
2724 devno = scsidev->id;
2725 } else {
2726 if (unlikely(scsidev->id || scsidev->lun))
2727 return NULL;
2728 devno = scsidev->channel;
2729 }
2730
2731 return ata_find_dev(ap, devno);
2732}
2750static struct ata_device *
2751ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev)
2752{
2753 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev);
2754
2755 if (unlikely(!dev || !ata_dev_enabled(dev)))
2756 return NULL;
2757
2758 return dev;
2759}
2768static u8
2769ata_scsi_map_proto(u8 byte1)
2770{
2771 switch((byte1 & 0x1e) >> 1) {
2772 case 3:
2773 return ATA_PROT_NODATA;
2774
2775 case 6:
2776 case 10:
2777 case 11:
2778 return ATA_PROT_DMA;
2779
2780 case 4:
2781 case 5:
2782 return ATA_PROT_PIO;
2783
2784 case 0:
2785 case 1:
2786 case 8:
2787 case 9:
2788 case 7:
2789 case 12:
2790 case 15:
2791 default:
2792 break;
2793 }
2794
2795 return ATA_PROT_UNKNOWN;
2796}
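
/**
 * ata_scsi_pass_thru - convert an ATA pass-through CDB to a taskfile
 * @qc: command structure to be initialized
 *
 * Handles both the 12 and 16 byte versions of the SAT ATA
 * PASS-THROUGH CDB, copying the register fields into the taskfile
 * and rejecting protocols it cannot map as well as commands (TPM,
 * SET FEATURES - XFER MODE) that would bypass driver state.
 */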
static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
{
        struct ata_taskfile *tf = &(qc->tf);
        struct scsi_cmnd *scmd = qc->scsicmd;
        struct ata_device *dev = qc->dev;
        const u8 *cdb = scmd->cmnd;

        if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN)
                goto invalid_fld;

        /* We may not issue DMA commands if no DMA mode is set */
        if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0)
                goto invalid_fld;

        /*
         * 12 and 16 byte CDBs use different offsets to
         * provide the various register values.
         */
        if (cdb[0] == ATA_16) {
                /*
                 * 16-byte CDB - may contain extended commands.
                 *
                 * If that is the case, copy the upper byte register values.
                 */
                if (cdb[1] & 0x01) {
                        tf->hob_feature = cdb[3];
                        tf->hob_nsect = cdb[5];
                        tf->hob_lbal = cdb[7];
                        tf->hob_lbam = cdb[9];
                        tf->hob_lbah = cdb[11];
                        tf->flags |= ATA_TFLAG_LBA48;
                } else
                        tf->flags &= ~ATA_TFLAG_LBA48;

                /*
                 * Always copy low byte, device and command registers.
                 */
                tf->feature = cdb[4];
                tf->nsect = cdb[6];
                tf->lbal = cdb[8];
                tf->lbam = cdb[10];
                tf->lbah = cdb[12];
                tf->device = cdb[13];
                tf->command = cdb[14];
        } else {
                /*
                 * 12-byte CDB - incapable of extended commands.
                 */
                tf->flags &= ~ATA_TFLAG_LBA48;

                tf->feature = cdb[3];
                tf->nsect = cdb[4];
                tf->lbal = cdb[5];
                tf->lbam = cdb[6];
                tf->lbah = cdb[7];
                tf->device = cdb[8];
                tf->command = cdb[9];
        }

        /* enforce correct master/slave bit */
        tf->device = dev->devno ?
                tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;

        /* sanity check for pio multi commands */
        if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf))
                goto invalid_fld;

        if (is_multi_taskfile(tf)) {
                unsigned int multi_count = 1 << (cdb[1] >> 5);

                /* compare the passed through multi_count
                 * with the cached multi_count of libata
                 */
                if (multi_count != dev->multi_count)
                        ata_dev_printk(dev, KERN_WARNING,
                                       "invalid multi_count %u ignored\n",
                                       multi_count);
        }

        /* READ/WRITE LONG use a non-standard sect_size */
        qc->sect_size = ATA_SECT_SIZE;
        switch (tf->command) {
        case ATA_CMD_READ_LONG:
        case ATA_CMD_READ_LONG_ONCE:
        case ATA_CMD_WRITE_LONG:
        case ATA_CMD_WRITE_LONG_ONCE:
                if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
                        goto invalid_fld;
                qc->sect_size = scsi_bufflen(scmd);
        }

        /*
         * Filter SET_FEATURES - XFER MODE: changing the transfer mode
         * behind libata's back would leave the controller and device
         * transfer settings out of sync.
         */
        if ((tf->command == ATA_CMD_SET_FEATURES)
            && (tf->feature == SETFEATURES_XFER))
                goto invalid_fld;

        /*
         * Filter TPM commands (0x5C-0x5F) unless libata.allow_tpm is
         * set.  This check must come after tf->command has been
         * copied from the CDB above.
         */
        if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm)
                goto invalid_fld;

        /*
         * Set flags so that all registers will be written and pass on
         * the write indication (used for PIO/DMA setup).
         */
        tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE);

        if (scmd->sc_data_direction == DMA_TO_DEVICE)
                tf->flags |= ATA_TFLAG_WRITE;

        /* set the transfer length from the SCSI data buffer */
        ata_qc_set_pc_nbytes(qc);

        /* request result TF and be quiet about device error */
        qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;

        return 0;

 invalid_fld:
        ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00);
        /* "Invalid field in cdb" */
        return 1;
}

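/**
 *	ata_get_xlat_func - check if SCSI to ATA translation is possible
 *	@dev: ATA device
 *	@cmd: SCSI command opcode to consider
 *
 *	Look up the SCSI command given, and determine whether the
 *	SCSI command is to be translated or simulated.
 *
 *	RETURNS:
 *	Pointer to translation function if possible, %NULL if not.
 */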
static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
{
        switch (cmd) {
        case READ_6:
        case READ_10:
        case READ_16:

        case WRITE_6:
        case WRITE_10:
        case WRITE_16:
                return ata_scsi_rw_xlat;

        case SYNCHRONIZE_CACHE:
                if (ata_try_flush_cache(dev))
                        return ata_scsi_flush_xlat;
                break;

        case VERIFY:
        case VERIFY_16:
                return ata_scsi_verify_xlat;

        case ATA_12:
        case ATA_16:
                return ata_scsi_pass_thru;

        case START_STOP:
                return ata_scsi_start_stop_xlat;
        }

        return NULL;
}

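/**
 *	ata_scsi_dump_cdb - dump SCSI command contents to dmesg
 *	@ap: ATA port to which the command was being sent
 *	@cmd: SCSI command to dump
 *
 *	Prints the contents of a SCSI command via DPRINTK(); compiled
 *	out entirely unless ATA_DEBUG is defined.
 */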
static inline void ata_scsi_dump_cdb(struct ata_port *ap,
                                     struct scsi_cmnd *cmd)
{
#ifdef ATA_DEBUG
        struct scsi_device *scsidev = cmd->device;
        u8 *scsicmd = cmd->cmnd;

        DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
                ap->print_id,
                scsidev->channel, scsidev->id, scsidev->lun,
                scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3],
                scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7],
                scsicmd[8]);
#endif
}

static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
                                      void (*done)(struct scsi_cmnd *),
                                      struct ata_device *dev)
{
        u8 scsi_op = scmd->cmnd[0];
        ata_xlat_func_t xlat_func;
        int rc = 0;

        if (dev->class == ATA_DEV_ATA) {
                if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
                        goto bad_cdb_len;

                xlat_func = ata_get_xlat_func(dev, scsi_op);
        } else {
                if (unlikely(!scmd->cmd_len))
                        goto bad_cdb_len;

                xlat_func = NULL;
                if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
                        /* relay SCSI command to ATAPI device */
                        int len = COMMAND_SIZE(scsi_op);
                        if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
                                goto bad_cdb_len;

                        xlat_func = atapi_xlat;
                } else {
                        /* ATA_16 passthru, treat as an ATA command */
                        if (unlikely(scmd->cmd_len > 16))
                                goto bad_cdb_len;

                        xlat_func = ata_get_xlat_func(dev, scsi_op);
                }
        }

        if (xlat_func)
                rc = ata_scsi_translate(dev, scmd, done, xlat_func);
        else
                ata_scsi_simulate(dev, scmd, done);

        return rc;

 bad_cdb_len:
        DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
                scmd->cmd_len, scsi_op, dev->cdb_len);
        scmd->result = DID_ERROR << 16;
        done(scmd);
        return 0;
}

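/**
 *	ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
 *	@cmd: SCSI command to be sent
 *	@done: Completion function, called when command is complete
 *
 *	In some cases, this function translates SCSI commands into
 *	ATA taskfiles, and queues the taskfiles to be sent to
 *	hardware.  In other cases, this function simulates a
 *	SCSI device by evaluating and responding to certain
 *	SCSI commands.  This creates the overall effect of
 *	ATA and ATAPI devices appearing as SCSI devices.
 *
 *	LOCKING:
 *	Called with the SCSI host lock held; drops it and takes the
 *	ATA port lock for the duration of the call.
 *
 *	RETURNS:
 *	Return value from __ata_scsi_queuecmd() if @cmd can be
 *	queued, 0 otherwise.
 */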
int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
        struct ata_port *ap;
        struct ata_device *dev;
        struct scsi_device *scsidev = cmd->device;
        struct Scsi_Host *shost = scsidev->host;
        int rc = 0;

        ap = ata_shost_to_port(shost);

        spin_unlock(shost->host_lock);
        spin_lock(ap->lock);

        ata_scsi_dump_cdb(ap, cmd);

        dev = ata_scsi_find_dev(ap, scsidev);
        if (likely(dev))
                rc = __ata_scsi_queuecmd(cmd, done, dev);
        else {
                cmd->result = (DID_BAD_TARGET << 16);
                done(cmd);
        }

        spin_unlock(ap->lock);
        spin_lock(shost->host_lock);
        return rc;
}

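/**
 *	ata_scsi_simulate - simulate SCSI command on ATA device
 *	@dev: the target device
 *	@cmd: SCSI command being sent to device
 *	@done: SCSI command completion function
 *
 *	Interprets and directly executes a select list of SCSI commands
 *	that can be handled internally, without being sent to the drive.
 *
 *	LOCKING:
 *	Called with the port lock held (see ata_scsi_queuecmd() above).
 */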
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
                       void (*done)(struct scsi_cmnd *))
{
        struct ata_scsi_args args;
        const u8 *scsicmd = cmd->cmnd;
        u8 tmp8;

        args.dev = dev;
        args.id = dev->id;
        args.cmd = cmd;
        args.done = done;

        switch(scsicmd[0]) {

        case FORMAT_UNIT:
                ata_scsi_invalid_field(cmd, done);
                break;

        case INQUIRY:
                if (scsicmd[1] & 2)                /* is CmdDt set?  */
                        ata_scsi_invalid_field(cmd, done);
                else if ((scsicmd[1] & 1) == 0)    /* is EVPD clear? */
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
                else switch (scsicmd[2]) {
                case 0x00:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00);
                        break;
                case 0x80:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80);
                        break;
                case 0x83:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83);
                        break;
                case 0x89:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89);
                        break;
                case 0xb1:
                        ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
                        break;
                default:
                        ata_scsi_invalid_field(cmd, done);
                        break;
                }
                break;

        case MODE_SENSE:
        case MODE_SENSE_10:
                ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense);
                break;

        case MODE_SELECT:       /* unconditionally return */
        case MODE_SELECT_10:    /* bad-field-in-cdb */
                ata_scsi_invalid_field(cmd, done);
                break;

        case READ_CAPACITY:
                ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
                break;

        case SERVICE_ACTION_IN:
                if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
                        ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
                else
                        ata_scsi_invalid_field(cmd, done);
                break;

        case REPORT_LUNS:
                ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns);
                break;

        case REQUEST_SENSE:
                ata_scsi_set_sense(cmd, 0, 0, 0);
                cmd->result = (DRIVER_SENSE << 24);
                done(cmd);
                break;

        /* if we reach this, then writeback caching is disabled,
         * turning this into a no-op.
         */
        case SYNCHRONIZE_CACHE:
                /* fall through */

        /* no-op's, complete with success */
        case REZERO_UNIT:
        case SEEK_6:
        case SEEK_10:
        case TEST_UNIT_READY:
                ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
                break;

        case SEND_DIAGNOSTIC:
                tmp8 = scsicmd[1] & ~(1 << 3);
                if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
                        ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
                else
                        ata_scsi_invalid_field(cmd, done);
                break;

        /* all other commands */
        default:
                ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
                /* "Invalid command operation code" */
                done(cmd);
                break;
        }
}

int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
{
        int i, rc;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                struct Scsi_Host *shost;

                rc = -ENOMEM;
                shost = scsi_host_alloc(sht, sizeof(struct ata_port *));
                if (!shost)
                        goto err_alloc;

                *(struct ata_port **)&shost->hostdata[0] = ap;
                ap->scsi_host = shost;

                shost->transportt = &ata_scsi_transport_template;
                shost->unique_id = ap->print_id;
                shost->max_id = 16;
                shost->max_lun = 1;
                shost->max_channel = 1;
                shost->max_cmd_len = 16;

                /* Scheduling is deferred via the ->qc_defer() callback,
                 * which needs to see every deferred qc.  Keep
                 * max_host_blocked at 1 so the SCSI midlayer doesn't
                 * start deferring commands on its own.
                 */
                shost->max_host_blocked = 1;

                rc = scsi_add_host(ap->scsi_host, ap->host->dev);
                if (rc)
                        goto err_add;
        }

        return 0;

 err_add:
        scsi_host_put(host->ports[i]->scsi_host);
 err_alloc:
        while (--i >= 0) {
                struct Scsi_Host *shost = host->ports[i]->scsi_host;

                scsi_remove_host(shost);
                scsi_host_put(shost);
        }
        return rc;
}

void ata_scsi_scan_host(struct ata_port *ap, int sync)
{
        int tries = 5;
        struct ata_device *last_failed_dev = NULL;
        struct ata_link *link;
        struct ata_device *dev;

        if (ap->flags & ATA_FLAG_DISABLED)
                return;

 repeat:
        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ENABLED) {
                        struct scsi_device *sdev;
                        int channel = 0, id = 0;

                        if (dev->sdev)
                                continue;

                        if (ata_is_host_link(link))
                                id = dev->devno;
                        else
                                channel = link->pmp;

                        sdev = __scsi_add_device(ap->scsi_host, channel, id, 0,
                                                 NULL);
                        if (!IS_ERR(sdev)) {
                                dev->sdev = sdev;
                                scsi_device_put(sdev);
                        }
                }
        }

        /* If the scan raced with EH or hit an allocation failure,
         * __scsi_add_device() may have failed silently.  Check
         * whether all enabled devices got attached.
         */
        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ENABLED) {
                        if (!dev->sdev)
                                goto exit_loop;
                }
        }
 exit_loop:
        if (!link)
                return;

        /* we're missing some SCSI devices */
        if (sync) {
                /* If caller requested synchronous scan and we've made
                 * any progress, sleep briefly and repeat.
                 */
                if (dev != last_failed_dev) {
                        msleep(100);
                        last_failed_dev = dev;
                        goto repeat;
                }

                /* No progress this round; retry a few more times
                 * before giving up and falling back to async scan.
                 */
                if (--tries) {
                        msleep(100);
                        goto repeat;
                }

                ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
                                "failed without making any progress,\n"
                                "         switching to async\n");
        }

        queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
                           round_jiffies_relative(HZ));
}

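/**
 *	ata_scsi_offline_dev - offline attached SCSI device
 *	@dev: ATA device to offline attached SCSI device for
 *
 *	Put the SCSI device attached to @dev, if any, into the
 *	SDEV_OFFLINE state.
 *
 *	LOCKING:
 *	dev->sdev is updated under the port lock (see
 *	ata_scsi_remove_dev() below), so callers are expected to
 *	hold it.
 *
 *	RETURNS:
 *	1 if an attached SCSI device exists, 0 otherwise.
 */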
int ata_scsi_offline_dev(struct ata_device *dev)
{
        if (dev->sdev) {
                scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
                return 1;
        }
        return 0;
}

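/**
 *	ata_scsi_remove_dev - remove attached SCSI device
 *	@dev: ATA device to remove attached SCSI device for
 *
 *	Detach the SCSI device attached to @dev, if any, coordinating
 *	with concurrent scans via scan_mutex and with dev->sdev users
 *	via the port lock.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */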
static void ata_scsi_remove_dev(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;
        struct scsi_device *sdev;
        unsigned long flags;

        /* Take scan_mutex and the port lock so the sdev cannot change
         * state or be re-attached while we clear dev->sdev.
         */
        mutex_lock(&ap->scsi_host->scan_mutex);
        spin_lock_irqsave(ap->lock, flags);

        /* clearing dev->sdev is protected by the port lock */
        sdev = dev->sdev;
        dev->sdev = NULL;

        if (sdev) {
                /* If a user-initiated unplug races with us, the sdev
                 * could go away once the locks are dropped; pin it
                 * with a reference.
                 */
                if (scsi_device_get(sdev) == 0) {
                        /* Make sure the attached sdev is offline
                         * before the locks are released.
                         */
                        scsi_device_set_state(sdev, SDEV_OFFLINE);
                } else {
                        WARN_ON(1);
                        sdev = NULL;
                }
        }

        spin_unlock_irqrestore(ap->lock, flags);
        mutex_unlock(&ap->scsi_host->scan_mutex);

        if (sdev) {
                ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
                               dev_name(&sdev->sdev_gendev));

                scsi_remove_device(sdev);
                scsi_device_put(sdev);
        }
}

static void ata_scsi_handle_link_detach(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ata_device *dev;

        ata_for_each_dev(dev, link, ALL) {
                unsigned long flags;

                if (!(dev->flags & ATA_DFLAG_DETACHED))
                        continue;

                spin_lock_irqsave(ap->lock, flags);
                dev->flags &= ~ATA_DFLAG_DETACHED;
                spin_unlock_irqrestore(ap->lock, flags);

                ata_scsi_remove_dev(dev);
        }
}

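/**
 *	ata_scsi_media_change_notify - send media change event
 *	@dev: Pointer to the disk device with media change event
 *
 *	Tell the SCSI layer to send a media change notification
 *	event.  Uses GFP_ATOMIC, so it is safe from atomic context.
 */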
void ata_scsi_media_change_notify(struct ata_device *dev)
{
        if (dev->sdev)
                sdev_evt_send_simple(dev->sdev, SDEV_EVT_MEDIA_CHANGE,
                                     GFP_ATOMIC);
}

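/**
 *	ata_scsi_hotplug - SCSI part of hotplug
 *	@work: Pointer to ATA port to perform SCSI hotplug on
 *
 *	Perform the SCSI part of hotplug: unplug devices that were
 *	marked as detached, then scan for newly attached ones.  Runs
 *	from the ata_aux_wq workqueue via ap->hotplug_task.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */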
void ata_scsi_hotplug(struct work_struct *work)
{
        struct ata_port *ap =
                container_of(work, struct ata_port, hotplug_task.work);
        int i;

        if (ap->pflags & ATA_PFLAG_UNLOADING) {
                DPRINTK("ENTER/EXIT - unloading\n");
                return;
        }

        DPRINTK("ENTER\n");

        /* Unplug detached devices.  Iterate over the host link and
         * every possible PMP link unconditionally; PMP links must be
         * handled even if the PMP itself is no longer attached.
         */
        ata_scsi_handle_link_detach(&ap->link);
        if (ap->pmp_link)
                for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
                        ata_scsi_handle_link_detach(&ap->pmp_link[i]);

        /* scan for new ones */
        ata_scsi_scan_host(ap, 0);

        DPRINTK("EXIT\n");
}

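/**
 *	ata_scsi_user_scan - indication for user-initiated bus scan
 *	@shost: SCSI host to scan
 *	@channel: Channel to scan
 *	@id: ID to scan
 *	@lun: LUN to scan
 *
 *	This function is called when user explicitly requests bus
 *	scan.  Set probe pending flag and invoke EH.
 *
 *	RETURNS:
 *	Zero on success, negative errno on failure.
 */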
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
                              unsigned int id, unsigned int lun)
{
        struct ata_port *ap = ata_shost_to_port(shost);
        unsigned long flags;
        int devno, rc = 0;

        if (!ap->ops->error_handler)
                return -EOPNOTSUPP;

        if (lun != SCAN_WILD_CARD && lun)
                return -EINVAL;

        if (!sata_pmp_attached(ap)) {
                if (channel != SCAN_WILD_CARD && channel)
                        return -EINVAL;
                devno = id;
        } else {
                if (id != SCAN_WILD_CARD && id)
                        return -EINVAL;
                devno = channel;
        }

        spin_lock_irqsave(ap->lock, flags);

        if (devno == SCAN_WILD_CARD) {
                struct ata_link *link;

                ata_for_each_link(link, ap, EDGE) {
                        struct ata_eh_info *ehi = &link->eh_info;
                        ehi->probe_mask |= ATA_ALL_DEVICES;
                        ehi->action |= ATA_EH_RESET;
                }
        } else {
                struct ata_device *dev = ata_find_dev(ap, devno);

                if (dev) {
                        struct ata_eh_info *ehi = &dev->link->eh_info;
                        ehi->probe_mask |= 1 << dev->devno;
                        ehi->action |= ATA_EH_RESET;
                } else
                        rc = -EINVAL;
        }

        if (rc == 0) {
                ata_port_schedule_eh(ap);
                spin_unlock_irqrestore(ap->lock, flags);
                ata_port_wait_eh(ap);
        } else
                spin_unlock_irqrestore(ap->lock, flags);

        return rc;
}

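/**
 *	ata_scsi_dev_rescan - initiate scsi_rescan_device()
 *	@work: Pointer to ATA port to perform scsi_rescan_device()
 *
 *	Rescan every attached SCSI device so that changes made behind
 *	the SCSI layer's back are propagated to it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */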
void ata_scsi_dev_rescan(struct work_struct *work)
{
        struct ata_port *ap =
                container_of(work, struct ata_port, scsi_rescan_task);
        struct ata_link *link;
        struct ata_device *dev;
        unsigned long flags;

        spin_lock_irqsave(ap->lock, flags);

        ata_for_each_link(link, ap, EDGE) {
                ata_for_each_dev(dev, link, ENABLED) {
                        struct scsi_device *sdev = dev->sdev;

                        if (!sdev)
                                continue;
                        if (scsi_device_get(sdev))
                                continue;

                        spin_unlock_irqrestore(ap->lock, flags);
                        scsi_rescan_device(&(sdev->sdev_gendev));
                        scsi_device_put(sdev);
                        spin_lock_irqsave(ap->lock, flags);
                }
        }

        spin_unlock_irqrestore(ap->lock, flags);
}

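/**
 *	ata_sas_port_alloc - Allocate port for a SAS attached SATA device
 *	@host: ATA host container for all SAS ports
 *	@port_info: Information from low-level host driver
 *	@shost: SCSI host that the scsi device is attached to
 *
 *	RETURNS:
 *	ata_port pointer on success / NULL on failure.
 */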
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
                                    struct ata_port_info *port_info,
                                    struct Scsi_Host *shost)
{
        struct ata_port *ap;

        ap = ata_port_alloc(host);
        if (!ap)
                return NULL;

        ap->port_no = 0;
        ap->lock = shost->host_lock;
        ap->pio_mask = port_info->pio_mask;
        ap->mwdma_mask = port_info->mwdma_mask;
        ap->udma_mask = port_info->udma_mask;
        ap->flags |= port_info->flags;
        ap->ops = port_info->port_ops;
        ap->cbl = ATA_CBL_SATA;

        return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);

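/**
 *	ata_sas_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Intended for use as the port_start() entry in
 *	ata_port_operations for SAS-attached ports; nothing needs to
 *	be done here, presumably because the SAS LLDD owns the
 *	resources.
 */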
int ata_sas_port_start(struct ata_port *ap)
{
        return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);

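/**
 *	ata_sas_port_stop - Undo ata_sas_port_start()
 *	@ap: Port to shut down
 *
 *	No-op counterpart of ata_sas_port_start().
 */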
void ata_sas_port_stop(struct ata_port *ap)
{
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);

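/**
 *	ata_sas_port_init - Initialize a SATA device found via SAS
 *	@ap: SATA port to initialize
 *
 *	Start the port and probe the ATA bus behind it.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */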
int ata_sas_port_init(struct ata_port *ap)
{
        int rc = ap->ops->port_start(ap);

        if (!rc) {
                ap->print_id = ata_print_id++;
                rc = ata_bus_probe(ap);
        }

        return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);

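/**
 *	ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
 *	@ap: SATA port to destroy
 *
 *	Stop the port via ->port_stop(), if provided, and free it.
 */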
void ata_sas_port_destroy(struct ata_port *ap)
{
        if (ap->ops->port_stop)
                ap->ops->port_stop(ap);
        kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);

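/**
 *	ata_sas_slave_configure - Default slave_configure routine for libata devices
 *	@sdev: SCSI device to configure
 *	@ap: ATA port to which SCSI device is attached
 *
 *	Apply the standard libata SCSI device and ATA device
 *	configuration to @sdev.
 *
 *	RETURNS:
 *	Zero.
 */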
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
        ata_scsi_sdev_config(sdev);
        ata_scsi_dev_config(sdev, ap->link.device);
        return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);

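/**
 *	ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
 *	@cmd: SCSI command to be sent
 *	@done: Completion function, called when command is complete
 *	@ap: ATA port to which the command is being sent
 *
 *	Like ata_scsi_queuecmd() but for SAS-attached ports, where the
 *	port carries exactly one device on its host link.
 *
 *	RETURNS:
 *	Return value from __ata_scsi_queuecmd() if @cmd can be
 *	queued, 0 otherwise.
 */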
int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
                     struct ata_port *ap)
{
        int rc = 0;

        ata_scsi_dump_cdb(ap, cmd);

        if (likely(ata_dev_enabled(ap->link.device)))
                rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
        else {
                cmd->result = (DID_BAD_TARGET << 16);
                done(cmd);
        }
        return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);