/*
 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
 * (mpt3sas_scsih.c)
 */

45#include <linux/module.h>
46#include <linux/kernel.h>
47#include <linux/init.h>
48#include <linux/errno.h>
49#include <linux/blkdev.h>
50#include <linux/sched.h>
51#include <linux/workqueue.h>
52#include <linux/delay.h>
53#include <linux/pci.h>
54#include <linux/interrupt.h>
55#include <linux/aer.h>
56#include <linux/raid_class.h>
57#include <linux/blk-mq-pci.h>
58#include <asm/unaligned.h>
59
60#include "mpt3sas_base.h"
61
62#define RAID_CHANNEL 1
63
64#define PCIE_CHANNEL 2
65
66
67static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69static void _firmware_event_work(struct work_struct *work);
70
71static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78static void
79_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
81static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
82
83
84LIST_HEAD(mpt3sas_ioc_list);
85
86DEFINE_SPINLOCK(gioc_lock);
87
88MODULE_AUTHOR(MPT3SAS_AUTHOR);
89MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
90MODULE_LICENSE("GPL");
91MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
92MODULE_ALIAS("mpt2sas");
93
94
95static u8 scsi_io_cb_idx = -1;
96static u8 tm_cb_idx = -1;
97static u8 ctl_cb_idx = -1;
98static u8 base_cb_idx = -1;
99static u8 port_enable_cb_idx = -1;
100static u8 transport_cb_idx = -1;
101static u8 scsih_cb_idx = -1;
102static u8 config_cb_idx = -1;
103static int mpt2_ids;
104static int mpt3_ids;
105
106static u8 tm_tr_cb_idx = -1 ;
107static u8 tm_tr_volume_cb_idx = -1 ;
108static u8 tm_sas_control_cb_idx = -1;
109
110
111static u32 logging_level;
112MODULE_PARM_DESC(logging_level,
113 " bits for enabling additional logging info (default=0)");
114
115
116static ushort max_sectors = 0xFFFF;
117module_param(max_sectors, ushort, 0444);
118MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
119
120
121static int missing_delay[2] = {-1, -1};
122module_param_array(missing_delay, int, NULL, 0444);
123MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
124
125
126#define MPT3SAS_MAX_LUN (16895)
127static u64 max_lun = MPT3SAS_MAX_LUN;
128module_param(max_lun, ullong, 0444);
129MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130
131static ushort hbas_to_enumerate;
132module_param(hbas_to_enumerate, ushort, 0444);
133MODULE_PARM_DESC(hbas_to_enumerate,
134 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
135 1 - enumerates only SAS 2.0 generation HBAs\n \
136 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Either bit can be set, or both
 */
145static int diag_buffer_enable = -1;
146module_param(diag_buffer_enable, int, 0444);
147MODULE_PARM_DESC(diag_buffer_enable,
148 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
149static int disable_discovery = -1;
150module_param(disable_discovery, int, 0444);
151MODULE_PARM_DESC(disable_discovery, " disable discovery ");
/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
155static int prot_mask = -1;
156module_param(prot_mask, int, 0444);
157MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
158
159static bool enable_sdev_max_qd;
160module_param(enable_sdev_max_qd, bool, 0444);
161MODULE_PARM_DESC(enable_sdev_max_qd,
162 "Enable sdev max qd as can_queue, def=disabled(0)");
163
164static int multipath_on_hba = -1;
165module_param(multipath_on_hba, int, 0);
166MODULE_PARM_DESC(multipath_on_hba,
167 "Multipath support to add same target device\n\t\t"
168 "as many times as it is visible to HBA from various paths\n\t\t"
169 "(by default:\n\t\t"
170 "\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
171 "\t SAS 3.5 HBA - This will be enabled)");
172
173static int host_tagset_enable = 1;
174module_param(host_tagset_enable, int, 0444);
175MODULE_PARM_DESC(host_tagset_enable,
176 "Shared host tagset enable/disable Default: enable(1)");
177
178
179static struct raid_template *mpt3sas_raid_template;
180static struct raid_template *mpt2sas_raid_template;
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
189struct sense_info {
190 u8 skey;
191 u8 asc;
192 u8 ascq;
193};
194
195#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
196#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
197#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
198#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
199#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object for processing the event
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * These objects are queued on the ioc's firmware event list.
 */
215struct fw_event_work {
216 struct list_head list;
217 struct work_struct work;
218
219 struct MPT3SAS_ADAPTER *ioc;
220 u16 device_handle;
221 u8 VF_ID;
222 u8 VP_ID;
223 u8 ignore;
224 u16 event;
225 struct kref refcount;
226 char event_data[] __aligned(4);
227};
228
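/**
 * fw_event_work_free - kref release callback that frees a fw_event_work
 * @r: kref embedded in the fw_event_work object
 */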
229static void fw_event_work_free(struct kref *r)
230{
231 kfree(container_of(r, struct fw_event_work, refcount));
232}
233
234static void fw_event_work_get(struct fw_event_work *fw_work)
235{
236 kref_get(&fw_work->refcount);
237}
238
239static void fw_event_work_put(struct fw_event_work *fw_work)
240{
241 kref_put(&fw_work->refcount, fw_event_work_free);
242}
243
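/**
 * alloc_fw_event_work - allocate a refcounted fw_event_work object
 * @len: length of the event data payload that follows the structure
 *
 * Return: the new object with its refcount initialized to 1, or NULL on
 * allocation failure.
 */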
244static struct fw_event_work *alloc_fw_event_work(int len)
245{
246 struct fw_event_work *fw_event;
247
248 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
249 if (!fw_event)
250 return NULL;
251
252 kref_init(&fw_event->refcount);
253 return fw_event;
254}
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 */
281struct _scsi_io_transfer {
282 u16 handle;
283 u8 is_raid;
284 enum dma_data_direction dir;
285 u32 data_length;
286 dma_addr_t data_dma;
287 u8 sense[SCSI_SENSE_BUFFERSIZE];
288 u32 lun;
289 u8 cdb_length;
290 u8 cdb[32];
291 u8 timeout;
292 u8 VF_ID;
293 u8 VP_ID;
294 u8 valid_reply;
295
296 u32 sense_length;
297 u16 ioc_status;
298 u8 scsi_state;
299 u8 scsi_status;
300 u32 log_info;
301 u32 transfer_length;
302};
/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
 * @val: value of the parameter to be set
 * @kp: pointer to kernel_param structure
 *
 * Note: The logging levels are defined in mpt3sas_debug.h.
 */
311static int
312_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
313{
314 int ret = param_set_int(val, kp);
315 struct MPT3SAS_ADAPTER *ioc;
316
317 if (ret)
318 return ret;
319
320 pr_info("setting logging_level(0x%08x)\n", logging_level);
321 spin_lock(&gioc_lock);
322 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
323 ioc->logging_level = logging_level;
324 spin_unlock(&gioc_lock);
325 return 0;
326}
327module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
328 &logging_level, 0644);
/**
 * _scsih_srch_boot_sas_address - search based on sas_address
 * @sas_address: sas address
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
337static inline int
338_scsih_srch_boot_sas_address(u64 sas_address,
339 Mpi2BootDeviceSasWwid_t *boot_device)
340{
341 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
342}
/**
 * _scsih_srch_boot_device_name - search based on device name
 * @device_name: device name specified in the IDENTIFY frame
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
351static inline int
352_scsih_srch_boot_device_name(u64 device_name,
353 Mpi2BootDeviceDeviceName_t *boot_device)
354{
355 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
356}
/**
 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
 * @enclosure_logical_id: enclosure logical id
 * @slot_number: slot number
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
366static inline int
367_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
368 Mpi2BootDeviceEnclosureSlot_t *boot_device)
369{
370 return (enclosure_logical_id == le64_to_cpu(boot_device->
371 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
372 SlotNumber)) ? 1 : 0;
373}
/**
 * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
 *			    port number from port list
 * @ioc: per adapter object
 * @port_id: port number
 * @bypass_dirty_port_flag: when set, return a matching hba port entry even
 *			    if it is marked as dirty
 *
 * Search for the hba port entry corresponding to the provided port number;
 * if available return the port object, otherwise return NULL.
 */
386struct hba_port *
387mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
388 u8 port_id, u8 bypass_dirty_port_flag)
389{
390 struct hba_port *port, *port_next;
 /*
  * When multipath_on_hba is disabled, search the hba_port entry
  * using the default port id (MULTIPATH_DISABLED_PORT_ID).
  */
397 if (!ioc->multipath_on_hba)
398 port_id = MULTIPATH_DISABLED_PORT_ID;
399
400 list_for_each_entry_safe(port, port_next,
401 &ioc->port_table_list, list) {
402 if (port->port_id != port_id)
403 continue;
404 if (bypass_dirty_port_flag)
405 return port;
406 if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
407 continue;
408 return port;
409 }
 /*
  * Allocate a hba_port object for the default port id when
  * multipath_on_hba is disabled, and add it to port_table_list.
  */
416 if (!ioc->multipath_on_hba) {
417 port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
418 if (!port)
419 return NULL;
420
421 port->port_id = port_id;
422 ioc_info(ioc,
423 "hba_port entry: %p, port: %d is added to hba_port list\n",
424 port, port->port_id);
425 list_add_tail(&port->list,
426 &ioc->port_table_list);
427 return port;
428 }
429 return NULL;
430}
/**
 * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
 * @ioc: per adapter object
 * @port: hba_port object
 * @phy: phy number
 *
 * Return: virtual_phy object corresponding to phy number, or NULL.
 */
440struct virtual_phy *
441mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
442 struct hba_port *port, u32 phy)
443{
444 struct virtual_phy *vphy, *vphy_next;
445
446 if (!port->vphys_mask)
447 return NULL;
448
449 list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
450 if (vphy->phy_mask & (1 << phy))
451 return vphy;
452 }
453 return NULL;
454}
/**
 * _scsih_is_boot_device - search for matching boot device.
 * @sas_address: sas address
 * @device_name: device name specified in the IDENTIFY frame
 * @enclosure_logical_id: enclosure logical id
 * @slot: slot number
 * @form: specifies boot device form
 * @boot_device: boot device object from bios page 2
 *
 * Return: 1 when there's a match, 0 means no match.
 */
467static int
468_scsih_is_boot_device(u64 sas_address, u64 device_name,
469 u64 enclosure_logical_id, u16 slot, u8 form,
470 Mpi2BiosPage2BootDevice_t *boot_device)
471{
472 int rc = 0;
473
474 switch (form) {
475 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
476 if (!sas_address)
477 break;
478 rc = _scsih_srch_boot_sas_address(
479 sas_address, &boot_device->SasWwid);
480 break;
481 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
482 if (!enclosure_logical_id)
483 break;
484 rc = _scsih_srch_boot_encl_slot(
485 enclosure_logical_id,
486 slot, &boot_device->EnclosureSlot);
487 break;
488 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
489 if (!device_name)
490 break;
491 rc = _scsih_srch_boot_device_name(
492 device_name, &boot_device->DeviceName);
493 break;
494 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
495 break;
496 }
497
498 return rc;
499}
/**
 * _scsih_get_sas_address - set the sas_address for given device handle
 * @ioc: per adapter object
 * @handle: device handle
 * @sas_address: sas address
 *
 * Return: 0 success, non-zero when failure
 */
509static int
510_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
511 u64 *sas_address)
512{
513 Mpi2SasDevicePage0_t sas_device_pg0;
514 Mpi2ConfigReply_t mpi_reply;
515 u32 ioc_status;
516
517 *sas_address = 0;
518
519 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
520 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
521 ioc_err(ioc, "failure at %s:%d/%s()!\n",
522 __FILE__, __LINE__, __func__);
523 return -ENXIO;
524 }
525
526 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
527 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
 /* For direct attached devices (handle within the HBA phy range)
  * that are not the virtual SES device, report the HBA's own
  * SAS address.
  */
531 if ((handle <= ioc->sas_hba.num_phys) &&
532 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
533 MPI2_SAS_DEVICE_INFO_SEP)))
534 *sas_address = ioc->sas_hba.sas_address;
535 else
536 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
537 return 0;
538 }
539
540
541 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
542 return -ENXIO;
543
544
545 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
546 handle, ioc_status, __FILE__, __LINE__, __func__);
547 return -EIO;
548}
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device, pcie_device or raid_device object
 * @channel: SAS, PCIe or RAID channel
 *
 * Determines whether this device should be the first reported device to
 * scsi-ml or the sas transport; the purpose is persistent boot device.
 * There are three types of boot device:
 * 1. req_boot_device - requested boot device
 * 2. req_alt_boot_device - requested alternate boot device
 * 3. current_boot_device - current boot device
 */
563static void
564_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
565 u32 channel)
566{
567 struct _sas_device *sas_device;
568 struct _pcie_device *pcie_device;
569 struct _raid_device *raid_device;
570 u64 sas_address;
571 u64 device_name;
572 u64 enclosure_logical_id;
573 u16 slot;
574
575
576 if (!ioc->is_driver_loading)
577 return;
578
579
580 if (!ioc->bios_pg3.BiosVersion)
581 return;
582
583 if (channel == RAID_CHANNEL) {
584 raid_device = device;
585 sas_address = raid_device->wwid;
586 device_name = 0;
587 enclosure_logical_id = 0;
588 slot = 0;
589 } else if (channel == PCIE_CHANNEL) {
590 pcie_device = device;
591 sas_address = pcie_device->wwid;
592 device_name = 0;
593 enclosure_logical_id = 0;
594 slot = 0;
595 } else {
596 sas_device = device;
597 sas_address = sas_device->sas_address;
598 device_name = sas_device->device_name;
599 enclosure_logical_id = sas_device->enclosure_logical_id;
600 slot = sas_device->slot;
601 }
602
603 if (!ioc->req_boot_device.device) {
604 if (_scsih_is_boot_device(sas_address, device_name,
605 enclosure_logical_id, slot,
606 (ioc->bios_pg2.ReqBootDeviceForm &
607 MPI2_BIOSPAGE2_FORM_MASK),
608 &ioc->bios_pg2.RequestedBootDevice)) {
609 dinitprintk(ioc,
610 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
611 __func__, (u64)sas_address));
612 ioc->req_boot_device.device = device;
613 ioc->req_boot_device.channel = channel;
614 }
615 }
616
617 if (!ioc->req_alt_boot_device.device) {
618 if (_scsih_is_boot_device(sas_address, device_name,
619 enclosure_logical_id, slot,
620 (ioc->bios_pg2.ReqAltBootDeviceForm &
621 MPI2_BIOSPAGE2_FORM_MASK),
622 &ioc->bios_pg2.RequestedAltBootDevice)) {
623 dinitprintk(ioc,
624 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
625 __func__, (u64)sas_address));
626 ioc->req_alt_boot_device.device = device;
627 ioc->req_alt_boot_device.channel = channel;
628 }
629 }
630
631 if (!ioc->current_boot_device.device) {
632 if (_scsih_is_boot_device(sas_address, device_name,
633 enclosure_logical_id, slot,
634 (ioc->bios_pg2.CurrentBootDeviceForm &
635 MPI2_BIOSPAGE2_FORM_MASK),
636 &ioc->bios_pg2.CurrentBootDevice)) {
637 dinitprintk(ioc,
638 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
639 __func__, (u64)sas_address));
640 ioc->current_boot_device.device = device;
641 ioc->current_boot_device.channel = channel;
642 }
643 }
644}
645
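/**
 * __mpt3sas_get_sdev_from_target - sas device search based on target
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: Caller must hold ioc->sas_device_lock.
 *
 * Returns the target's sas_device with an elevated reference count,
 * or NULL if none is attached.
 */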
646static struct _sas_device *
647__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
648 struct MPT3SAS_TARGET *tgt_priv)
649{
650 struct _sas_device *ret;
651
652 assert_spin_locked(&ioc->sas_device_lock);
653
654 ret = tgt_priv->sas_dev;
655 if (ret)
656 sas_device_get(ret);
657
658 return ret;
659}
660
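/**
 * mpt3sas_get_sdev_from_target - sas device search based on target
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: This function acquires and releases ioc->sas_device_lock.
 */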
661static struct _sas_device *
662mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
663 struct MPT3SAS_TARGET *tgt_priv)
664{
665 struct _sas_device *ret;
666 unsigned long flags;
667
668 spin_lock_irqsave(&ioc->sas_device_lock, flags);
669 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
670 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
671
672 return ret;
673}
674
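/**
 * __mpt3sas_get_pdev_from_target - pcie device search based on target
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: Caller must hold ioc->pcie_device_lock.
 *
 * Returns the target's pcie_device with an elevated reference count,
 * or NULL if none is attached.
 */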
675static struct _pcie_device *
676__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
677 struct MPT3SAS_TARGET *tgt_priv)
678{
679 struct _pcie_device *ret;
680
681 assert_spin_locked(&ioc->pcie_device_lock);
682
683 ret = tgt_priv->pcie_dev;
684 if (ret)
685 pcie_device_get(ret);
686
687 return ret;
688}
/**
 * mpt3sas_get_pdev_from_target - pcie device search based on target structure
 * @ioc: per adapter object
 * @tgt_priv: starget private object
 *
 * Context: This function acquires and releases ioc->pcie_device_lock.
 *
 * Returns the target's pcie_device object with an elevated reference count.
 */
700static struct _pcie_device *
701mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
702 struct MPT3SAS_TARGET *tgt_priv)
703{
704 struct _pcie_device *ret;
705 unsigned long flags;
706
707 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
708 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
709 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
710
711 return ret;
712}
/**
 * __mpt3sas_get_sdev_by_rphy - sas device search based on rphy object
 * @ioc: per adapter object
 * @rphy: sas_rphy pointer
 *
 * Context: Caller must hold ioc->sas_device_lock.
 *
 * Searches both the sas_device_list and sas_device_init_list for the
 * sas_device attached to @rphy and returns it with an elevated reference
 * count, or NULL if not found.
 */
726struct _sas_device *
727__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
728 struct sas_rphy *rphy)
729{
730 struct _sas_device *sas_device;
731
732 assert_spin_locked(&ioc->sas_device_lock);
733
734 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
735 if (sas_device->rphy != rphy)
736 continue;
737 sas_device_get(sas_device);
738 return sas_device;
739 }
740
741 sas_device = NULL;
742 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
743 if (sas_device->rphy != rphy)
744 continue;
745 sas_device_get(sas_device);
746 return sas_device;
747 }
748
749 return NULL;
750}
/**
 * __mpt3sas_get_sdev_by_addr - sas device search based on sas address & port
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: hba port entry
 *
 * Context: Caller must hold ioc->sas_device_lock.
 *
 * Search for a sas_device matching the provided sas address and hba port;
 * if available return it with an elevated reference count, else NULL.
 */
762struct _sas_device *
763__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
764 u64 sas_address, struct hba_port *port)
765{
766 struct _sas_device *sas_device;
767
768 if (!port)
769 return NULL;
770
771 assert_spin_locked(&ioc->sas_device_lock);
772
773 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
774 if (sas_device->sas_address != sas_address)
775 continue;
776 if (sas_device->port != port)
777 continue;
778 sas_device_get(sas_device);
779 return sas_device;
780 }
781
782 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
783 if (sas_device->sas_address != sas_address)
784 continue;
785 if (sas_device->port != port)
786 continue;
787 sas_device_get(sas_device);
788 return sas_device;
789 }
790
791 return NULL;
792}
/**
 * mpt3sas_get_sdev_by_addr - sas device search based on sas address & port
 * @ioc: per adapter object
 * @sas_address: device sas address
 * @port: hba port entry
 *
 * Context: This function acquires and releases ioc->sas_device_lock.
 */
804struct _sas_device *
805mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
806 u64 sas_address, struct hba_port *port)
807{
808 struct _sas_device *sas_device;
809 unsigned long flags;
810
811 spin_lock_irqsave(&ioc->sas_device_lock, flags);
812 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
813 sas_address, port);
814 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
815
816 return sas_device;
817}
818
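/**
 * __mpt3sas_get_sdev_by_handle - sas device search based on handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Context: Caller must hold ioc->sas_device_lock.
 */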
819static struct _sas_device *
820__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
821{
822 struct _sas_device *sas_device;
823
824 assert_spin_locked(&ioc->sas_device_lock);
825
826 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
827 if (sas_device->handle == handle)
828 goto found_device;
829
830 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
831 if (sas_device->handle == handle)
832 goto found_device;
833
834 return NULL;
835
836found_device:
837 sas_device_get(sas_device);
838 return sas_device;
839}
/**
 * mpt3sas_get_sdev_by_handle - sas device search based on handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Context: This function acquires and releases ioc->sas_device_lock.
 */
850struct _sas_device *
851mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
852{
853 struct _sas_device *sas_device;
854 unsigned long flags;
855
856 spin_lock_irqsave(&ioc->sas_device_lock, flags);
857 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
858 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
859
860 return sas_device;
861}
/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct
 * @starget: scsi target struct
 */
870static void
871_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
872 struct _sas_device *sas_device, struct scsi_device *sdev,
873 struct scsi_target *starget)
874{
875 if (sdev) {
876 if (sas_device->enclosure_handle != 0)
877 sdev_printk(KERN_INFO, sdev,
878 "enclosure logical id (0x%016llx), slot(%d) \n",
879 (unsigned long long)
880 sas_device->enclosure_logical_id,
881 sas_device->slot);
882 if (sas_device->connector_name[0] != '\0')
883 sdev_printk(KERN_INFO, sdev,
884 "enclosure level(0x%04x), connector name( %s)\n",
885 sas_device->enclosure_level,
886 sas_device->connector_name);
887 if (sas_device->is_chassis_slot_valid)
888 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
889 sas_device->chassis_slot);
890 } else if (starget) {
891 if (sas_device->enclosure_handle != 0)
892 starget_printk(KERN_INFO, starget,
893 "enclosure logical id(0x%016llx), slot(%d) \n",
894 (unsigned long long)
895 sas_device->enclosure_logical_id,
896 sas_device->slot);
897 if (sas_device->connector_name[0] != '\0')
898 starget_printk(KERN_INFO, starget,
899 "enclosure level(0x%04x), connector name( %s)\n",
900 sas_device->enclosure_level,
901 sas_device->connector_name);
902 if (sas_device->is_chassis_slot_valid)
903 starget_printk(KERN_INFO, starget,
904 "chassis slot(0x%04x)\n",
905 sas_device->chassis_slot);
906 } else {
907 if (sas_device->enclosure_handle != 0)
908 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
909 (u64)sas_device->enclosure_logical_id,
910 sas_device->slot);
911 if (sas_device->connector_name[0] != '\0')
912 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
913 sas_device->enclosure_level,
914 sas_device->connector_name);
915 if (sas_device->is_chassis_slot_valid)
916 ioc_info(ioc, "chassis slot(0x%04x)\n",
917 sas_device->chassis_slot);
918 }
919}
/**
 * _scsih_sas_device_remove - remove sas_device from list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * If sas_device is on the list, remove it and drop the list reference.
 */
929static void
930_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
931 struct _sas_device *sas_device)
932{
933 unsigned long flags;
934
935 if (!sas_device)
936 return;
937 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
938 sas_device->handle, (u64)sas_device->sas_address);
939
940 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
941
 /*
  * The lock serializes access to the list, but we still need to verify
  * that nobody removed the entry while we were waiting on the lock.
  */
946 spin_lock_irqsave(&ioc->sas_device_lock, flags);
947 if (!list_empty(&sas_device->list)) {
948 list_del_init(&sas_device->list);
949 sas_device_put(sas_device);
950 }
951 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
952}
/**
 * _scsih_device_remove_by_handle - removing device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
959static void
960_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
961{
962 struct _sas_device *sas_device;
963 unsigned long flags;
964
965 if (ioc->shost_recovery)
966 return;
967
968 spin_lock_irqsave(&ioc->sas_device_lock, flags);
969 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
970 if (sas_device) {
971 list_del_init(&sas_device->list);
972 sas_device_put(sas_device);
973 }
974 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
975 if (sas_device) {
976 _scsih_remove_device(ioc, sas_device);
977 sas_device_put(sas_device);
978 }
979}
/**
 * mpt3sas_device_remove_by_sas_address - removing device object by
 *					  sas address & port number
 * @ioc: per adapter object
 * @sas_address: device sas_address
 * @port: hba port entry
 */
990void
991mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
992 u64 sas_address, struct hba_port *port)
993{
994 struct _sas_device *sas_device;
995 unsigned long flags;
996
997 if (ioc->shost_recovery)
998 return;
999
1000 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1001 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1002 if (sas_device) {
1003 list_del_init(&sas_device->list);
1004 sas_device_put(sas_device);
1005 }
1006 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1007 if (sas_device) {
1008 _scsih_remove_device(ioc, sas_device);
1009 sas_device_put(sas_device);
1010 }
1011}
/**
 * _scsih_sas_device_add - insert sas_device to the list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adds the new object to ioc->sas_device_list and registers it with the
 * SAS transport layer.
 */
1021static void
1022_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
1023 struct _sas_device *sas_device)
1024{
1025 unsigned long flags;
1026
1027 dewtprintk(ioc,
1028 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1029 __func__, sas_device->handle,
1030 (u64)sas_device->sas_address));
1031
1032 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1033 NULL, NULL));
1034
1035 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1036 sas_device_get(sas_device);
1037 list_add_tail(&sas_device->list, &ioc->sas_device_list);
1038 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1039
1040 if (ioc->hide_drives) {
1041 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1042 return;
1043 }
1044
1045 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
1046 sas_device->sas_address_parent, sas_device->port)) {
1047 _scsih_sas_device_remove(ioc, sas_device);
1048 } else if (!sas_device->starget) {
  /*
   * The SCSI target was not created for this device. Devices cannot
   * be removed while the initial (asynchronous) scan is in progress,
   * so only undo the transport port add once the driver has loaded.
   */
1054 if (!ioc->is_driver_loading) {
1055 mpt3sas_transport_port_remove(ioc,
1056 sas_device->sas_address,
1057 sas_device->sas_address_parent,
1058 sas_device->port);
1059 _scsih_sas_device_remove(ioc, sas_device);
1060 }
1061 } else
1062 clear_bit(sas_device->handle, ioc->pend_os_device_add);
1063}
/**
 * _scsih_sas_device_init_add - insert sas_device to the init list.
 * @ioc: per adapter object
 * @sas_device: the sas_device object
 *
 * Context: This function will acquire ioc->sas_device_lock.
 *
 * Adds the new object, at driver load time, to ioc->sas_device_init_list
 * and evaluates it as a potential boot device.
 */
1073static void
1074_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1075 struct _sas_device *sas_device)
1076{
1077 unsigned long flags;
1078
1079 dewtprintk(ioc,
1080 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
1081 __func__, sas_device->handle,
1082 (u64)sas_device->sas_address));
1083
1084 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
1085 NULL, NULL));
1086
1087 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1088 sas_device_get(sas_device);
1089 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
1090 _scsih_determine_boot_device(ioc, sas_device, 0);
1091 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1092}
1093
1094
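/**
 * __mpt3sas_get_pdev_by_wwid - pcie device search based on wwid
 * @ioc: per adapter object
 * @wwid: wwid value of pcie device
 *
 * Context: Caller must hold ioc->pcie_device_lock.
 */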
1095static struct _pcie_device *
1096__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1097{
1098 struct _pcie_device *pcie_device;
1099
1100 assert_spin_locked(&ioc->pcie_device_lock);
1101
1102 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1103 if (pcie_device->wwid == wwid)
1104 goto found_device;
1105
1106 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1107 if (pcie_device->wwid == wwid)
1108 goto found_device;
1109
1110 return NULL;
1111
1112found_device:
1113 pcie_device_get(pcie_device);
1114 return pcie_device;
1115}
/**
 * mpt3sas_get_pdev_by_wwid - pcie device search based on wwid
 * @ioc: per adapter object
 * @wwid: wwid value of pcie device
 *
 * Context: This function acquires and releases ioc->pcie_device_lock.
 *
 * This searches for pcie_device based on wwid, then returns the pcie_device
 * object.
 */
1128static struct _pcie_device *
1129mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1130{
1131 struct _pcie_device *pcie_device;
1132 unsigned long flags;
1133
1134 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1135 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1136 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1137
1138 return pcie_device;
1139}
1140
1141
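/**
 * __mpt3sas_get_pdev_by_idchannel - pcie device search based on id and channel
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel number
 *
 * Context: Caller must hold ioc->pcie_device_lock.
 */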
1142static struct _pcie_device *
1143__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1144 int channel)
1145{
1146 struct _pcie_device *pcie_device;
1147
1148 assert_spin_locked(&ioc->pcie_device_lock);
1149
1150 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1151 if (pcie_device->id == id && pcie_device->channel == channel)
1152 goto found_device;
1153
1154 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1155 if (pcie_device->id == id && pcie_device->channel == channel)
1156 goto found_device;
1157
1158 return NULL;
1159
1160found_device:
1161 pcie_device_get(pcie_device);
1162 return pcie_device;
1163}
1164
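/**
 * __mpt3sas_get_pdev_by_handle - pcie device search based on handle
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Context: Caller must hold ioc->pcie_device_lock.
 */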
1165static struct _pcie_device *
1166__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1167{
1168 struct _pcie_device *pcie_device;
1169
1170 assert_spin_locked(&ioc->pcie_device_lock);
1171
1172 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1173 if (pcie_device->handle == handle)
1174 goto found_device;
1175
1176 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1177 if (pcie_device->handle == handle)
1178 goto found_device;
1179
1180 return NULL;
1181
1182found_device:
1183 pcie_device_get(pcie_device);
1184 return pcie_device;
1185}
/**
 * mpt3sas_get_pdev_by_handle - pcie device search based on handle
 * @ioc: per adapter object
 * @handle: firmware device handle
 *
 * Context: This function acquires and releases ioc->pcie_device_lock.
 *
 * This searches for pcie_device based on handle, then returns the
 * pcie_device object.
 */
1199struct _pcie_device *
1200mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1201{
1202 struct _pcie_device *pcie_device;
1203 unsigned long flags;
1204
1205 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1206 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1207 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1208
1209 return pcie_device;
1210}
/**
 * _scsih_set_nvme_max_shutdown_latency - update max_shutdown_latency.
 * @ioc: per adapter object
 *
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Update ioc->max_shutdown_latency to the highest shutdown latency among
 * the PCIe devices attached to the controller.
 */
1221static void
1222_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1223{
1224 struct _pcie_device *pcie_device;
1225 unsigned long flags;
1226 u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1227
1228 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1229 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1230 if (pcie_device->shutdown_latency) {
1231 if (shutdown_latency < pcie_device->shutdown_latency)
1232 shutdown_latency =
1233 pcie_device->shutdown_latency;
1234 }
1235 }
1236 ioc->max_shutdown_latency = shutdown_latency;
1237 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1238}
/**
 * _scsih_pcie_device_remove - remove pcie_device from list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * If pcie_device is on the list, remove it and drop the list reference.
 */
1248static void
1249_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1250 struct _pcie_device *pcie_device)
1251{
1252 unsigned long flags;
1253 int was_on_pcie_device_list = 0;
1254 u8 update_latency = 0;
1255
1256 if (!pcie_device)
1257 return;
1258 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1259 pcie_device->handle, (u64)pcie_device->wwid);
1260 if (pcie_device->enclosure_handle != 0)
1261 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1262 (u64)pcie_device->enclosure_logical_id,
1263 pcie_device->slot);
1264 if (pcie_device->connector_name[0] != '\0')
1265 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1266 pcie_device->enclosure_level,
1267 pcie_device->connector_name);
1268
1269 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1270 if (!list_empty(&pcie_device->list)) {
1271 list_del_init(&pcie_device->list);
1272 was_on_pcie_device_list = 1;
1273 }
1274 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1275 update_latency = 1;
1276 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1277 if (was_on_pcie_device_list) {
1278 kfree(pcie_device->serial_number);
1279 pcie_device_put(pcie_device);
1280 }
1281
1282
1283
1284
1285
1286
1287 if (update_latency)
1288 _scsih_set_nvme_max_shutdown_latency(ioc);
1289}
/**
 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
 * @ioc: per adapter object
 * @handle: device handle
 */
1297static void
1298_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1299{
1300 struct _pcie_device *pcie_device;
1301 unsigned long flags;
1302 int was_on_pcie_device_list = 0;
1303 u8 update_latency = 0;
1304
1305 if (ioc->shost_recovery)
1306 return;
1307
1308 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1309 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1310 if (pcie_device) {
1311 if (!list_empty(&pcie_device->list)) {
1312 list_del_init(&pcie_device->list);
1313 was_on_pcie_device_list = 1;
1314 pcie_device_put(pcie_device);
1315 }
1316 if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
1317 update_latency = 1;
1318 }
1319 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1320 if (was_on_pcie_device_list) {
1321 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1322 pcie_device_put(pcie_device);
1323 }
1324
1325
1326
1327
1328
1329
1330 if (update_latency)
1331 _scsih_set_nvme_max_shutdown_latency(ioc);
1332}
/**
 * _scsih_pcie_device_add - add pcie_device object
 * @ioc: per adapter object
 * @pcie_device: pcie_device object
 *
 * Adds the object to ioc->pcie_device_list and exposes it to the SCSI
 * midlayer on the PCIe channel.
 */
1341static void
1342_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1343 struct _pcie_device *pcie_device)
1344{
1345 unsigned long flags;
1346
1347 dewtprintk(ioc,
1348 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1349 __func__,
1350 pcie_device->handle, (u64)pcie_device->wwid));
1351 if (pcie_device->enclosure_handle != 0)
1352 dewtprintk(ioc,
1353 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1354 __func__,
1355 (u64)pcie_device->enclosure_logical_id,
1356 pcie_device->slot));
1357 if (pcie_device->connector_name[0] != '\0')
1358 dewtprintk(ioc,
1359 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1360 __func__, pcie_device->enclosure_level,
1361 pcie_device->connector_name));
1362
1363 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1364 pcie_device_get(pcie_device);
1365 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1366 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1367
1368 if (pcie_device->access_status ==
1369 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
1370 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1371 return;
1372 }
1373 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1374 _scsih_pcie_device_remove(ioc, pcie_device);
1375 } else if (!pcie_device->starget) {
1376 if (!ioc->is_driver_loading) {
1377
1378 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1379 }
1380 } else
1381 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1382}
/**
 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
 * @ioc: per adapter object
 * @pcie_device: the pcie_device object
 *
 * Context: This function will acquire ioc->pcie_device_lock.
 *
 * Adds the new object, at driver load time, to ioc->pcie_device_init_list
 * and evaluates it as a potential boot device.
 */
1392static void
1393_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1394 struct _pcie_device *pcie_device)
1395{
1396 unsigned long flags;
1397
1398 dewtprintk(ioc,
1399 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1400 __func__,
1401 pcie_device->handle, (u64)pcie_device->wwid));
1402 if (pcie_device->enclosure_handle != 0)
1403 dewtprintk(ioc,
1404 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1405 __func__,
1406 (u64)pcie_device->enclosure_logical_id,
1407 pcie_device->slot));
1408 if (pcie_device->connector_name[0] != '\0')
1409 dewtprintk(ioc,
1410 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1411 __func__, pcie_device->enclosure_level,
1412 pcie_device->connector_name));
1413
1414 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1415 pcie_device_get(pcie_device);
1416 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1417 if (pcie_device->access_status !=
1418 MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
1419 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1420 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1421}
/**
 * _scsih_raid_device_find_by_id - raid device search
 * @ioc: per adapter object
 * @id: sas device target id
 * @channel: sas device channel
 *
 * Context: Calling function should acquire ioc->raid_device_lock.
 *
 * This searches for raid_device based on target id, then returns the
 * raid_device object.
 */
1432static struct _raid_device *
1433_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1434{
1435 struct _raid_device *raid_device, *r;
1436
1437 r = NULL;
1438 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1439 if (raid_device->id == id && raid_device->channel == channel) {
1440 r = raid_device;
1441 goto out;
1442 }
1443 }
1444
1445 out:
1446 return r;
1447}
/**
 * mpt3sas_raid_device_find_by_handle - raid device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 *
 * Context: Calling function should acquire ioc->raid_device_lock.
 *
 * This searches for raid_device based on handle, then returns the
 * raid_device object.
 */
1458struct _raid_device *
1459mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1460{
1461 struct _raid_device *raid_device, *r;
1462
1463 r = NULL;
1464 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1465 if (raid_device->handle != handle)
1466 continue;
1467 r = raid_device;
1468 goto out;
1469 }
1470
1471 out:
1472 return r;
1473}
/**
 * _scsih_raid_device_find_by_wwid - raid device search
 * @ioc: per adapter object
 * @wwid: world wide identifier of the RAID volume
 *
 * Context: Calling function should acquire ioc->raid_device_lock.
 *
 * This searches for raid_device based on wwid, then returns the
 * raid_device object.
 */
1484static struct _raid_device *
1485_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1486{
1487 struct _raid_device *raid_device, *r;
1488
1489 r = NULL;
1490 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1491 if (raid_device->wwid != wwid)
1492 continue;
1493 r = raid_device;
1494 goto out;
1495 }
1496
1497 out:
1498 return r;
1499}
/**
 * _scsih_raid_device_add - add raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 *
 * This is added to the raid_device_list link list.
 */
1508static void
1509_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1510 struct _raid_device *raid_device)
1511{
1512 unsigned long flags;
1513
1514 dewtprintk(ioc,
1515 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1516 __func__,
1517 raid_device->handle, (u64)raid_device->wwid));
1518
1519 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1520 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1521 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1522}
/**
 * _scsih_raid_device_remove - delete raid_device object
 * @ioc: per adapter object
 * @raid_device: raid_device object
 */
1530static void
1531_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1532 struct _raid_device *raid_device)
1533{
1534 unsigned long flags;
1535
1536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1537 list_del(&raid_device->list);
1538 kfree(raid_device);
1539 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1540}
/**
 * mpt3sas_scsih_expander_find_by_handle - expander device search
 * @ioc: per adapter object
 * @handle: expander handle (assigned by firmware)
 *
 * Context: Calling function should acquire ioc->sas_node_lock.
 *
 * This searches for an expander device based on handle, then returns the
 * sas_node object.
 */
1551struct _sas_node *
1552mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1553{
1554 struct _sas_node *sas_expander, *r;
1555
1556 r = NULL;
1557 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1558 if (sas_expander->handle != handle)
1559 continue;
1560 r = sas_expander;
1561 goto out;
1562 }
1563 out:
1564 return r;
1565}
/**
 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
 * @ioc: per adapter object
 * @handle: enclosure handle (assigned by firmware)
 *
 * This searches for an enclosure device based on handle, then returns the
 * enclosure object.
 */
1576static struct _enclosure_node *
1577mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1578{
1579 struct _enclosure_node *enclosure_dev, *r;
1580
1581 r = NULL;
1582 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1583 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1584 continue;
1585 r = enclosure_dev;
1586 goto out;
1587 }
1588out:
1589 return r;
1590}
/**
 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
 * @ioc: per adapter object
 * @sas_address: sas address of expander
 * @port: hba port entry
 *
 * Context: Calling function should acquire ioc->sas_node_lock.
 *
 * This searches for an expander device based on sas address & port number,
 * then returns the sas_node object.
 */
1601struct _sas_node *
1602mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1603 u64 sas_address, struct hba_port *port)
1604{
1605 struct _sas_node *sas_expander, *r = NULL;
1606
1607 if (!port)
1608 return r;
1609
1610 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1611 if (sas_expander->sas_address != sas_address)
1612 continue;
1613 if (sas_expander->port != port)
1614 continue;
1615 r = sas_expander;
1616 goto out;
1617 }
1618 out:
1619 return r;
1620}
/**
 * _scsih_expander_node_add - insert expander device to the list.
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * Context: This function will acquire ioc->sas_node_lock.
 *
 * Adding new object to the ioc->sas_expander_list.
 */
1630static void
1631_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1632 struct _sas_node *sas_expander)
1633{
1634 unsigned long flags;
1635
1636 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1637 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1638 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1639}
/**
 * _scsih_is_end_device - determines if device is an end device
 * @device_info: bitfield providing information about the device
 *
 * Return: 1 if end device (SSP, STP or SATA target), else 0.
 */
1648static int
1649_scsih_is_end_device(u32 device_info)
1650{
1651 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1652 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1653 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1654 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1655 return 1;
1656 else
1657 return 0;
1658}
/**
 * _scsih_is_nvme_pciescsi_device - determines if device is a PCIe
 *				    NVMe/SCSI device
 * @device_info: bitfield providing information about the device
 *
 * Return: 1 if device is a PCIe device of type NVMe or SCSI, else 0.
 */
1668static int
1669_scsih_is_nvme_pciescsi_device(u32 device_info)
1670{
1671 if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1672 == MPI26_PCIE_DEVINFO_NVME) ||
1673 ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1674 == MPI26_PCIE_DEVINFO_SCSI))
1675 return 1;
1676 else
1677 return 0;
1678}
/**
 * _scsih_scsi_lookup_find_by_target - search for matching channel:id
 * @ioc: per adapter object
 * @id: target id
 * @channel: channel
 *
 * Return: 1 if any outstanding SCSI command is found for the given
 * channel:id, otherwise 0.
 */
1690static u8
1691_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1692 int channel)
1693{
1694 int smid;
1695 struct scsi_cmnd *scmd;
1696
1697 for (smid = 1;
1698 smid <= ioc->shost->can_queue; smid++) {
1699 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1700 if (!scmd)
1701 continue;
1702 if (scmd->device->id == id &&
1703 scmd->device->channel == channel)
1704 return 1;
1705 }
1706 return 0;
1707}
/**
 * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
 * @ioc: per adapter object
 * @id: target id
 * @lun: lun number
 * @channel: channel
 *
 * Return: 1 if any outstanding SCSI command is found for the given
 * channel:id:lun, otherwise 0.
 */
1720static u8
1721_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1722 unsigned int lun, int channel)
1723{
1724 int smid;
1725 struct scsi_cmnd *scmd;
1726
1727 for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1728
1729 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1730 if (!scmd)
1731 continue;
1732 if (scmd->device->id == id &&
1733 scmd->device->channel == channel &&
1734 scmd->device->lun == lun)
1735 return 1;
1736 }
1737 return 0;
1738}
/**
 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the scmd stored for @smid, or NULL if the smid is not in use.
 */
1748struct scsi_cmnd *
1749mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1750{
1751 struct scsi_cmnd *scmd = NULL;
1752 struct scsiio_tracker *st;
1753 Mpi25SCSIIORequest_t *mpi_request;
1754 u16 tag = smid - 1;
1755
1756 if (smid > 0 &&
1757 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1758 u32 unique_tag =
1759 ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
1760
1761 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  /*
   * If a SCSI IO request is outstanding at the driver level, the
   * DevHandle field must be non-zero. A zero DevHandle means this
   * smid is free at the driver level, so return NULL.
   */
1769 if (!mpi_request->DevHandle)
1770 return scmd;
1771
1772 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1773 if (scmd) {
1774 st = scsi_cmd_priv(scmd);
1775 if (st->cb_idx == 0xFF || st->smid == 0)
1776 scmd = NULL;
1777 }
1778 }
1779 return scmd;
1780}
/**
 * scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Return: queue depth.
 */
1789static int
1790scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1791{
1792 struct Scsi_Host *shost = sdev->host;
1793 int max_depth;
1794 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1795 struct MPT3SAS_DEVICE *sas_device_priv_data;
1796 struct MPT3SAS_TARGET *sas_target_priv_data;
1797 struct _sas_device *sas_device;
1798 unsigned long flags;
1799
1800 max_depth = shost->can_queue;
1801
 /*
  * limit max device queue for SATA to 32 if enable_sdev_max_qd
  * is disabled.
  */
1806 if (ioc->enable_sdev_max_qd)
1807 goto not_sata;
1808
1809 sas_device_priv_data = sdev->hostdata;
1810 if (!sas_device_priv_data)
1811 goto not_sata;
1812 sas_target_priv_data = sas_device_priv_data->sas_target;
1813 if (!sas_target_priv_data)
1814 goto not_sata;
1815 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1816 goto not_sata;
1817
1818 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1819 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1820 if (sas_device) {
1821 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1822 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1823
1824 sas_device_put(sas_device);
1825 }
1826 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1827
1828 not_sata:
1829
1830 if (!sdev->tagged_supported)
1831 max_depth = 1;
1832 if (qdepth > max_depth)
1833 qdepth = max_depth;
1834 scsi_change_queue_depth(sdev, qdepth);
1835 sdev_printk(KERN_INFO, sdev,
1836 "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1837 sdev->queue_depth, sdev->tagged_supported,
1838 sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1839 return sdev->queue_depth;
1840}
/**
 * mpt3sas_scsih_change_queue_depth - setting device queue depth
 * @sdev: scsi device struct
 * @qdepth: requested queue depth
 *
 * Returns nothing.
 */
1849void
1850mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1851{
1852 struct Scsi_Host *shost = sdev->host;
1853 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1854
1855 if (ioc->enable_sdev_max_qd)
1856 qdepth = shost->can_queue;
1857
1858 scsih_change_queue_depth(sdev, qdepth);
1859}
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
1868static int
1869scsih_target_alloc(struct scsi_target *starget)
1870{
1871 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1872 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1873 struct MPT3SAS_TARGET *sas_target_priv_data;
1874 struct _sas_device *sas_device;
1875 struct _raid_device *raid_device;
1876 struct _pcie_device *pcie_device;
1877 unsigned long flags;
1878 struct sas_rphy *rphy;
1879
1880 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1881 GFP_KERNEL);
1882 if (!sas_target_priv_data)
1883 return -ENOMEM;
1884
1885 starget->hostdata = sas_target_priv_data;
1886 sas_target_priv_data->starget = starget;
1887 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1888
1889
1890 if (starget->channel == RAID_CHANNEL) {
1891 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1892 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1893 starget->channel);
1894 if (raid_device) {
1895 sas_target_priv_data->handle = raid_device->handle;
1896 sas_target_priv_data->sas_address = raid_device->wwid;
1897 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1898 if (ioc->is_warpdrive)
1899 sas_target_priv_data->raid_device = raid_device;
1900 raid_device->starget = starget;
1901 }
1902 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1903 return 0;
1904 }
1905
1906
1907 if (starget->channel == PCIE_CHANNEL) {
1908 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1909 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1910 starget->channel);
1911 if (pcie_device) {
1912 sas_target_priv_data->handle = pcie_device->handle;
1913 sas_target_priv_data->sas_address = pcie_device->wwid;
1914 sas_target_priv_data->port = NULL;
1915 sas_target_priv_data->pcie_dev = pcie_device;
1916 pcie_device->starget = starget;
1917 pcie_device->id = starget->id;
1918 pcie_device->channel = starget->channel;
1919 sas_target_priv_data->flags |=
1920 MPT_TARGET_FLAGS_PCIE_DEVICE;
1921 if (pcie_device->fast_path)
1922 sas_target_priv_data->flags |=
1923 MPT_TARGET_FASTPATH_IO;
1924 }
1925 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1926 return 0;
1927 }
1928
1929
1930 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1931 rphy = dev_to_rphy(starget->dev.parent);
1932 sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);
1933
1934 if (sas_device) {
1935 sas_target_priv_data->handle = sas_device->handle;
1936 sas_target_priv_data->sas_address = sas_device->sas_address;
1937 sas_target_priv_data->port = sas_device->port;
1938 sas_target_priv_data->sas_dev = sas_device;
1939 sas_device->starget = starget;
1940 sas_device->id = starget->id;
1941 sas_device->channel = starget->channel;
1942 if (test_bit(sas_device->handle, ioc->pd_handles))
1943 sas_target_priv_data->flags |=
1944 MPT_TARGET_FLAGS_RAID_COMPONENT;
1945 if (sas_device->fast_path)
1946 sas_target_priv_data->flags |=
1947 MPT_TARGET_FASTPATH_IO;
1948 }
1949 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1950
1951 return 0;
1952}
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 */
1958static void
1959scsih_target_destroy(struct scsi_target *starget)
1960{
1961 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1962 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1963 struct MPT3SAS_TARGET *sas_target_priv_data;
1964 struct _sas_device *sas_device;
1965 struct _raid_device *raid_device;
1966 struct _pcie_device *pcie_device;
1967 unsigned long flags;
1968
1969 sas_target_priv_data = starget->hostdata;
1970 if (!sas_target_priv_data)
1971 return;
1972
1973 if (starget->channel == RAID_CHANNEL) {
1974 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1975 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1976 starget->channel);
1977 if (raid_device) {
1978 raid_device->starget = NULL;
1979 raid_device->sdev = NULL;
1980 }
1981 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1982 goto out;
1983 }
1984
1985 if (starget->channel == PCIE_CHANNEL) {
1986 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1987 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1988 sas_target_priv_data);
1989 if (pcie_device && (pcie_device->starget == starget) &&
1990 (pcie_device->id == starget->id) &&
1991 (pcie_device->channel == starget->channel))
1992 pcie_device->starget = NULL;
1993
1994 if (pcie_device) {
1995
1996
1997
1998 sas_target_priv_data->pcie_dev = NULL;
1999 pcie_device_put(pcie_device);
2000 pcie_device_put(pcie_device);
2001 }
2002 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2003 goto out;
2004 }
2005
2006 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2007 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
2008 if (sas_device && (sas_device->starget == starget) &&
2009 (sas_device->id == starget->id) &&
2010 (sas_device->channel == starget->channel))
2011 sas_device->starget = NULL;
2012
2013 if (sas_device) {
2014
2015
2016
2017 sas_target_priv_data->sas_dev = NULL;
2018 sas_device_put(sas_device);
2019
2020 sas_device_put(sas_device);
2021 }
2022 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2023
2024 out:
2025 kfree(sas_target_priv_data);
2026 starget->hostdata = NULL;
2027}
/**
 * scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
2036static int
2037scsih_slave_alloc(struct scsi_device *sdev)
2038{
2039 struct Scsi_Host *shost;
2040 struct MPT3SAS_ADAPTER *ioc;
2041 struct MPT3SAS_TARGET *sas_target_priv_data;
2042 struct MPT3SAS_DEVICE *sas_device_priv_data;
2043 struct scsi_target *starget;
2044 struct _raid_device *raid_device;
2045 struct _sas_device *sas_device;
2046 struct _pcie_device *pcie_device;
2047 unsigned long flags;
2048
2049 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
2050 GFP_KERNEL);
2051 if (!sas_device_priv_data)
2052 return -ENOMEM;
2053
2054 sas_device_priv_data->lun = sdev->lun;
2055 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
2056
2057 starget = scsi_target(sdev);
2058 sas_target_priv_data = starget->hostdata;
2059 sas_target_priv_data->num_luns++;
2060 sas_device_priv_data->sas_target = sas_target_priv_data;
2061 sdev->hostdata = sas_device_priv_data;
2062 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
2063 sdev->no_uld_attach = 1;
2064
2065 shost = dev_to_shost(&starget->dev);
2066 ioc = shost_priv(shost);
2067 if (starget->channel == RAID_CHANNEL) {
2068 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2069 raid_device = _scsih_raid_device_find_by_id(ioc,
2070 starget->id, starget->channel);
2071 if (raid_device)
2072 raid_device->sdev = sdev;
2073 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2074 }
2075 if (starget->channel == PCIE_CHANNEL) {
2076 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2077 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2078 sas_target_priv_data->sas_address);
2079 if (pcie_device && (pcie_device->starget == NULL)) {
2080 sdev_printk(KERN_INFO, sdev,
2081 "%s : pcie_device->starget set to starget @ %d\n",
2082 __func__, __LINE__);
2083 pcie_device->starget = starget;
2084 }
2085
2086 if (pcie_device)
2087 pcie_device_put(pcie_device);
2088 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2089
2090 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2091 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2092 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2093 sas_target_priv_data->sas_address,
2094 sas_target_priv_data->port);
2095 if (sas_device && (sas_device->starget == NULL)) {
2096 sdev_printk(KERN_INFO, sdev,
2097 "%s : sas_device->starget set to starget @ %d\n",
2098 __func__, __LINE__);
2099 sas_device->starget = starget;
2100 }
2101
2102 if (sas_device)
2103 sas_device_put(sas_device);
2104
2105 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2106 }
2107
2108 return 0;
2109}
/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 */
2115static void
2116scsih_slave_destroy(struct scsi_device *sdev)
2117{
2118 struct MPT3SAS_TARGET *sas_target_priv_data;
2119 struct scsi_target *starget;
2120 struct Scsi_Host *shost;
2121 struct MPT3SAS_ADAPTER *ioc;
2122 struct _sas_device *sas_device;
2123 struct _pcie_device *pcie_device;
2124 unsigned long flags;
2125
2126 if (!sdev->hostdata)
2127 return;
2128
2129 starget = scsi_target(sdev);
2130 sas_target_priv_data = starget->hostdata;
2131 sas_target_priv_data->num_luns--;
2132
2133 shost = dev_to_shost(&starget->dev);
2134 ioc = shost_priv(shost);
2135
2136 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2137 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2138 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
2139 sas_target_priv_data);
2140 if (pcie_device && !sas_target_priv_data->num_luns)
2141 pcie_device->starget = NULL;
2142
2143 if (pcie_device)
2144 pcie_device_put(pcie_device);
2145
2146 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2147
2148 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
2149 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2150 sas_device = __mpt3sas_get_sdev_from_target(ioc,
2151 sas_target_priv_data);
2152 if (sas_device && !sas_target_priv_data->num_luns)
2153 sas_device->starget = NULL;
2154
2155 if (sas_device)
2156 sas_device_put(sas_device);
2157 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2158 }
2159
2160 kfree(sdev->hostdata);
2161 sdev->hostdata = NULL;
2162}
/**
 * _scsih_display_sata_capabilities - sata capabilities
 * @ioc: per adapter object
 * @handle: device handle
 * @sdev: scsi device struct
 */
2170static void
2171_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2172 u16 handle, struct scsi_device *sdev)
2173{
2174 Mpi2ConfigReply_t mpi_reply;
2175 Mpi2SasDevicePage0_t sas_device_pg0;
2176 u32 ioc_status;
2177 u16 flags;
2178 u32 device_info;
2179
2180 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2181 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2182 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2183 __FILE__, __LINE__, __func__);
2184 return;
2185 }
2186
2187 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2188 MPI2_IOCSTATUS_MASK;
2189 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2190 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2191 __FILE__, __LINE__, __func__);
2192 return;
2193 }
2194
2195 flags = le16_to_cpu(sas_device_pg0.Flags);
2196 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2197
2198 sdev_printk(KERN_INFO, sdev,
2199 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2200 "sw_preserve(%s)\n",
2201 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2202 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2203 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2204 "n",
2205 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2206 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2207 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2208}
/**
 * scsih_is_raid - return boolean indicating device is raid volume
 * @dev: the device struct object
 */
2221static int
2222scsih_is_raid(struct device *dev)
2223{
2224 struct scsi_device *sdev = to_scsi_device(dev);
2225 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2226
2227 if (ioc->is_warpdrive)
2228 return 0;
2229 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2230}
2231
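/**
 * scsih_is_nvme - return boolean indicating device is on the PCIe channel
 * @dev: the device struct object
 */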
2232static int
2233scsih_is_nvme(struct device *dev)
2234{
2235 struct scsi_device *sdev = to_scsi_device(dev);
2236
2237 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2238}
/**
 * scsih_get_resync - get raid volume resync percent complete
 * @dev: the device struct object
 */
2244static void
2245scsih_get_resync(struct device *dev)
2246{
2247 struct scsi_device *sdev = to_scsi_device(dev);
2248 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2249 static struct _raid_device *raid_device;
2250 unsigned long flags;
2251 Mpi2RaidVolPage0_t vol_pg0;
2252 Mpi2ConfigReply_t mpi_reply;
2253 u32 volume_status_flags;
2254 u8 percent_complete;
2255 u16 handle;
2256
2257 percent_complete = 0;
2258 handle = 0;
2259 if (ioc->is_warpdrive)
2260 goto out;
2261
2262 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2263 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2264 sdev->channel);
2265 if (raid_device) {
2266 handle = raid_device->handle;
2267 percent_complete = raid_device->percent_complete;
2268 }
2269 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2270
2271 if (!handle)
2272 goto out;
2273
2274 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2275 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2276 sizeof(Mpi2RaidVolPage0_t))) {
2277 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2278 __FILE__, __LINE__, __func__);
2279 percent_complete = 0;
2280 goto out;
2281 }
2282
2283 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2284 if (!(volume_status_flags &
2285 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2286 percent_complete = 0;
2287
2288 out:
2289
2290 switch (ioc->hba_mpi_version_belonged) {
2291 case MPI2_VERSION:
2292 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2293 break;
2294 case MPI25_VERSION:
2295 case MPI26_VERSION:
2296 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2297 break;
2298 }
2299}
/**
 * scsih_get_state - get raid volume level
 * @dev: the device struct object
 */
2305static void
2306scsih_get_state(struct device *dev)
2307{
2308 struct scsi_device *sdev = to_scsi_device(dev);
2309 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2310 static struct _raid_device *raid_device;
2311 unsigned long flags;
2312 Mpi2RaidVolPage0_t vol_pg0;
2313 Mpi2ConfigReply_t mpi_reply;
2314 u32 volstate;
2315 enum raid_state state = RAID_STATE_UNKNOWN;
2316 u16 handle = 0;
2317
2318 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2319 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2320 sdev->channel);
2321 if (raid_device)
2322 handle = raid_device->handle;
2323 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2324
2325 if (!raid_device)
2326 goto out;
2327
2328 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2329 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2330 sizeof(Mpi2RaidVolPage0_t))) {
2331 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2332 __FILE__, __LINE__, __func__);
2333 goto out;
2334 }
2335
2336 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2337 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2338 state = RAID_STATE_RESYNCING;
2339 goto out;
2340 }
2341
2342 switch (vol_pg0.VolumeState) {
2343 case MPI2_RAID_VOL_STATE_OPTIMAL:
2344 case MPI2_RAID_VOL_STATE_ONLINE:
2345 state = RAID_STATE_ACTIVE;
2346 break;
2347 case MPI2_RAID_VOL_STATE_DEGRADED:
2348 state = RAID_STATE_DEGRADED;
2349 break;
2350 case MPI2_RAID_VOL_STATE_FAILED:
2351 case MPI2_RAID_VOL_STATE_MISSING:
2352 state = RAID_STATE_OFFLINE;
2353 break;
2354 }
2355 out:
2356 switch (ioc->hba_mpi_version_belonged) {
2357 case MPI2_VERSION:
2358 raid_set_state(mpt2sas_raid_template, dev, state);
2359 break;
2360 case MPI25_VERSION:
2361 case MPI26_VERSION:
2362 raid_set_state(mpt3sas_raid_template, dev, state);
2363 break;
2364 }
2365}
/**
 * _scsih_set_level - set raid level
 * @ioc: per adapter object
 * @sdev: scsi device struct
 * @volume_type: volume type
 */
2373static void
2374_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2375 struct scsi_device *sdev, u8 volume_type)
2376{
2377 enum raid_level level = RAID_LEVEL_UNKNOWN;
2378
2379 switch (volume_type) {
2380 case MPI2_RAID_VOL_TYPE_RAID0:
2381 level = RAID_LEVEL_0;
2382 break;
2383 case MPI2_RAID_VOL_TYPE_RAID10:
2384 level = RAID_LEVEL_10;
2385 break;
2386 case MPI2_RAID_VOL_TYPE_RAID1E:
2387 level = RAID_LEVEL_1E;
2388 break;
2389 case MPI2_RAID_VOL_TYPE_RAID1:
2390 level = RAID_LEVEL_1;
2391 break;
2392 }
2393
2394 switch (ioc->hba_mpi_version_belonged) {
2395 case MPI2_VERSION:
2396 raid_set_level(mpt2sas_raid_template,
2397 &sdev->sdev_gendev, level);
2398 break;
2399 case MPI25_VERSION:
2400 case MPI26_VERSION:
2401 raid_set_level(mpt3sas_raid_template,
2402 &sdev->sdev_gendev, level);
2403 break;
2404 }
2405}
/**
 * _scsih_get_volume_capabilities - volume capabilities
 * @ioc: per adapter object
 * @raid_device: the raid_device object
 *
 * Return: 0 for success, else 1.
 */
2415static int
2416_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2417 struct _raid_device *raid_device)
2418{
2419 Mpi2RaidVolPage0_t *vol_pg0;
2420 Mpi2RaidPhysDiskPage0_t pd_pg0;
2421 Mpi2SasDevicePage0_t sas_device_pg0;
2422 Mpi2ConfigReply_t mpi_reply;
2423 u16 sz;
2424 u8 num_pds;
2425
2426 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2427 &num_pds)) || !num_pds) {
2428 dfailprintk(ioc,
2429 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2430 __FILE__, __LINE__, __func__));
2431 return 1;
2432 }
2433
2434 raid_device->num_pds = num_pds;
2435 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2436 sizeof(Mpi2RaidVol0PhysDisk_t));
2437 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2438 if (!vol_pg0) {
2439 dfailprintk(ioc,
2440 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2441 __FILE__, __LINE__, __func__));
2442 return 1;
2443 }
2444
2445 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2446 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2447 dfailprintk(ioc,
2448 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2449 __FILE__, __LINE__, __func__));
2450 kfree(vol_pg0);
2451 return 1;
2452 }
2453
2454 raid_device->volume_type = vol_pg0->VolumeType;
2455
2456
2457
2458
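	/*
	 * Figure out what kind of devices make up the volume by reading
	 * the device info bits of its first physical disk.
	 */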
2459 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2460 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2461 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2462 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2463 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2464 le16_to_cpu(pd_pg0.DevHandle)))) {
2465 raid_device->device_info =
2466 le32_to_cpu(sas_device_pg0.DeviceInfo);
2467 }
2468 }
2469
2470 kfree(vol_pg0);
2471 return 0;
2472}
2473
2474
2475
2476
2477
2478
2479
2480
2481
2482
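/**
 * _scsih_enable_tlr - enable Transport Layer Retries on tape devices
 * @ioc: per adapter object
 * @sdev: scsi device struct
 *
 * Only acts on TYPE_TAPE devices, and only when the IOC reports the TLR
 * capability in its facts.
 */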
2483static void
2484_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2485{
2486
2487
2488 if (sdev->type != TYPE_TAPE)
2489 return;
2490
2491 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2492 return;
2493
2494 sas_enable_tlr(sdev);
2495 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2496 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2497 return;
2498
2499}
2500
2501
2502
2503
2504
2505
2506
2507
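/**
 * scsih_slave_configure - device configure routine
 * @sdev: scsi device struct
 *
 * Sets the queue depth, raid level and request queue limits for the
 * device, depending on whether it is a RAID volume, an NVMe (PCIe)
 * device or a bare SAS/SATA end device.
 *
 * Return: 0 on success, non-zero if the device lookup or the required
 * config page reads fail.
 */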
2508static int
2509scsih_slave_configure(struct scsi_device *sdev)
2510{
2511 struct Scsi_Host *shost = sdev->host;
2512 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2513 struct MPT3SAS_DEVICE *sas_device_priv_data;
2514 struct MPT3SAS_TARGET *sas_target_priv_data;
2515 struct _sas_device *sas_device;
2516 struct _pcie_device *pcie_device;
2517 struct _raid_device *raid_device;
2518 unsigned long flags;
2519 int qdepth;
2520 u8 ssp_target = 0;
2521 char *ds = "";
2522 char *r_level = "";
2523 u16 handle, volume_handle = 0;
2524 u64 volume_wwid = 0;
2525
2526 qdepth = 1;
2527 sas_device_priv_data = sdev->hostdata;
2528 sas_device_priv_data->configured_lun = 1;
2529 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2530 sas_target_priv_data = sas_device_priv_data->sas_target;
2531 handle = sas_target_priv_data->handle;
2532
2533
2534 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2535
2536 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2537 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2538 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2539 if (!raid_device) {
2540 dfailprintk(ioc,
2541 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2542 __FILE__, __LINE__, __func__));
2543 return 1;
2544 }
2545
2546 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2547 dfailprintk(ioc,
2548 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2549 __FILE__, __LINE__, __func__));
2550 return 1;
2551 }
2552
2553
2554
2555
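		/* WarpDrive: set up the per-volume data used for direct I/O. */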
2556 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2557
2558
2559
2560
2561
2562
2563 if (raid_device->device_info &
2564 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2565 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2566 ds = "SSP";
2567 } else {
2568 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2569 if (raid_device->device_info &
2570 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2571 ds = "SATA";
2572 else
2573 ds = "STP";
2574 }
2575
2576 switch (raid_device->volume_type) {
2577 case MPI2_RAID_VOL_TYPE_RAID0:
2578 r_level = "RAID0";
2579 break;
2580 case MPI2_RAID_VOL_TYPE_RAID1E:
2581 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2582 if (ioc->manu_pg10.OEMIdentifier &&
2583 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2584 MFG10_GF0_R10_DISPLAY) &&
2585 !(raid_device->num_pds % 2))
2586 r_level = "RAID10";
2587 else
2588 r_level = "RAID1E";
2589 break;
2590 case MPI2_RAID_VOL_TYPE_RAID1:
2591 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2592 r_level = "RAID1";
2593 break;
2594 case MPI2_RAID_VOL_TYPE_RAID10:
2595 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2596 r_level = "RAID10";
2597 break;
2598 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2599 default:
2600 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2601 r_level = "RAIDX";
2602 break;
2603 }
2604
2605 if (!ioc->hide_ir_msg)
2606 sdev_printk(KERN_INFO, sdev,
2607 "%s: handle(0x%04x), wwid(0x%016llx),"
2608 " pd_count(%d), type(%s)\n",
2609 r_level, raid_device->handle,
2610 (unsigned long long)raid_device->wwid,
2611 raid_device->num_pds, ds);
2612
2613 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2614 blk_queue_max_hw_sectors(sdev->request_queue,
2615 MPT3SAS_RAID_MAX_SECTORS);
2616 sdev_printk(KERN_INFO, sdev,
2617 "Set queue's max_sector to: %u\n",
2618 MPT3SAS_RAID_MAX_SECTORS);
2619 }
2620
2621 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2622
2623
2624 if (!ioc->is_warpdrive)
2625 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2626 return 0;
2627 }
2628
2629
2630 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2631 if (mpt3sas_config_get_volume_handle(ioc, handle,
2632 &volume_handle)) {
2633 dfailprintk(ioc,
2634 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2635 __FILE__, __LINE__, __func__));
2636 return 1;
2637 }
2638 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2639 volume_handle, &volume_wwid)) {
2640 dfailprintk(ioc,
2641 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2642 __FILE__, __LINE__, __func__));
2643 return 1;
2644 }
2645 }
2646
2647
2648 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2649 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2650 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2651 sas_device_priv_data->sas_target->sas_address);
2652 if (!pcie_device) {
2653 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2654 dfailprintk(ioc,
2655 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2656 __FILE__, __LINE__, __func__));
2657 return 1;
2658 }
2659
2660 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2661 ds = "NVMe";
2662 sdev_printk(KERN_INFO, sdev,
2663 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2664 ds, handle, (unsigned long long)pcie_device->wwid,
2665 pcie_device->port_num);
2666 if (pcie_device->enclosure_handle != 0)
2667 sdev_printk(KERN_INFO, sdev,
2668 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2669 ds,
2670 (unsigned long long)pcie_device->enclosure_logical_id,
2671 pcie_device->slot);
2672 if (pcie_device->connector_name[0] != '\0')
2673 sdev_printk(KERN_INFO, sdev,
2674 "%s: enclosure level(0x%04x),"
2675 "connector name( %s)\n", ds,
2676 pcie_device->enclosure_level,
2677 pcie_device->connector_name);
2678
2679 if (pcie_device->nvme_mdts)
2680 blk_queue_max_hw_sectors(sdev->request_queue,
2681 pcie_device->nvme_mdts/512);
2682
2683 pcie_device_put(pcie_device);
2684 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2685 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2686
2687
2688
2689
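		/*
		 * NVMe requests are translated into native NVMe PRPs, so
		 * bios must not be merged across the controller page size;
		 * disable merging and set the virt boundary accordingly.
		 */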
2690 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2691 sdev->request_queue);
2692 blk_queue_virt_boundary(sdev->request_queue,
2693 ioc->page_size - 1);
2694 return 0;
2695 }
2696
2697 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2698 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2699 sas_device_priv_data->sas_target->sas_address,
2700 sas_device_priv_data->sas_target->port);
2701 if (!sas_device) {
2702 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2703 dfailprintk(ioc,
2704 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2705 __FILE__, __LINE__, __func__));
2706 return 1;
2707 }
2708
2709 sas_device->volume_handle = volume_handle;
2710 sas_device->volume_wwid = volume_wwid;
2711 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2712 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2713 ssp_target = 1;
2714 if (sas_device->device_info &
2715 MPI2_SAS_DEVICE_INFO_SEP) {
2716 sdev_printk(KERN_WARNING, sdev,
2717 "set ignore_delay_remove for handle(0x%04x)\n",
2718 sas_device_priv_data->sas_target->handle);
2719 sas_device_priv_data->ignore_delay_remove = 1;
2720 ds = "SES";
2721 } else
2722 ds = "SSP";
2723 } else {
2724 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2725 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2726 ds = "STP";
2727 else if (sas_device->device_info &
2728 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2729 ds = "SATA";
2730 }
2731
2732 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2733 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2734 ds, handle, (unsigned long long)sas_device->sas_address,
2735 sas_device->phy, (unsigned long long)sas_device->device_name);
2736
2737 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2738
2739 sas_device_put(sas_device);
2740 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2741
2742 if (!ssp_target)
2743 _scsih_display_sata_capabilities(ioc, handle, sdev);
2744
2745
2746 mpt3sas_scsih_change_queue_depth(sdev, qdepth);
2747
2748 if (ssp_target) {
2749 sas_read_port_mode_page(sdev);
2750 _scsih_enable_tlr(ioc, sdev);
2751 }
2752
2753 return 0;
2754}
2755
2756
2757
2758
2759
2760
2761
2762
2763
2764
2765
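/**
 * scsih_bios_param - fetch head, sector, cylinder info for a disk
 * @sdev: scsi device struct
 * @bdev: pointer to block device context
 * @capacity: device size (in 512 byte sectors)
 * @params: three element array to place output:
 *	params[0] number of heads (max 255)
 *	params[1] number of sectors (max 63)
 *	params[2] number of cylinders
 */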
2766static int
2767scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2768 sector_t capacity, int params[])
2769{
2770 int heads;
2771 int sectors;
2772 sector_t cylinders;
2773 ulong dummy;
2774
2775 heads = 64;
2776 sectors = 32;
2777
2778 dummy = heads * sectors;
2779 cylinders = capacity;
2780 sector_div(cylinders, dummy);
2781
2782
2783
2784
2785
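	/*
	 * Handle extended translation for drives of 1 GiB (0x200000
	 * sectors) and larger: switch to 255 heads / 63 sectors.  For
	 * example, a 64 GiB disk (134217728 sectors) reports
	 * 134217728 / (255 * 63) = 8354 cylinders.
	 */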
2786 if ((ulong)capacity >= 0x200000) {
2787 heads = 255;
2788 sectors = 63;
2789 dummy = heads * sectors;
2790 cylinders = capacity;
2791 sector_div(cylinders, dummy);
2792 }
2793
2794
2795 params[0] = heads;
2796 params[1] = sectors;
2797 params[2] = cylinders;
2798
2799 return 0;
2800}
2801
2802
2803
2804
2805
2806
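/**
 * _scsih_response_code - translate a task management response code
 * @ioc: per adapter object
 * @response_code: response code returned by the firmware
 *
 * Prints a human readable description of @response_code.
 */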
2807static void
2808_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2809{
2810 char *desc;
2811
2812 switch (response_code) {
2813 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2814 desc = "task management request completed";
2815 break;
2816 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2817 desc = "invalid frame";
2818 break;
2819 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2820 desc = "task management request not supported";
2821 break;
2822 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2823 desc = "task management request failed";
2824 break;
2825 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2826 desc = "task management request succeeded";
2827 break;
2828 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2829 desc = "invalid lun";
2830 break;
2831 case 0xA:
2832 desc = "overlapped tag attempted";
2833 break;
2834 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2835 desc = "task queued, however not sent to target";
2836 break;
2837 default:
2838 desc = "unknown";
2839 break;
2840 }
2841 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2842}
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
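/**
 * _scsih_tm_done - tm completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame (lower 32bit addr)
 *
 * Copies the reply into ioc->tm_cmds, marks the command complete and
 * wakes up the waiter in mpt3sas_scsih_issue_tm().
 *
 * Return: 1 meaning the mf should be freed from _base_interrupt,
 *	   0 means the mf is freed from this function.
 */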
2857static u8
2858_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2859{
2860 MPI2DefaultReply_t *mpi_reply;
2861
2862 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2863 return 1;
2864 if (ioc->tm_cmds.smid != smid)
2865 return 1;
2866 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2867 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2868 if (mpi_reply) {
2869 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2870 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2871 }
2872 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2873 complete(&ioc->tm_cmds.done);
2874 return 1;
2875}
2876
2877
2878
2879
2880
2881
2882
2883
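/**
 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * While a task management request is outstanding, the target's queue is
 * frozen and loginfos are ignored.
 */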
2884void
2885mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2886{
2887 struct MPT3SAS_DEVICE *sas_device_priv_data;
2888 struct scsi_device *sdev;
2889 u8 skip = 0;
2890
2891 shost_for_each_device(sdev, ioc->shost) {
2892 if (skip)
2893 continue;
2894 sas_device_priv_data = sdev->hostdata;
2895 if (!sas_device_priv_data)
2896 continue;
2897 if (sas_device_priv_data->sas_target->handle == handle) {
2898 sas_device_priv_data->sas_target->tm_busy = 1;
2899 skip = 1;
2900 ioc->ignore_loginfos = 1;
2901 }
2902 }
2903}
2904
2905
2906
2907
2908
2909
2910
2911
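/**
 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Re-enables the target's queue once the task management request is done.
 */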
2912void
2913mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2914{
2915 struct MPT3SAS_DEVICE *sas_device_priv_data;
2916 struct scsi_device *sdev;
2917 u8 skip = 0;
2918
2919 shost_for_each_device(sdev, ioc->shost) {
2920 if (skip)
2921 continue;
2922 sas_device_priv_data = sdev->hostdata;
2923 if (!sas_device_priv_data)
2924 continue;
2925 if (sas_device_priv_data->sas_target->handle == handle) {
2926 sas_device_priv_data->sas_target->tm_busy = 0;
2927 skip = 1;
2928 ioc->ignore_loginfos = 0;
2929 }
2930 }
2931}
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942
2943
2944
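/**
 * scsih_tm_cmd_map_status - check whether the affected I/Os were terminated
 * @ioc: per adapter object
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 *
 * Looks up the driver's internal tracking to see whether the I/O (or all
 * I/Os of the target/LUN, depending on @type) is still outstanding.
 *
 * Return: SUCCESS if the I/O(s) are gone, FAILED otherwise.
 */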
2945static int
2946scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
2947 uint id, uint lun, u8 type, u16 smid_task)
2948{
2949
2950 if (smid_task <= ioc->shost->can_queue) {
2951 switch (type) {
2952 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
2953 if (!(_scsih_scsi_lookup_find_by_target(ioc,
2954 id, channel)))
2955 return SUCCESS;
2956 break;
2957 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
2958 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
2959 if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
2960 lun, channel)))
2961 return SUCCESS;
2962 break;
2963 default:
2964 return SUCCESS;
2965 }
2966 } else if (smid_task == ioc->scsih_cmds.smid) {
2967 if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
2968 (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
2969 return SUCCESS;
2970 } else if (smid_task == ioc->ctl_cmds.smid) {
2971 if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
2972 (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
2973 return SUCCESS;
2974 }
2975
2976 return FAILED;
2977}
2978
2979
2980
2981
2982
2983
2984
2985
2986
2987
2988
2989
2990
2991
2992
2993
2994
2995
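/**
 * scsih_tm_post_processing - post processing of target & LUN reset
 * @ioc: per adapter object
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 *
 * Re-checks whether the affected I/Os were terminated; if not, the reply
 * descriptor queues are polled once to pick up completions that may have
 * been missed, and the check is repeated.
 *
 * Return: SUCCESS or FAILED.
 */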
2996static int
2997scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2998 uint channel, uint id, uint lun, u8 type, u16 smid_task)
2999{
3000 int rc;
3001
3002 rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3003 if (rc == SUCCESS)
3004 return rc;
3005
3006 ioc_info(ioc,
3007 "Poll ReplyDescriptor queues for completion of"
3008 " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
3009 smid_task, type, handle);
3010
3011
3012
3013
3014
3015
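	/*
	 * The firmware has terminated the I/O, but its completion may
	 * still be sitting unprocessed in a reply descriptor queue
	 * (for instance after a missed interrupt).  Mask interrupts and
	 * poll the queues once before re-checking.
	 */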
3016 mpt3sas_base_mask_interrupts(ioc);
3017 mpt3sas_base_sync_reply_irqs(ioc, 1);
3018 mpt3sas_base_unmask_interrupts(ioc);
3019
3020 return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
3021}
3022
3023
3024
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035
3036
3037
3038
3039
3040
3041
3042
3043
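/**
 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the caller
 * @timeout: timeout in seconds
 * @tr_method: task reset method, e.g. MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET
 *
 * Callers must hold ioc->tm_cmds.mutex (see
 * mpt3sas_scsih_issue_locked_tm() below).  A host reset is issued if the
 * request times out or the IOC is found faulted.
 *
 * Return: SUCCESS or FAILED.
 */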
3044int
3045mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
3046 uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
3047 u8 timeout, u8 tr_method)
3048{
3049 Mpi2SCSITaskManagementRequest_t *mpi_request;
3050 Mpi2SCSITaskManagementReply_t *mpi_reply;
3051 Mpi25SCSIIORequest_t *request;
3052 u16 smid = 0;
3053 u32 ioc_state;
3054 int rc;
3055 u8 issue_reset = 0;
3056
3057 lockdep_assert_held(&ioc->tm_cmds.mutex);
3058
3059 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
3060 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
3061 return FAILED;
3062 }
3063
3064 if (ioc->shost_recovery || ioc->remove_host ||
3065 ioc->pci_error_recovery) {
3066 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
3067 return FAILED;
3068 }
3069
3070 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3071 if (ioc_state & MPI2_DOORBELL_USED) {
3072 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
3073 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3074 return (!rc) ? SUCCESS : FAILED;
3075 }
3076
3077 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3078 mpt3sas_print_fault_code(ioc, ioc_state &
3079 MPI2_DOORBELL_DATA_MASK);
3080 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3081 return (!rc) ? SUCCESS : FAILED;
3082 } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3083 MPI2_IOC_STATE_COREDUMP) {
3084 mpt3sas_print_coredump_info(ioc, ioc_state &
3085 MPI2_DOORBELL_DATA_MASK);
3086 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3087 return (!rc) ? SUCCESS : FAILED;
3088 }
3089
3090 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
3091 if (!smid) {
3092 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3093 return FAILED;
3094 }
3095
3096 dtmprintk(ioc,
3097 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
3098 handle, type, smid_task, timeout, tr_method));
3099 ioc->tm_cmds.status = MPT3_CMD_PENDING;
3100 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3101 ioc->tm_cmds.smid = smid;
3102 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3103 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
3104 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3105 mpi_request->DevHandle = cpu_to_le16(handle);
3106 mpi_request->TaskType = type;
3107 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
3108 type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
3109 mpi_request->MsgFlags = tr_method;
3110 mpi_request->TaskMID = cpu_to_le16(smid_task);
3111 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
3112 mpt3sas_scsih_set_tm_flag(ioc, handle);
3113 init_completion(&ioc->tm_cmds.done);
3114 ioc->put_smid_hi_priority(ioc, smid, msix_task);
3115 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
3116 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
3117 mpt3sas_check_cmd_timeout(ioc,
3118 ioc->tm_cmds.status, mpi_request,
3119 sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
3120 if (issue_reset) {
3121 rc = mpt3sas_base_hard_reset_handler(ioc,
3122 FORCE_BIG_HAMMER);
3123 rc = (!rc) ? SUCCESS : FAILED;
3124 goto out;
3125 }
3126 }
3127
3128
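	/* Sync the reply IRQs in case those were busy during the flush. */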
3129 mpt3sas_base_sync_reply_irqs(ioc, 0);
3130
3131 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
3132 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3133 mpi_reply = ioc->tm_cmds.reply;
3134 dtmprintk(ioc,
3135 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
3136 le16_to_cpu(mpi_reply->IOCStatus),
3137 le32_to_cpu(mpi_reply->IOCLogInfo),
3138 le32_to_cpu(mpi_reply->TerminationCount)));
3139 if (ioc->logging_level & MPT_DEBUG_TM) {
3140 _scsih_response_code(ioc, mpi_reply->ResponseCode);
3141 if (mpi_reply->IOCStatus)
3142 _debug_dump_mf(mpi_request,
3143 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
3144 }
3145 }
3146
3147 switch (type) {
3148 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3149 rc = SUCCESS;
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
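		/*
		 * If the message frame of the aborted I/O no longer targets
		 * this device handle, the original I/O has completed (or the
		 * frame was reused) and the abort is treated as a success;
		 * otherwise the task is still owned by the firmware and the
		 * abort has failed.
		 */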
3161 request = mpt3sas_base_get_msg_frame(ioc, smid_task);
3162 if (le16_to_cpu(request->DevHandle) != handle)
3163 break;
3164
3165 ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
3166 "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
3167 handle, timeout, tr_method, smid_task, msix_task);
3168 rc = FAILED;
3169 break;
3170
3171 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3172 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3173 case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
3174 rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
3175 type, smid_task);
3176 break;
3177 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3178 rc = SUCCESS;
3179 break;
3180 default:
3181 rc = FAILED;
3182 break;
3183 }
3184
3185out:
3186 mpt3sas_scsih_clear_tm_flag(ioc, handle);
3187 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
3188 return rc;
3189}
3190
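/**
 * mpt3sas_scsih_issue_locked_tm - serialized wrapper for issuing tm requests
 *
 * Takes ioc->tm_cmds.mutex around mpt3sas_scsih_issue_tm(); same
 * parameters and return values as that routine.
 *
 * Illustrative sketch only (the variable names are hypothetical): an
 * error-handling path would typically issue a LUN reset like this:
 *
 *	rc = mpt3sas_scsih_issue_locked_tm(ioc, handle,
 *		sdev->channel, sdev->id, sdev->lun,
 *		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET,
 *		0, 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
 *	if (rc != SUCCESS)
 *		escalate to a target or host reset;
 */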
3191int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
3192 uint channel, uint id, u64 lun, u8 type, u16 smid_task,
3193 u16 msix_task, u8 timeout, u8 tr_method)
3194{
3195 int ret;
3196
3197 mutex_lock(&ioc->tm_cmds.mutex);
3198 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
3199 smid_task, msix_task, timeout, tr_method);
3200 mutex_unlock(&ioc->tm_cmds.mutex);
3201
3202 return ret;
3203}
3204
3205
3206
3207
3208
3209
3210
3211
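/**
 * _scsih_tm_display_info - displays info about the device
 * @ioc: per adapter struct
 * @scmd: pointer to scsi command object
 *
 * Called by the task management callbacks to print the command and the
 * volume, NVMe or SAS device it was queued against.
 */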
3212static void
3213_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
3214{
3215 struct scsi_target *starget = scmd->device->sdev_target;
3216 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
3217 struct _sas_device *sas_device = NULL;
3218 struct _pcie_device *pcie_device = NULL;
3219 unsigned long flags;
3220 char *device_str = NULL;
3221
3222 if (!priv_target)
3223 return;
3224 if (ioc->hide_ir_msg)
3225 device_str = "WarpDrive";
3226 else
3227 device_str = "volume";
3228
3229 scsi_print_command(scmd);
3230 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3231 starget_printk(KERN_INFO, starget,
3232 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
3233 device_str, priv_target->handle,
3234 device_str, (unsigned long long)priv_target->sas_address);
3235
3236 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
3237 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3238 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
3239 if (pcie_device) {
3240 starget_printk(KERN_INFO, starget,
3241 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
3242 pcie_device->handle,
3243 (unsigned long long)pcie_device->wwid,
3244 pcie_device->port_num);
3245 if (pcie_device->enclosure_handle != 0)
3246 starget_printk(KERN_INFO, starget,
3247 "enclosure logical id(0x%016llx), slot(%d)\n",
3248 (unsigned long long)
3249 pcie_device->enclosure_logical_id,
3250 pcie_device->slot);
3251 if (pcie_device->connector_name[0] != '\0')
3252 starget_printk(KERN_INFO, starget,
3253 "enclosure level(0x%04x), connector name( %s)\n",
3254 pcie_device->enclosure_level,
3255 pcie_device->connector_name);
3256 pcie_device_put(pcie_device);
3257 }
3258 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3259
3260 } else {
3261 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3262 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
3263 if (sas_device) {
3264 if (priv_target->flags &
3265 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3266 starget_printk(KERN_INFO, starget,
3267 "volume handle(0x%04x), "
3268 "volume wwid(0x%016llx)\n",
3269 sas_device->volume_handle,
3270 (unsigned long long)sas_device->volume_wwid);
3271 }
3272 starget_printk(KERN_INFO, starget,
3273 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
3274 sas_device->handle,
3275 (unsigned long long)sas_device->sas_address,
3276 sas_device->phy);
3277
3278 _scsih_display_enclosure_chassis_info(NULL, sas_device,
3279 NULL, starget);
3280
3281 sas_device_put(sas_device);
3282 }
3283 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3284 }
3285}
3286
3287
3288
3289
3290
3291
3292
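/**
 * scsih_abort - eh threads main abort routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS if the command was aborted (or had already completed),
 *	   FAILED otherwise.
 */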
3293static int
3294scsih_abort(struct scsi_cmnd *scmd)
3295{
3296 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3297 struct MPT3SAS_DEVICE *sas_device_priv_data;
3298 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
3299 u16 handle;
3300 int r;
3301
3302 u8 timeout = 30;
3303 struct _pcie_device *pcie_device = NULL;
3304 sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
3305 "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
3306 scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
3307 (scmd->request->timeout / HZ) * 1000);
3308 _scsih_tm_display_info(ioc, scmd);
3309
3310 sas_device_priv_data = scmd->device->hostdata;
3311 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3312 ioc->remove_host) {
3313 sdev_printk(KERN_INFO, scmd->device,
3314 "device been deleted! scmd(0x%p)\n", scmd);
3315 scmd->result = DID_NO_CONNECT << 16;
3316 scmd->scsi_done(scmd);
3317 r = SUCCESS;
3318 goto out;
3319 }
3320
3321
3322 if (st == NULL || st->cb_idx == 0xFF) {
3323 sdev_printk(KERN_INFO, scmd->device, "No reference found at "
3324 "driver, assuming scmd(0x%p) might have completed\n", scmd);
3325 scmd->result = DID_RESET << 16;
3326 r = SUCCESS;
3327 goto out;
3328 }
3329
3330
3331 if (sas_device_priv_data->sas_target->flags &
3332 MPT_TARGET_FLAGS_RAID_COMPONENT ||
3333 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
3334 scmd->result = DID_RESET << 16;
3335 r = FAILED;
3336 goto out;
3337 }
3338
3339 mpt3sas_halt_firmware(ioc);
3340
3341 handle = sas_device_priv_data->sas_target->handle;
3342 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3343 if (pcie_device && (!ioc->tm_custom_handling) &&
3344 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
3345 timeout = ioc->nvme_abort_timeout;
3346 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3347 scmd->device->id, scmd->device->lun,
3348 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
3349 st->smid, st->msix_io, timeout, 0);
3350
3351 if (r == SUCCESS && st->cb_idx != 0xFF)
3352 r = FAILED;
3353 out:
3354 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
3355 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3356 if (pcie_device)
3357 pcie_device_put(pcie_device);
3358 return r;
3359}
3360
3361
3362
3363
3364
3365
3366
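/**
 * scsih_dev_reset - eh threads main device reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS or FAILED.
 */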
3367static int
3368scsih_dev_reset(struct scsi_cmnd *scmd)
3369{
3370 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3371 struct MPT3SAS_DEVICE *sas_device_priv_data;
3372 struct _sas_device *sas_device = NULL;
3373 struct _pcie_device *pcie_device = NULL;
3374 u16 handle;
3375 u8 tr_method = 0;
3376 u8 tr_timeout = 30;
3377 int r;
3378
3379 struct scsi_target *starget = scmd->device->sdev_target;
3380 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3381
3382 sdev_printk(KERN_INFO, scmd->device,
3383 "attempting device reset! scmd(0x%p)\n", scmd);
3384 _scsih_tm_display_info(ioc, scmd);
3385
3386 sas_device_priv_data = scmd->device->hostdata;
3387 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3388 ioc->remove_host) {
3389 sdev_printk(KERN_INFO, scmd->device,
3390 "device been deleted! scmd(0x%p)\n", scmd);
3391 scmd->result = DID_NO_CONNECT << 16;
3392 scmd->scsi_done(scmd);
3393 r = SUCCESS;
3394 goto out;
3395 }
3396
3397
3398 handle = 0;
3399 if (sas_device_priv_data->sas_target->flags &
3400 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3401 sas_device = mpt3sas_get_sdev_from_target(ioc,
3402 target_priv_data);
3403 if (sas_device)
3404 handle = sas_device->volume_handle;
3405 } else
3406 handle = sas_device_priv_data->sas_target->handle;
3407
3408 if (!handle) {
3409 scmd->result = DID_RESET << 16;
3410 r = FAILED;
3411 goto out;
3412 }
3413
3414 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3415
3416 if (pcie_device && (!ioc->tm_custom_handling) &&
3417 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3418 tr_timeout = pcie_device->reset_timeout;
3419 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3420 } else
3421 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3422
3423 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3424 scmd->device->id, scmd->device->lun,
3425 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
3426 tr_timeout, tr_method);
3427
3428 if (r == SUCCESS && scsi_device_busy(scmd->device))
3429 r = FAILED;
3430 out:
3431 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
3432 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3433
3434 if (sas_device)
3435 sas_device_put(sas_device);
3436 if (pcie_device)
3437 pcie_device_put(pcie_device);
3438
3439 return r;
3440}
3441
3442
3443
3444
3445
3446
3447
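/**
 * scsih_target_reset - eh threads main target reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS or FAILED.
 */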
3448static int
3449scsih_target_reset(struct scsi_cmnd *scmd)
3450{
3451 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3452 struct MPT3SAS_DEVICE *sas_device_priv_data;
3453 struct _sas_device *sas_device = NULL;
3454 struct _pcie_device *pcie_device = NULL;
3455 u16 handle;
3456 u8 tr_method = 0;
3457 u8 tr_timeout = 30;
3458 int r;
3459 struct scsi_target *starget = scmd->device->sdev_target;
3460 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
3461
3462 starget_printk(KERN_INFO, starget,
3463 "attempting target reset! scmd(0x%p)\n", scmd);
3464 _scsih_tm_display_info(ioc, scmd);
3465
3466 sas_device_priv_data = scmd->device->hostdata;
3467 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
3468 ioc->remove_host) {
3469 starget_printk(KERN_INFO, starget,
3470 "target been deleted! scmd(0x%p)\n", scmd);
3471 scmd->result = DID_NO_CONNECT << 16;
3472 scmd->scsi_done(scmd);
3473 r = SUCCESS;
3474 goto out;
3475 }
3476
3477
3478 handle = 0;
3479 if (sas_device_priv_data->sas_target->flags &
3480 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3481 sas_device = mpt3sas_get_sdev_from_target(ioc,
3482 target_priv_data);
3483 if (sas_device)
3484 handle = sas_device->volume_handle;
3485 } else
3486 handle = sas_device_priv_data->sas_target->handle;
3487
3488 if (!handle) {
3489 scmd->result = DID_RESET << 16;
3490 r = FAILED;
3491 goto out;
3492 }
3493
3494 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3495
3496 if (pcie_device && (!ioc->tm_custom_handling) &&
3497 (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
3498 tr_timeout = pcie_device->reset_timeout;
3499 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3500 } else
3501 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3502 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
3503 scmd->device->id, 0,
3504 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3505 tr_timeout, tr_method);
3506
3507 if (r == SUCCESS && atomic_read(&starget->target_busy))
3508 r = FAILED;
3509 out:
3510 starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
3511 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3512
3513 if (sas_device)
3514 sas_device_put(sas_device);
3515 if (pcie_device)
3516 pcie_device_put(pcie_device);
3517 return r;
3518}
3519
3520
3521
3522
3523
3524
3525
3526
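/**
 * scsih_host_reset - eh threads main host reset routine
 * @scmd: pointer to scsi command object
 *
 * Return: SUCCESS or FAILED.
 */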
3527static int
3528scsih_host_reset(struct scsi_cmnd *scmd)
3529{
3530 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3531 int r, retval;
3532
3533 ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3534 scsi_print_command(scmd);
3535
3536 if (ioc->is_driver_loading || ioc->remove_host) {
3537 ioc_info(ioc, "Blocking the host reset\n");
3538 r = FAILED;
3539 goto out;
3540 }
3541
3542 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3543 r = (retval < 0) ? FAILED : SUCCESS;
3544out:
3545 ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3546 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3547
3548 return r;
3549}
3550
3551
3552
3553
3554
3555
3556
3557
3558
3559
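/**
 * _scsih_fw_event_add - insert and queue up fw_event
 * @ioc: per adapter object
 * @fw_event: object describing the event
 *
 * Adds the event to ioc->fw_event_list and queues its work item on the
 * firmware_event_thread workqueue, taking a reference for each.
 */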
3560static void
3561_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3562{
3563 unsigned long flags;
3564
3565 if (ioc->firmware_event_thread == NULL)
3566 return;
3567
3568 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3569 fw_event_work_get(fw_event);
3570 INIT_LIST_HEAD(&fw_event->list);
3571 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3572 INIT_WORK(&fw_event->work, _firmware_event_work);
3573 fw_event_work_get(fw_event);
3574 queue_work(ioc->firmware_event_thread, &fw_event->work);
3575 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3576}
3577
3578
3579
3580
3581
3582
3583
3584
3585
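/**
 * _scsih_fw_event_del_from_list - delete fw_event from the list
 * @ioc: per adapter object
 * @fw_event: object describing the event
 *
 * If the event is still on ioc->fw_event_list, remove it and drop the
 * list's reference.
 */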
3586static void
3587_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3588 *fw_event)
3589{
3590 unsigned long flags;
3591
3592 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3593 if (!list_empty(&fw_event->list)) {
3594 list_del_init(&fw_event->list);
3595 fw_event_work_put(fw_event);
3596 }
3597 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3598}
3599
3600
3601
3602
3603
3604
3605
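/**
 * mpt3sas_send_trigger_data_event - send event for processing trigger data
 * @ioc: per adapter object
 * @event_data: trigger event data
 */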
3606void
3607mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3608 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3609{
3610 struct fw_event_work *fw_event;
3611 u16 sz;
3612
3613 if (ioc->is_driver_loading)
3614 return;
3615 sz = sizeof(*event_data);
3616 fw_event = alloc_fw_event_work(sz);
3617 if (!fw_event)
3618 return;
3619 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3620 fw_event->ioc = ioc;
3621 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3622 _scsih_fw_event_add(ioc, fw_event);
3623 fw_event_work_put(fw_event);
3624}
3625
3626
3627
3628
3629
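/**
 * _scsih_error_recovery_delete_devices - queue removal of unresponding devices
 * @ioc: per adapter object
 */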
3630static void
3631_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3632{
3633 struct fw_event_work *fw_event;
3634
3635 fw_event = alloc_fw_event_work(0);
3636 if (!fw_event)
3637 return;
3638 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3639 fw_event->ioc = ioc;
3640 _scsih_fw_event_add(ioc, fw_event);
3641 fw_event_work_put(fw_event);
3642}
3643
3644
3645
3646
3647
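/**
 * mpt3sas_port_enable_complete - queue the port enable completed event
 * @ioc: per adapter object
 */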
3648void
3649mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3650{
3651 struct fw_event_work *fw_event;
3652
3653 fw_event = alloc_fw_event_work(0);
3654 if (!fw_event)
3655 return;
3656 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3657 fw_event->ioc = ioc;
3658 _scsih_fw_event_add(ioc, fw_event);
3659 fw_event_work_put(fw_event);
3660}
3661
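/**
 * dequeue_next_fw_event - dequeue the firmware event at the list head
 * @ioc: per adapter object
 *
 * Return: the dequeued fw_event_work, or NULL if the list is empty.
 */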
3662static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3663{
3664 unsigned long flags;
3665 struct fw_event_work *fw_event = NULL;
3666
3667 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3668 if (!list_empty(&ioc->fw_event_list)) {
3669 fw_event = list_first_entry(&ioc->fw_event_list,
3670 struct fw_event_work, list);
3671 list_del_init(&fw_event->list);
3672 }
3673 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3674
3675 return fw_event;
3676}
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
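/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walks the firmware event queue, either killing each event that has not
 * started yet or waiting for the one currently running to finish.
 */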
3687static void
3688_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3689{
3690 struct fw_event_work *fw_event;
3691
3692 if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
3693 !ioc->firmware_event_thread)
3694 return;
3695
3696
3697
3698
3699
3700
3701 if (ioc->shost_recovery && ioc->current_event)
3702 ioc->current_event->ignore = 1;
3703
3704 ioc->fw_events_cleanup = 1;
3705 while ((fw_event = dequeue_next_fw_event(ioc)) ||
3706 (fw_event = ioc->current_event)) {
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722 if (fw_event == ioc->current_event &&
3723 ioc->current_event->event !=
3724 MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
3725 ioc->current_event = NULL;
3726 continue;
3727 }
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737 if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
3738 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
3739 ioc->start_scan = 0;
3740 }
3741
3742
3743
3744
3745
3746
3747
3748
3749
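		/*
		 * Wait for the work if it is already running.  If
		 * cancel_work_sync() actually cancelled a queued item,
		 * drop the reference _scsih_fw_event_add() took for the
		 * workqueue, then drop the list reference held since
		 * dequeue_next_fw_event().
		 */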
3750 if (cancel_work_sync(&fw_event->work))
3751 fw_event_work_put(fw_event);
3752
3753 fw_event_work_put(fw_event);
3754 }
3755 ioc->fw_events_cleanup = 0;
3756}
3757
3758
3759
3760
3761
3762
3763
3764
3765
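/**
 * _scsih_internal_device_block - block the sdev device
 * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Marks the device blocked and moves it to the SDEV_BLOCK state without
 * waiting for outstanding commands.
 */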
3766static void
3767_scsih_internal_device_block(struct scsi_device *sdev,
3768 struct MPT3SAS_DEVICE *sas_device_priv_data)
3769{
3770 int r = 0;
3771
3772 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3773 sas_device_priv_data->sas_target->handle);
3774 sas_device_priv_data->block = 1;
3775
3776 r = scsi_internal_device_block_nowait(sdev);
3777 if (r == -EINVAL)
3778 sdev_printk(KERN_WARNING, sdev,
3779 "device_block failed with return(%d) for handle(0x%04x)\n",
3780 r, sas_device_priv_data->sas_target->handle);
3781}
3782
3783
3784
3785
3786
3787
3788
3789
3790
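/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data: per device driver private data
 *
 * Clears the blocked flag and returns the device to SDEV_RUNNING,
 * retrying with a block/unblock cycle if the first attempt is rejected.
 */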
3791static void
3792_scsih_internal_device_unblock(struct scsi_device *sdev,
3793 struct MPT3SAS_DEVICE *sas_device_priv_data)
3794{
3795 int r = 0;
3796
3797 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3798 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3799 sas_device_priv_data->block = 0;
3800 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3801 if (r == -EINVAL) {
3802
3803
3804
3805
3806
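		/*
		 * scsi_internal_device_unblock_nowait() rejected the state
		 * change, so force the device through an explicit block
		 * followed by an unblock to get it back to SDEV_RUNNING.
		 */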
3807 sdev_printk(KERN_WARNING, sdev,
3808 "device_unblock failed with return(%d) for handle(0x%04x) "
3809 "performing a block followed by an unblock\n",
3810 r, sas_device_priv_data->sas_target->handle);
3811 sas_device_priv_data->block = 1;
3812 r = scsi_internal_device_block_nowait(sdev);
3813 if (r)
3814 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3815 "failed with return(%d) for handle(0x%04x)\n",
3816 r, sas_device_priv_data->sas_target->handle);
3817
3818 sas_device_priv_data->block = 0;
3819 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3820 if (r)
3821 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3822 " failed with return(%d) for handle(0x%04x)\n",
3823 r, sas_device_priv_data->sas_target->handle);
3824 }
3825}
3826
3827
3828
3829
3830
3831
3832
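/**
 * _scsih_ublock_io_all_device - unblock every previously blocked device
 * @ioc: per adapter object
 */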
3833static void
3834_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3835{
3836 struct MPT3SAS_DEVICE *sas_device_priv_data;
3837 struct scsi_device *sdev;
3838
3839 shost_for_each_device(sdev, ioc->shost) {
3840 sas_device_priv_data = sdev->hostdata;
3841 if (!sas_device_priv_data)
3842 continue;
3843 if (!sas_device_priv_data->block)
3844 continue;
3845
3846 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3847 "device_running, handle(0x%04x)\n",
3848 sas_device_priv_data->sas_target->handle));
3849 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3850 }
3851}
3852
3853
3854
3855
3856
3857
3858
3859
3860
3861
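/**
 * _scsih_ublock_io_device - set the device state to SDEV_RUNNING
 * @ioc: per adapter object
 * @sas_address: sas address of the device
 * @port: hba port entry
 *
 * Unblocks the blocked devices matching @sas_address and @port.
 */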
3862static void
3863_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3864 u64 sas_address, struct hba_port *port)
3865{
3866 struct MPT3SAS_DEVICE *sas_device_priv_data;
3867 struct scsi_device *sdev;
3868
3869 shost_for_each_device(sdev, ioc->shost) {
3870 sas_device_priv_data = sdev->hostdata;
3871 if (!sas_device_priv_data)
3872 continue;
3873 if (sas_device_priv_data->sas_target->sas_address
3874 != sas_address)
3875 continue;
3876 if (sas_device_priv_data->sas_target->port != port)
3877 continue;
3878 if (sas_device_priv_data->block)
3879 _scsih_internal_device_unblock(sdev,
3880 sas_device_priv_data);
3881 }
3882}
3883
3884
3885
3886
3887
3888
3889
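/**
 * _scsih_block_io_all_device - set all devices to SDEV_BLOCK
 * @ioc: per adapter object
 *
 * SES devices flagged with ignore_delay_remove are skipped.
 */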
3890static void
3891_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3892{
3893 struct MPT3SAS_DEVICE *sas_device_priv_data;
3894 struct scsi_device *sdev;
3895
3896 shost_for_each_device(sdev, ioc->shost) {
3897 sas_device_priv_data = sdev->hostdata;
3898 if (!sas_device_priv_data)
3899 continue;
3900 if (sas_device_priv_data->block)
3901 continue;
3902 if (sas_device_priv_data->ignore_delay_remove) {
3903 sdev_printk(KERN_INFO, sdev,
3904 "%s skip device_block for SES handle(0x%04x)\n",
3905 __func__, sas_device_priv_data->sas_target->handle);
3906 continue;
3907 }
3908 _scsih_internal_device_block(sdev, sas_device_priv_data);
3909 }
3910}
3911
3912
3913
3914
3915
3916
3917
3918
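/**
 * _scsih_block_io_device - set the device state to SDEV_BLOCK
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Blocks every sdev attached to @handle, except SES devices and devices
 * with a pending sas_rphy add.
 */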
3919static void
3920_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3921{
3922 struct MPT3SAS_DEVICE *sas_device_priv_data;
3923 struct scsi_device *sdev;
3924 struct _sas_device *sas_device;
3925
3926 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3927
3928 shost_for_each_device(sdev, ioc->shost) {
3929 sas_device_priv_data = sdev->hostdata;
3930 if (!sas_device_priv_data)
3931 continue;
3932 if (sas_device_priv_data->sas_target->handle != handle)
3933 continue;
3934 if (sas_device_priv_data->block)
3935 continue;
3936 if (sas_device && sas_device->pend_sas_rphy_add)
3937 continue;
3938 if (sas_device_priv_data->ignore_delay_remove) {
3939 sdev_printk(KERN_INFO, sdev,
3940 "%s skip device_block for SES handle(0x%04x)\n",
3941 __func__, sas_device_priv_data->sas_target->handle);
3942 continue;
3943 }
3944 _scsih_internal_device_block(sdev, sas_device_priv_data);
3945 }
3946
3947 if (sas_device)
3948 sas_device_put(sas_device);
3949}
3950
3951
3952
3953
3954
3955
3956
3957
3958
3959
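/**
 * _scsih_block_io_to_children_attached_to_ex - mark children for blocking
 * @ioc: per adapter object
 * @sas_expander: the expander sas_node object
 *
 * Recursively sets the handles of all end devices below @sas_expander in
 * ioc->blocking_handles so their I/O can be blocked later.
 */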
3960static void
3961_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3962 struct _sas_node *sas_expander)
3963{
3964 struct _sas_port *mpt3sas_port;
3965 struct _sas_device *sas_device;
3966 struct _sas_node *expander_sibling;
3967 unsigned long flags;
3968
3969 if (!sas_expander)
3970 return;
3971
3972 list_for_each_entry(mpt3sas_port,
3973 &sas_expander->sas_port_list, port_list) {
3974 if (mpt3sas_port->remote_identify.device_type ==
3975 SAS_END_DEVICE) {
3976 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3977 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3978 mpt3sas_port->remote_identify.sas_address,
3979 mpt3sas_port->hba_port);
3980 if (sas_device) {
3981 set_bit(sas_device->handle,
3982 ioc->blocking_handles);
3983 sas_device_put(sas_device);
3984 }
3985 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3986 }
3987 }
3988
3989 list_for_each_entry(mpt3sas_port,
3990 &sas_expander->sas_port_list, port_list) {
3991
3992 if (mpt3sas_port->remote_identify.device_type ==
3993 SAS_EDGE_EXPANDER_DEVICE ||
3994 mpt3sas_port->remote_identify.device_type ==
3995 SAS_FANOUT_EXPANDER_DEVICE) {
3996 expander_sibling =
3997 mpt3sas_scsih_expander_find_by_sas_address(
3998 ioc, mpt3sas_port->remote_identify.sas_address,
3999 mpt3sas_port->hba_port);
4000 _scsih_block_io_to_children_attached_to_ex(ioc,
4001 expander_sibling);
4002 }
4003 }
4004}
4005
4006
4007
4008
4009
4010
4011
4012
4013
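/**
 * _scsih_block_io_to_children_attached_directly - block directly attached devices
 * @ioc: per adapter object
 * @event_data: topology change event data
 *
 * Blocks devices whose phy status reports "delay not responding".
 */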
4014static void
4015_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4016 Mpi2EventDataSasTopologyChangeList_t *event_data)
4017{
4018 int i;
4019 u16 handle;
4020 u16 reason_code;
4021
4022 for (i = 0; i < event_data->NumEntries; i++) {
4023 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4024 if (!handle)
4025 continue;
4026 reason_code = event_data->PHY[i].PhyStatus &
4027 MPI2_EVENT_SAS_TOPO_RC_MASK;
4028 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4029 _scsih_block_io_device(ioc, handle);
4030 }
4031}
4032
4033
4034
4035
4036
4037
4038
4039
4040
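/**
 * _scsih_block_io_to_pcie_children_attached_directly - block PCIe devices
 * @ioc: per adapter object
 * @event_data: PCIe topology change event data
 *
 * Blocks PCIe devices whose port status reports "delay not responding".
 */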
4041static void
4042_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4043 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4044{
4045 int i;
4046 u16 handle;
4047 u16 reason_code;
4048
4049 for (i = 0; i < event_data->NumEntries; i++) {
4050 handle =
4051 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4052 if (!handle)
4053 continue;
4054 reason_code = event_data->PortEntry[i].PortStatus;
4055 if (reason_code ==
4056 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4057 _scsih_block_io_device(ioc, handle);
4058 }
4059}
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
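/**
 * _scsih_tm_tr_send - send target reset request for device removal
 * @ioc: per adapter object
 * @handle: device handle
 *
 * Marks the target deleted, unblocks any pending I/O and sends a
 * hi-priority target reset for @handle.  If no hi-priority smid is
 * available, the request is queued on ioc->delayed_tr_list and sent
 * later.
 */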
4075static void
4076_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4077{
4078 Mpi2SCSITaskManagementRequest_t *mpi_request;
4079 u16 smid;
4080 struct _sas_device *sas_device = NULL;
4081 struct _pcie_device *pcie_device = NULL;
4082 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
4083 u64 sas_address = 0;
4084 unsigned long flags;
4085 struct _tr_list *delayed_tr;
4086 u32 ioc_state;
4087 u8 tr_method = 0;
4088 struct hba_port *port = NULL;
4089
4090 if (ioc->pci_error_recovery) {
4091 dewtprintk(ioc,
4092 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
4093 __func__, handle));
4094 return;
4095 }
4096 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4097 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4098 dewtprintk(ioc,
4099 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
4100 __func__, handle));
4101 return;
4102 }
4103
4104
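	/* Skip hidden RAID physical disks. */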
4105 if (test_bit(handle, ioc->pd_handles))
4106 return;
4107
4108 clear_bit(handle, ioc->pend_os_device_add);
4109
4110 spin_lock_irqsave(&ioc->sas_device_lock, flags);
4111 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
4112 if (sas_device && sas_device->starget &&
4113 sas_device->starget->hostdata) {
4114 sas_target_priv_data = sas_device->starget->hostdata;
4115 sas_target_priv_data->deleted = 1;
4116 sas_address = sas_device->sas_address;
4117 port = sas_device->port;
4118 }
4119 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
4120 if (!sas_device) {
4121 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
4122 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
4123 if (pcie_device && pcie_device->starget &&
4124 pcie_device->starget->hostdata) {
4125 sas_target_priv_data = pcie_device->starget->hostdata;
4126 sas_target_priv_data->deleted = 1;
4127 sas_address = pcie_device->wwid;
4128 }
4129 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
4130 if (pcie_device && (!ioc->tm_custom_handling) &&
4131 (!(mpt3sas_scsih_is_pcie_scsi_device(
4132 pcie_device->device_info))))
4133 tr_method =
4134 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
4135 else
4136 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
4137 }
4138 if (sas_target_priv_data) {
4139 dewtprintk(ioc,
4140 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
4141 handle, (u64)sas_address));
4142 if (sas_device) {
4143 if (sas_device->enclosure_handle != 0)
4144 dewtprintk(ioc,
4145 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
4146 (u64)sas_device->enclosure_logical_id,
4147 sas_device->slot));
4148 if (sas_device->connector_name[0] != '\0')
4149 dewtprintk(ioc,
4150 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
4151 sas_device->enclosure_level,
4152 sas_device->connector_name));
4153 } else if (pcie_device) {
4154 if (pcie_device->enclosure_handle != 0)
4155 dewtprintk(ioc,
4156 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
4157 (u64)pcie_device->enclosure_logical_id,
4158 pcie_device->slot));
4159 if (pcie_device->connector_name[0] != '\0')
4160 dewtprintk(ioc,
4161 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
4162 pcie_device->enclosure_level,
4163 pcie_device->connector_name));
4164 }
4165 _scsih_ublock_io_device(ioc, sas_address, port);
4166 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
4167 }
4168
4169 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
4170 if (!smid) {
4171 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4172 if (!delayed_tr)
4173 goto out;
4174 INIT_LIST_HEAD(&delayed_tr->list);
4175 delayed_tr->handle = handle;
4176 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4177 dewtprintk(ioc,
4178 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4179 handle));
4180 goto out;
4181 }
4182
4183 dewtprintk(ioc,
4184 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4185 handle, smid, ioc->tm_tr_cb_idx));
4186 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4187 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
4188 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4189 mpi_request->DevHandle = cpu_to_le16(handle);
4190 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
4191 mpi_request->MsgFlags = tr_method;
4192 set_bit(handle, ioc->device_remove_in_progress);
4193 ioc->put_smid_hi_priority(ioc, smid, 0);
4194 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
4195
4196out:
4197 if (sas_device)
4198 sas_device_put(sas_device);
4199 if (pcie_device)
4200 pcie_device_put(pcie_device);
4201}
4202
4203
4204
4205
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
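/**
 * _scsih_tm_tr_complete - completion routine for the target reset sent
 *	by _scsih_tm_tr_send()
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the ISR
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return: 1 meaning the mf should be freed from _base_interrupt,
 *	   0 means the mf is freed from this function.
 */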
4219static u8
4220_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4221 u32 reply)
4222{
4223 u16 handle;
4224 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
4225 Mpi2SCSITaskManagementReply_t *mpi_reply =
4226 mpt3sas_base_get_reply_virt_addr(ioc, reply);
4227 Mpi2SasIoUnitControlRequest_t *mpi_request;
4228 u16 smid_sas_ctrl;
4229 u32 ioc_state;
4230 struct _sc_list *delayed_sc;
4231
4232 if (ioc->pci_error_recovery) {
4233 dewtprintk(ioc,
4234 ioc_info(ioc, "%s: host in pci error recovery\n",
4235 __func__));
4236 return 1;