/*
 * scsi_scan.c - SCSI mid-layer device scanning: probe targets and LUNs,
 * allocate scsi_target/scsi_device structures and register them.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dh.h>
#include <scsi/scsi_eh.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

#define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
	" SCSI scanning, some SCSI devices might not be configured\n"

#define SCSI_TIMEOUT (2*HZ)
#define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)

#define SCSI_UID_SER_NUM 'S'
#define SCSI_UID_UNKNOWN 'Z'

/*
 * Return values of the LUN scanning functions below:
 *
 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target
 * (including allocation failures that prevented I/O from being sent).
 *
 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
 * at this LUN.
 *
 * SCSI_SCAN_LUN_PRESENT: target responded and a device is present at the
 * specified LUN.
 */
#define SCSI_SCAN_NO_RESPONSE		0
#define SCSI_SCAN_TARGET_PRESENT	1
#define SCSI_SCAN_LUN_PRESENT		2

static const char *scsi_null_device_strs = "nullnullnullnull";

#define MAX_SCSI_LUNS	512

static u64 max_scsi_luns = MAX_SCSI_LUNS;

module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
		 "last scsi LUN (should be between 1 and 2^64-1)");

#ifdef CONFIG_SCSI_SCAN_ASYNC
#define SCSI_SCAN_TYPE_DEFAULT "async"
#else
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif

char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;

module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
		    S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
		 "Setting to 'manual' disables automatic scanning, but allows "
		 "for manual device scan via the 'scan' sysfs attribute.");

static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;

module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(inq_timeout,
		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
		 " Default is 20. Some devices may need more; most need less.");

static DEFINE_SPINLOCK(async_scan_lock);
static LIST_HEAD(scanning_hosts);

struct async_scan_data {
	struct list_head list;
	struct Scsi_Host *shost;
	struct completion prev_finished;
};

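/**
 * scsi_complete_async_scans - Wait for asynchronous scans to complete
 *
 * When this function returns, any host which started scanning before
 * this function was called will have finished its scan.  Hosts which
 * started scanning after this function was called may or may not have
 * finished.
 */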
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		if (list_empty(&scanning_hosts))
			return 0;
		/*
		 * If we can't get memory immediately, that's OK.  Just
		 * sleep a little and retry; the async scans will finish
		 * eventually anyway.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Check that there's still somebody else on the list */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
 done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}

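/**
 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
 * @sdev:	scsi device to send the command to
 * @result:	area to store the result of the MODE SENSE
 *
 * Send a vendor-specific MODE SENSE (not a MODE SELECT) command;
 * called for BLIST_KEY devices so that the medium becomes accessible.
 */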
static void scsi_unlock_floptical(struct scsi_device *sdev,
				  unsigned char *result)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];

	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
	scsi_cmd[0] = MODE_SENSE;
	scsi_cmd[1] = 0;
	scsi_cmd[2] = 0x2e;
	scsi_cmd[3] = 0;
	scsi_cmd[4] = 0x2a;
	scsi_cmd[5] = 0;
	scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
			 SCSI_TIMEOUT, 3, NULL);
}

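/**
 * scsi_alloc_sdev - allocate and set up a scsi_device
 * @starget:	which target to allocate a scsi_device for
 * @lun:	which lun
 * @hostdata:	usually NULL and set by ->slave_alloc() instead
 *
 * Description:
 *     Allocate, initialize for io, and return a pointer to a scsi_device.
 *     Stores the host, channel, id and lun in the scsi_device, sets up the
 *     request queue and budget map, and calls the host's slave_alloc().
 *
 * Return value:
 *     scsi_device pointer, or NULL on failure.
 */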
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   u64 lun, void *hostdata)
{
	unsigned int depth;
	struct scsi_device *sdev;
	struct request_queue *q;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_KERNEL);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	mutex_init(&sdev->state_mutex);
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	mutex_init(&sdev->inquiry_mutex);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->slave_alloc() instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * slave_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level drivers use device->type
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it
	 * doesn't.
	 */
	sdev->borken = 1;

	q = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(q)) {
		/* the release fn is set up in scsi_sysfs_device_initialize(),
		 * so free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	sdev->request_queue = q;
	q->queuedata = sdev;
	__scsi_init_queue(sdev->host, q);
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
	WARN_ON_ONCE(!blk_get_queue(q));

	depth = sdev->host->cmd_per_lun ?: 1;

	/*
	 * Use .can_queue as the budget map's depth because we have to
	 * support adjusting the queue depth from sysfs.  Meanwhile, use
	 * the default device queue depth to figure out the sbitmap shift
	 * since that is the depth used most of the time.
	 */
	if (sbitmap_init_node(&sdev->budget_map,
			      scsi_device_max_queue_depth(sdev),
			      sbitmap_calculate_shift(depth),
			      GFP_KERNEL, sdev->request_queue->node,
			      false, true)) {
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}

	scsi_change_queue_depth(sdev, depth);

	scsi_sysfs_device_initialize(sdev);

	if (shost->hostt->slave_alloc) {
		ret = shost->hostt->slave_alloc(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with alloc failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}

static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}

static void scsi_target_dev_release(struct device *dev)
{
	struct device *parent = dev->parent;
	struct scsi_target *starget = to_scsi_target(dev);

	kfree(starget);
	put_device(parent);
}

static struct device_type scsi_target_type = {
	.name =		"scsi_target",
	.release =	scsi_target_dev_release,
};

int scsi_is_target_device(const struct device *dev)
{
	return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);

static struct scsi_target *__scsi_find_target(struct device *parent,
					      int channel, uint id)
{
	struct scsi_target *starget, *found_starget = NULL;
	struct Scsi_Host *shost = dev_to_shost(parent);

	list_for_each_entry(starget, &shost->__targets, siblings) {
		if (starget->id == id &&
		    starget->channel == channel) {
			found_starget = starget;
			break;
		}
	}
	if (found_starget)
		get_device(&found_starget->dev);

	return found_starget;
}

static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * If we get here and the target is still in a CREATED state, it
	 * was allocated but never made visible (because a scan turned up
	 * no LUNs), so don't call device_del() on it.
	 */
	if ((starget->state != STARGET_CREATED) &&
	    (starget->state != STARGET_CREATED_REMOVE)) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	scsi_target_destroy(starget);
}

static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}

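/**
 * scsi_alloc_target - allocate a new or find an existing target
 * @parent:	parent of the target (needs to be a &struct Scsi_Host)
 * @channel:	target channel number (zero if no channels)
 * @id:		target id number
 *
 * Return an existing target if one is found.  The target is returned with
 * an incremented reference, so use scsi_target_reap() to release it (see
 * below).  Returns NULL if allocation or the host's target_alloc() fails.
 */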
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;

	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if (error) {
			if (error != -ENXIO)
				dev_err(dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	get_device(dev);

	return starget;

 found:
	/*
	 * The release routine already set up target reaping, so don't
	 * initiate it here; just take a reference if the target is not
	 * already being torn down.
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		put_device(dev);
		return found_target;
	}
	/*
	 * Unfortunately, we found a dying target; it is already being
	 * destroyed by its release routine, so drop the reference that
	 * __scsi_find_target() took and retry with a fresh lookup.
	 */
	put_device(&found_target->dev);
	/* give the tear-down a moment to complete before retrying */
	msleep(1);
	goto retry;
}

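/**
 * scsi_target_reap - check to see if target is in use and destroy if not
 * @starget: target to be checked
 *
 * This is used after removing a LUN or doing a last put of the target;
 * it drops the reap reference and lets the release routine decide
 * whether the target needs to be removed.
 */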
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * Serious problem if this triggers: STARGET_DEL is only set once
	 * the reap_ref has already dropped to zero, so hitting it here
	 * means a second final put on an already released kref.
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}

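/**
 * scsi_sanitize_inquiry_string - remove non-graphic chars from an
 *                                INQUIRY result string
 * @s: INQUIRY result string to sanitize
 * @len: length of the string
 *
 * Description:
 *	The SCSI spec says that INQUIRY vendor, product, and revision
 *	strings must consist entirely of graphic ASCII characters,
 *	padded on the right with spaces.  Since not all devices obey
 *	this rule, we will replace non-graphic or non-ASCII characters
 *	with spaces.  Once the NUL character is reached, all remaining
 *	characters are replaced with spaces too.
 */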
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	int terminated = 0;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = 1;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';
	}
}
EXPORT_SYMBOL(scsi_sanitize_inquiry_string);

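/**
 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
 * @sdev:	scsi_device to probe
 * @inq_result:	area to store the INQUIRY result
 * @result_len:	len of inq_result
 * @bflags:	store any bflags found here
 *
 * Description:
 *     Probe the lun associated with @sdev using a standard SCSI INQUIRY.
 *
 *     If the INQUIRY is successful, zero is returned, the INQUIRY data
 *     is in @inq_result, the scsi_level and INQUIRY length are copied
 *     to the scsi_device, and any blacklist flags found are stored in
 *     *@bflags.
 */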
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, blist_flags_t *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result;
	struct scsi_sense_hdr sshdr;

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	for (count = 0; count < 3; ++count) {
		int resid;

		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  inq_result, try_inquiry_len, &sshdr,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &resid);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result > 0) {
			/*
			 * not-ready to ready transition [asc/ascq=0x28/0x0]
			 * or power-on, reset [asc/ascq=0x29/0x0], continue.
			 * INQUIRY should not yield UNIT_ATTENTION
			 * but many buggy devices do so anyway.
			 */
			if (scsi_status_is_check_condition(result) &&
			    scsi_sense_valid(&sshdr)) {
				if ((sshdr.sense_key == UNIT_ATTENTION) &&
				    ((sshdr.asc == 0x28) ||
				     (sshdr.asc == 0x29)) &&
				    (sshdr.ascq == 0))
					continue;
			}
		} else if (result == 0) {
			/*
			 * if nothing was transferred, retry; this is a
			 * workaround for some USB devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
						&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			else if (sdev->inquiry_len)
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed. "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and transfers
		 * the same amount as we successfully got in the first pass. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * A response shorter than 36 bytes could invalidate the device-flag
	 * lookup above, but aborting here would prevent any further use of
	 * the device (including spin-up).  The safest assumption is that
	 * the first 36 bytes are valid regardless of what the device says,
	 * so never let sdev->inquiry_len drop below 36.
	 */
	if (sdev->inquiry_len < 36) {
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				     "scsi scan: INQUIRY result too short (%d),"
				     " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * The scanning code needs to know the scsi_level, even if no
	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
	 * non-zero LUNs can be scanned.
	 */
	sdev->scsi_level = inq_result[2] & 0x07;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and the transport requires it, store the
	 * LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}

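/**
 * scsi_add_lun - allocate and fully initialize a scsi_device
 * @sdev:	holds information to be stored in the new scsi_device
 * @inq_result:	holds the result of a previous INQUIRY to the LUN
 * @bflags:	black/white list flag
 * @async:	1 if this device is being scanned asynchronously
 *
 * Description:
 *     Initialize the scsi_device @sdev from the INQUIRY data and the
 *     blacklist flags in *@bflags, then register it with sysfs unless
 *     the scan is asynchronous.
 *
 * Return:
 *     SCSI_SCAN_NO_RESPONSE: could not allocate or set up the scsi_device
 *     SCSI_SCAN_LUN_PRESENT: the scsi_device was initialized successfully
 */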
static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
			blist_flags_t *bflags, int async)
{
	int ret;

	/*
	 * Copy the INQUIRY data; the vendor, model and rev pointers below
	 * point into this saved copy.  Copy at least 36 bytes so those
	 * fixed offsets always reference initialized memory.
	 */
	sdev->inquiry = kmemdup(inq_result,
				max_t(size_t, sdev->inquiry_len, 36),
				GFP_KERNEL);
	if (sdev->inquiry == NULL)
		return SCSI_SCAN_NO_RESPONSE;

	sdev->vendor = (char *) (sdev->inquiry + 8);
	sdev->model = (char *) (sdev->inquiry + 16);
	sdev->rev = (char *) (sdev->inquiry + 32);

	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
		/*
		 * SATL (SCSI/ATA translation) devices report NOT READY
		 * when the underlying disk is in standby, so allow a
		 * restart (START UNIT) in that case.
		 */
		sdev->allow_restart = 1;
	}


	if (*bflags & BLIST_ISROM) {
		sdev->type = TYPE_ROM;
		sdev->removable = 1;
	} else {
		sdev->type = (inq_result[0] & 0x1f);
		sdev->removable = (inq_result[1] & 0x80) >> 7;

		/*
		 * Well-known LUNs sometimes report a bogus peripheral
		 * device type; force TYPE_WLUN for them.
		 */
		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
			sdev_printk(KERN_WARNING, sdev,
				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
				__func__, sdev->type, (unsigned int)sdev->lun);
			sdev->type = TYPE_WLUN;
		}

	}

	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
		/*
		 * RBC and MMC devices can return SCSI-3 compliance and yet
		 * still not support REPORT LUNS, so make them act as
		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
		 * specifically set.
		 */
		if ((*bflags & BLIST_REPORTLUN2) == 0)
			*bflags |= BLIST_NOREPORTLUN;
	}

	/*
	 * Record the peripheral qualifier and the capability bits that
	 * the INQUIRY data advertises.
	 */
	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
	sdev->lockable = sdev->removable;
	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);

	if (sdev->scsi_level >= SCSI_3 ||
	    (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
		sdev->ppr = 1;
	if (inq_result[7] & 0x60)
		sdev->wdtr = 1;
	if (inq_result[7] & 0x10)
		sdev->sdtr = 1;

	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
			"ANSI: %d%s\n", scsi_device_type(sdev->type),
			sdev->vendor, sdev->model, sdev->rev,
			sdev->inq_periph_qual, inq_result[2] & 0x07,
			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");

	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
	    !(*bflags & BLIST_NOTQ)) {
		sdev->tagged_supported = 1;
		sdev->simple_tags = 1;
	}

	/*
	 * Some devices (Texel CD ROM drives) have handshaking problems
	 * when used with the Seagate controllers.  borken is initialized
	 * to 1, and we clear it here unless the blacklist says otherwise.
	 */
	if ((*bflags & BLIST_BORKEN) == 0)
		sdev->borken = 0;

	if (*bflags & BLIST_NO_ULD_ATTACH)
		sdev->no_uld_attach = 1;

	/*
	 * Some devices cannot handle SELECT with ATN; honour the
	 * blacklist flag.
	 */
	if (*bflags & BLIST_SELECT_NO_ATN)
		sdev->select_no_atn = 1;

	/*
	 * Maximum 512 sector transfer length, for the broken RA4x00
	 * Compaq Disk Array.
	 */
	if (*bflags & BLIST_MAX_512)
		blk_queue_max_hw_sectors(sdev->request_queue, 512);
	/*
	 * Max 1024 sector transfer length for targets that report
	 * incorrect max/optimal lengths and relied on the old block
	 * layer safe default.
	 */
	else if (*bflags & BLIST_MAX_1024)
		blk_queue_max_hw_sectors(sdev->request_queue, 1024);

	/*
	 * Some devices may not want to have a start command automatically
	 * issued when a device is added.
	 */
	if (*bflags & BLIST_NOSTARTONADD)
		sdev->no_start_on_add = 1;

	if (*bflags & BLIST_SINGLELUN)
		scsi_target(sdev)->single_lun = 1;

	sdev->use_10_for_rw = 1;

	/* some devices don't like REPORT SUPPORTED OPERATION CODES
	 * and will simply timeout, causing sd_mod init to take a very
	 * very long time */
	if (*bflags & BLIST_NO_RSOC)
		sdev->no_report_opcodes = 1;

	/* set the device running here so that slave_configure
	 * may do I/O */
	mutex_lock(&sdev->state_mutex);
	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (ret)
		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
	mutex_unlock(&sdev->state_mutex);

	if (ret) {
		sdev_printk(KERN_ERR, sdev,
			    "in wrong state %s to complete scan\n",
			    scsi_device_state_name(sdev->sdev_state));
		return SCSI_SCAN_NO_RESPONSE;
	}

	if (*bflags & BLIST_NOT_LOCKABLE)
		sdev->lockable = 0;

	if (*bflags & BLIST_RETRY_HWERROR)
		sdev->retry_hwerror = 1;

	if (*bflags & BLIST_NO_DIF)
		sdev->no_dif = 1;

	if (*bflags & BLIST_UNMAP_LIMIT_WS)
		sdev->unmap_limit_for_ws = 1;

	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;

	if (*bflags & BLIST_TRY_VPD_PAGES)
		sdev->try_vpd_pages = 1;
	else if (*bflags & BLIST_SKIP_VPD_PAGES)
		sdev->skip_vpd_pages = 1;

	transport_configure_device(&sdev->sdev_gendev);

	if (sdev->host->hostt->slave_configure) {
		ret = sdev->host->hostt->slave_configure(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * the console with failure messages
			 */
			if (ret != -ENXIO) {
				sdev_printk(KERN_ERR, sdev,
					"failed to configure device\n");
			}
			return SCSI_SCAN_NO_RESPONSE;
		}
	}

	if (sdev->scsi_level >= SCSI_3)
		scsi_attach_vpd(sdev);

	sdev->max_queue_depth = sdev->queue_depth;
	WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
	sdev->sdev_bflags = *bflags;

	/*
	 * Ok, the device is now all set up, we can
	 * register it and tell the rest of the kernel
	 * about it.
	 */
	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
		return SCSI_SCAN_NO_RESPONSE;

	return SCSI_SCAN_LUN_PRESENT;
}

#ifdef CONFIG_SCSI_LOGGING
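/**
 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
 * @buf:   Output buffer with at least end-first+1 bytes of space
 * @inq:   Inquiry buffer (input)
 * @first: Offset of string into inq
 * @end:   Index after last character in inq
 */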
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	unsigned term = 0, idx;

	for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
		if (inq[idx+first] > ' ') {
			buf[idx] = inq[idx+first];
			term = idx+1;
		} else {
			buf[idx] = ' ';
		}
	}
	buf[term] = 0;
	return buf;
}
#endif

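/**
 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
 * @starget:	pointer to target device structure
 * @lun:	LUN of target device
 * @bflagsp:	store bflags here if not NULL
 * @sdevp:	probe the LUN corresponding to this scsi_device
 * @rescan:	if not equal to SCSI_SCAN_INITIAL, skip some code only
 *		needed on the first scan
 * @hostdata:	passed to scsi_alloc_sdev()
 *
 * Description:
 *     Call scsi_probe_lun, and if a LUN with an attached device is found,
 *     allocate and set it up by calling scsi_add_lun.
 *
 * Return:
 *   - SCSI_SCAN_NO_RESPONSE: could not allocate or set up a scsi_device
 *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
 *     attached at the LUN
 *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
 */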
static int scsi_probe_and_add_lun(struct scsi_target *starget,
				  u64 lun, blist_flags_t *bflagsp,
				  struct scsi_device **sdevp,
				  enum scsi_scan_mode rescan,
				  void *hostdata)
{
	struct scsi_device *sdev;
	unsigned char *result;
	blist_flags_t bflags;
	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	/*
	 * The rescan flag is used as an optimization; the first scan of a
	 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
	 */
	sdev = scsi_device_lookup_by_target(starget, lun);
	if (sdev) {
		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: device exists on %s\n",
				dev_name(&sdev->sdev_gendev)));
			if (sdevp)
				*sdevp = sdev;
			else
				scsi_device_put(sdev);

			if (bflagsp)
				*bflagsp = scsi_get_device_flags(sdev,
								 sdev->vendor,
								 sdev->model);
			return SCSI_SCAN_LUN_PRESENT;
		}
		scsi_device_put(sdev);
	} else
		sdev = scsi_alloc_sdev(starget, lun, hostdata);
	if (!sdev)
		goto out;

	result = kmalloc(result_len, GFP_KERNEL);
	if (!result)
		goto out_free_sdev;

	if (scsi_probe_lun(sdev, result, result_len, &bflags))
		goto out_free_result;

	if (bflagsp)
		*bflagsp = bflags;

	/*
	 * result contains valid SCSI INQUIRY data.
	 */
	if ((result[0] >> 5) == 3) {
		/*
		 * For a Peripheral qualifier 3 (011b), the SCSI
		 * spec says: The device server is not capable of
		 * supporting a physical device on this logical
		 * unit.
		 *
		 * For disks, this implies that there is no
		 * logical disk configured at sdev->lun, but there
		 * is a target id responding.
		 */
		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
				   " peripheral qualifier of 3, device not"
				   " added\n"));
		if (lun == 0) {
			SCSI_LOG_SCAN_BUS(1, {
				unsigned char vend[9];
				unsigned char mod[17];

				sdev_printk(KERN_INFO, sdev,
					"scsi scan: consider passing scsi_mod."
					"dev_flags=%s:%s:0x240 or 0x1000240\n",
					scsi_inq_str(vend, result, 8, 16),
					scsi_inq_str(mod, result, 16, 32));
			});
		}

		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	/*
	 * Some targets set the peripheral qualifier to 1 (001b) or report
	 * a peripheral device type of 0x1f to indicate that no device is
	 * attached at this LUN; honour both unless this is a well-known LUN.
	 */
	if (((result[0] >> 5) == 1 ||
	     (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
	    !scsi_is_wlun(lun)) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
					"scsi scan: peripheral device type"
					" of 31, no device added\n"));
		res = SCSI_SCAN_TARGET_PRESENT;
		goto out_free_result;
	}

	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (bflags & BLIST_KEY) {
			sdev->lockable = 0;
			scsi_unlock_floptical(sdev, result);
		}
	}

 out_free_result:
	kfree(result);
 out_free_sdev:
	if (res == SCSI_SCAN_LUN_PRESENT) {
		if (sdevp) {
			if (scsi_device_get(sdev) == 0) {
				*sdevp = sdev;
			} else {
				__scsi_remove_device(sdev);
				res = SCSI_SCAN_NO_RESPONSE;
			}
		}
	} else
		__scsi_remove_device(sdev);
 out:
	return res;
}

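/**
 * scsi_sequential_lun_scan - sequentially scan a SCSI target
 * @starget:	pointer to target structure to scan
 * @bflags:	black/white list flag for LUN 0
 * @scsi_level:	which version of the standard the target adheres to
 * @rescan:	passed to scsi_probe_and_add_lun()
 *
 * Description:
 *     Scan from LUN 1 (LUN 0 is assumed to already have been scanned) up
 *     to some maximum number of LUNs, stopping at the first LUN that does
 *     not respond unless the blacklist flags ask for a sparse scan.  The
 *     upper bound comes from max_scsi_luns, the host's max_lun and the
 *     blacklist flags (BLIST_FORCELUN, BLIST_MAX5LUN, BLIST_LARGELUN).
 */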
static void scsi_sequential_lun_scan(struct scsi_target *starget,
				     blist_flags_t bflags, int scsi_level,
				     enum scsi_scan_mode rescan)
{
	uint max_dev_lun;
	u64 sparse_lun, lun;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);

	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
		"scsi scan: Sequential scan\n"));

	max_dev_lun = min(max_scsi_luns, shost->max_lun);
	/*
	 * If this device is known to support sparse multiple units,
	 * override the other settings, and scan all of them.  Normally,
	 * SCSI-3 devices should be scanned via the REPORT LUNS.
	 */
	if (bflags & BLIST_SPARSELUN) {
		max_dev_lun = shost->max_lun;
		sparse_lun = 1;
	} else
		sparse_lun = 0;

	/*
	 * If this device is known to support multiple units, override
	 * the other settings, and scan all of them.
	 */
	if (bflags & BLIST_FORCELUN)
		max_dev_lun = shost->max_lun;
	/*
	 * Devices with BLIST_MAX5LUN hang beyond LUN 4, so stop at 5 LUNs.
	 */
	if (bflags & BLIST_MAX5LUN)
		max_dev_lun = min(5U, max_dev_lun);
	/*
	 * Do not scan SCSI-2 or lower devices past LUN 7, unless
	 * BLIST_LARGELUN.
	 */
	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
		max_dev_lun = min(8U, max_dev_lun);
	else
		max_dev_lun = min(256U, max_dev_lun);

	/*
	 * We have already scanned LUN 0, so start at LUN 1.  Keep scanning
	 * until we reach the max, or no LUN is found and we are not
	 * sparse_lun.
	 */
	for (lun = 1; lun < max_dev_lun; ++lun)
		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
		    !sparse_lun)
			return;
}

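/**
 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
 * @starget: which target
 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
 * @rescan: nonzero if we can skip code only needed on first scan
 *
 * Description:
 *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
 *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
 *
 * Return:
 *     0: scan completed (or no memory, so further scanning is futile)
 *     1: could not scan with REPORT LUN; the caller should fall back to a
 *        sequential scan
 */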
static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
				enum scsi_scan_mode rescan)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	unsigned int length;
	u64 lun;
	unsigned int num_luns;
	unsigned int retries;
	int result;
	struct scsi_lun *lunp, *lun_data;
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	int ret = 0;

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not
	 * set.  Also allow SCSI-2 if BLIST_REPORTLUN2 is set and the host
	 * adapter supports more than 8 LUNs.  Don't attempt the scan if
	 * the target does not support REPORT LUNS.
	 */
	if (bflags & BLIST_NOREPORTLUN)
		return 1;
	if (starget->scsi_level < SCSI_2 &&
	    starget->scsi_level != SCSI_UNKNOWN)
		return 1;
	if (starget->scsi_level < SCSI_3 &&
	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;
	if (starget->no_report_luns)
		return 1;

	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev)) {
			__scsi_remove_device(sdev);
			return 0;
		}
	}

	/*
	 * Allocate enough to hold the header (the same size as one
	 * scsi_lun) plus the number of LUNs we are requesting.  511 was
	 * the default value of the now removed max_report_luns parameter.
	 */
	length = (511 + 1) * sizeof(struct scsi_lun);
retry:
	lun_data = kmalloc(length, GFP_KERNEL);
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __func__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

	/*
	 * bytes 1 - 5: reserved, set to zero.
	 */
	memset(&scsi_cmd[1], 0, 5);

	/*
	 * bytes 6 - 9: length of the command.
	 */
	put_unaligned_be32(length, &scsi_cmd[6]);

	scsi_cmd[10] = 0;	/* reserved */
	scsi_cmd[11] = 0;	/* control */

	/*
	 * We can get a UNIT ATTENTION, for example a power on/reset, so
	 * retry a few times (like sd.c does for TEST UNIT READY).
	 * Experience shows some combinations of adapter/devices get at
	 * least two power on/resets.
	 *
	 * Illegal requests (for devices that do not support REPORT LUNS)
	 * should come through as a check condition, and will not generate
	 * a retry.
	 */
	for (retries = 0; retries < 3; retries++) {
		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: Sending REPORT LUNS to (try %d)\n",
				retries));

		result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
					  lun_data, length, &sshdr,
					  SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);

		SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
				"scsi scan: REPORT LUNS"
				" %s (try %d) result 0x%x\n",
				result ? "failed" : "successful",
				retries, result));
		if (result == 0)
			break;
		else if (scsi_sense_valid(&sshdr)) {
			if (sshdr.sense_key != UNIT_ATTENTION)
				break;
		}
	}

	if (result) {
		/*
		 * The device probably does not support a REPORT LUN command
		 */
		ret = 1;
		goto out_err;
	}

	/*
	 * If the returned LUN list does not fit in the buffer, note how
	 * big it needs to be and retry with a larger allocation.
	 */
	if (get_unaligned_be32(lun_data->scsi_lun) +
	    sizeof(struct scsi_lun) > length) {
		length = get_unaligned_be32(lun_data->scsi_lun) +
			 sizeof(struct scsi_lun);
		kfree(lun_data);
		goto retry;
	}
	length = get_unaligned_be32(lun_data->scsi_lun);

	num_luns = (length / sizeof(struct scsi_lun));

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
		"scsi scan: REPORT LUN scan\n"));

	/*
	 * Scan the LUNs in lun_data.  The entry at offset 0 is really
	 * the header, so start at 1 and go up to and including num_luns.
	 */
	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
		lun = scsilun_to_int(lunp);

		if (lun > sdev->host->max_lun) {
			sdev_printk(KERN_WARNING, sdev,
				    "lun%llu has a LUN larger than"
				    " allowed by the host adapter\n", lun);
		} else {
			int res;

			res = scsi_probe_and_add_lun(starget,
				lun, NULL, NULL, rescan, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.
				 */
				sdev_printk(KERN_ERR, sdev,
					"Unexpected response"
					" from lun %llu while scanning, scan"
					" aborted\n", (unsigned long long)lun);
				break;
			}
		}
	}

 out_err:
	kfree(lun_data);
 out:
	if (scsi_device_created(sdev))
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		__scsi_remove_device(sdev);
	scsi_device_put(sdev);
	return ret;
}

struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
				      uint id, u64 lun, void *hostdata)
{
	struct scsi_device *sdev = ERR_PTR(-ENODEV);
	struct device *parent = &shost->shost_gendev;
	struct scsi_target *starget;

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return ERR_PTR(-ENODEV);

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return ERR_PTR(-ENOMEM);
	scsi_autopm_get_target(starget);

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
				       SCSI_SCAN_RESCAN, hostdata);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
	scsi_autopm_put_target(starget);

	/*
	 * paired with scsi_alloc_target(): the target is destroyed unless
	 * scsi_probe_and_add_lun() made an underlying device visible
	 */
	scsi_target_reap(starget);
	put_device(&starget->dev);

	return sdev;
}
EXPORT_SYMBOL(__scsi_add_device);

int scsi_add_device(struct Scsi_Host *host, uint channel,
		    uint target, u64 lun)
{
	struct scsi_device *sdev =
		__scsi_add_device(host, channel, target, lun, NULL);
	if (IS_ERR(sdev))
		return PTR_ERR(sdev);

	scsi_device_put(sdev);
	return 0;
}
EXPORT_SYMBOL(scsi_add_device);

void scsi_rescan_device(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	device_lock(dev);

	scsi_attach_vpd(sdev);

	if (sdev->handler && sdev->handler->rescan)
		sdev->handler->rescan(sdev);

	if (dev->driver && try_module_get(dev->driver->owner)) {
		struct scsi_driver *drv = to_scsi_driver(dev->driver);

		if (drv->rescan)
			drv->rescan(dev);
		module_put(dev->driver->owner);
	}
	device_unlock(dev);
}
EXPORT_SYMBOL(scsi_rescan_device);

static void __scsi_scan_target(struct device *parent, unsigned int channel,
			       unsigned int id, u64 lun,
			       enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	blist_flags_t bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further.  Ideally,
	 * we would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all and if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}

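/**
 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
 * @lun:	Specific LUN to scan or SCAN_WILD_CARD
 * @rescan:	passed to the LUN scanning routines; SCSI_SCAN_INITIAL for
 *		no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, and
 *		SCSI_SCAN_MANUAL to force scanning even if 'scan=manual'
 *		is set.
 *
 * Description:
 *     Scan the target id on @parent, @channel, and @id.  Scan at least LUN
 *     0, and possibly all LUNs on the target id.
 *
 *     First try a REPORT LUN scan; if that does not scan the target, do a
 *     sequential scan of LUNs on the target id.
 */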
void scsi_scan_target(struct device *parent, unsigned int channel,
		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);

	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return;

	if (rescan != SCSI_SCAN_MANUAL &&
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		__scsi_scan_target(parent, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
}
EXPORT_SYMBOL(scsi_scan_target);

static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, u64 lun,
			      enum scsi_scan_mode rescan)
{
	uint order_id;

	if (id == SCAN_WILD_CARD)
		for (id = 0; id < shost->max_id; ++id) {
			if (shost->reverse_ordering)
				/*
				 * Scan from high to low id.
				 */
				order_id = shost->max_id - id - 1;
			else
				order_id = id;
			__scsi_scan_target(&shost->shost_gendev, channel,
					   order_id, lun, rescan);
		}
	else
		__scsi_scan_target(&shost->shost_gendev, channel,
				   id, lun, rescan);
}

int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
			    unsigned int id, u64 lun,
			    enum scsi_scan_mode rescan)
{
	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
		"%s: <%u:%u:%llu>\n",
		__func__, channel, id, lun));

	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
		return -EINVAL;

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		if (channel == SCAN_WILD_CARD)
			for (channel = 0; channel <= shost->max_channel;
			     channel++)
				scsi_scan_channel(shost, channel, id, lun,
						  rescan);
		else
			scsi_scan_channel(shost, channel, id, lun, rescan);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);

	return 0;
}

static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	shost_for_each_device(sdev, shost) {
		/* target removed before the device could be added */
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		/* If the device is already visible, skip adding it to sysfs */
		if (sdev->is_visible)
			continue;
		if (!scsi_host_scan_allowed(shost) ||
		    scsi_sysfs_add_sdev(sdev) != 0)
			__scsi_remove_device(sdev);
	}
}

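/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan(), or NULL if
 * the host is to be scanned synchronously
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 */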
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
	struct async_scan_data *data = NULL;
	unsigned long flags;

	if (strncmp(scsi_scan_type, "sync", 4) == 0)
		return NULL;

	mutex_lock(&shost->scan_mutex);
	if (shost->async_scan) {
		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
		goto err;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto err;
	data->shost = scsi_host_get(shost);
	if (!data->shost)
		goto err;
	init_completion(&data->prev_finished);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	if (list_empty(&scanning_hosts))
		complete(&data->prev_finished);
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	return data;

 err:
	mutex_unlock(&shost->scan_mutex);
	kfree(data);
	return NULL;
}

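/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 */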
static void scsi_finish_async_scan(struct async_scan_data *data)
{
	struct Scsi_Host *shost;
	unsigned long flags;

	if (!data)
		return;

	shost = data->shost;

	mutex_lock(&shost->scan_mutex);

	if (!shost->async_scan) {
		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
		dump_stack();
		mutex_unlock(&shost->scan_mutex);
		return;
	}

	wait_for_completion(&data->prev_finished);

	scsi_sysfs_add_devices(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	mutex_unlock(&shost->scan_mutex);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
	spin_unlock(&async_scan_lock);

	scsi_autopm_put_host(shost);
	scsi_host_put(shost);
	kfree(data);
}

static void do_scsi_scan_host(struct Scsi_Host *shost)
{
	if (shost->hostt->scan_finished) {
		unsigned long start = jiffies;
		if (shost->hostt->scan_start)
			shost->hostt->scan_start(shost);

		while (!shost->hostt->scan_finished(shost, jiffies - start))
			msleep(10);
	} else {
		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
				SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
	}
}

static void do_scan_async(void *_data, async_cookie_t c)
{
	struct async_scan_data *data = _data;
	struct Scsi_Host *shost = data->shost;

	do_scsi_scan_host(shost);
	scsi_finish_async_scan(data);
}

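/**
 * scsi_scan_host - scan the given adapter
 * @shost:	adapter to scan
 *
 * Kicks off an initial scan of all channels/ids/luns on the host, either
 * synchronously or asynchronously depending on the 'scan' module parameter.
 */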
void scsi_scan_host(struct Scsi_Host *shost)
{
	struct async_scan_data *data;

	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
	    strncmp(scsi_scan_type, "manual", 6) == 0)
		return;
	if (scsi_autopm_get_host(shost) < 0)
		return;

	data = scsi_prep_async_scan(shost);
	if (!data) {
		do_scsi_scan_host(shost);
		scsi_autopm_put_host(shost);
		return;
	}

	/* register with the async subsystem so wait_for_device_probe()
	 * will flush this work
	 */
	async_schedule(do_scan_async, data);

	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
}
EXPORT_SYMBOL(scsi_scan_host);

void scsi_forget_host(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

 restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (sdev->sdev_state == SDEV_DEL)
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);
		__scsi_remove_device(sdev);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}

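/**
 * scsi_get_host_dev - Create a scsi_device that points to the host adapter itself
 * @shost: Host that needs a scsi_device
 *
 * Lock status: None assumed.
 *
 * Returns:     The scsi_device or NULL
 *
 * Notes:
 *	Attach a single scsi_device to the Scsi_Host - this should
 *	be made to look like a "pseudo-device" that points to the
 *	HA itself.
 */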
struct scsi_device *scsi_get_host_dev(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = NULL;
	struct scsi_target *starget;

	mutex_lock(&shost->scan_mutex);
	if (!scsi_host_scan_allowed(shost))
		goto out;
	starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->this_id);
	if (!starget)
		goto out;

	sdev = scsi_alloc_sdev(starget, 0, NULL);
	if (sdev)
		sdev->borken = 0;
	else
		scsi_target_reap(starget);
	put_device(&starget->dev);
 out:
	mutex_unlock(&shost->scan_mutex);
	return sdev;
}
EXPORT_SYMBOL(scsi_get_host_dev);

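/**
 * scsi_free_host_dev - Free a scsi_device that points to the host adapter itself
 * @sdev: Host device to be freed
 *
 * Lock status: None assumed.
 *
 * Returns:     Nothing
 */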
void scsi_free_host_dev(struct scsi_device *sdev)
{
	BUG_ON(sdev->id != sdev->host->this_id);

	__scsi_remove_device(sdev);
}
EXPORT_SYMBOL(scsi_free_host_dev);