#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/times.h>
#include <linux/uio.h>
#include <asm/uaccess.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>

struct blk_cmd_filter {
	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
};

static struct blk_cmd_filter blk_default_cmd_filter;

/*
 * CDB length indexed by SCSI opcode group (top three bits of the opcode).
 * Group 3 is reserved and should never be used.
 */
const unsigned char scsi_command_size_tbl[8] =
{
	6, 10, 10, 12,
	16, 12, 10, 10
};
EXPORT_SYMBOL(scsi_command_size_tbl);
#include <scsi/sg.h>

static int sg_get_version(int __user *p)
{
	/* the sg driver interface version we emulate: 3.5.27 */
	static const int sg_version_num = 30527;
	return put_user(sg_version_num, p);
}

static int scsi_get_idlun(struct request_queue *q, int __user *p)
{
	/* the block layer has no host/channel/id/lun information, report zero */
	return put_user(0, p);
}

static int scsi_get_bus(struct request_queue *q, int __user *p)
{
	return put_user(0, p);
}

static int sg_get_timeout(struct request_queue *q)
{
	return jiffies_to_clock_t(q->sg_timeout);
}

static int sg_set_timeout(struct request_queue *q, int __user *p)
{
	int timeout, err = get_user(timeout, p);

	if (!err)
		q->sg_timeout = clock_t_to_jiffies(timeout);

	return err;
}

static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
	unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);

	return put_user(val, p);
}

static int sg_set_reserved_size(struct request_queue *q, int __user *p)
{
	int size, err = get_user(size, p);

	if (err)
		return err;

	if (size < 0)
		return -EINVAL;
	if (size > (queue_max_sectors(q) << 9))
		size = queue_max_sectors(q) << 9;

	q->sg_reserved_size = size;
	return 0;
}

/*
 * Always report an emulated host.
 */
static int sg_emulated_host(struct request_queue *q, int __user *p)
{
	return put_user(1, p);
}

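/*
 * Default whitelist of SCSI opcodes that unprivileged users may send to a
 * block device: read-safe commands need only an open file descriptor,
 * write-safe commands additionally need the device opened for writing
 * (see blk_verify_command() below).
 */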
static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
{
	/* Basic read-only commands */
	__set_bit(TEST_UNIT_READY, filter->read_ok);
	__set_bit(REQUEST_SENSE, filter->read_ok);
	__set_bit(READ_6, filter->read_ok);
	__set_bit(READ_10, filter->read_ok);
	__set_bit(READ_12, filter->read_ok);
	__set_bit(READ_16, filter->read_ok);
	__set_bit(READ_BUFFER, filter->read_ok);
	__set_bit(READ_DEFECT_DATA, filter->read_ok);
	__set_bit(READ_CAPACITY, filter->read_ok);
	__set_bit(READ_LONG, filter->read_ok);
	__set_bit(INQUIRY, filter->read_ok);
	__set_bit(MODE_SENSE, filter->read_ok);
	__set_bit(MODE_SENSE_10, filter->read_ok);
	__set_bit(LOG_SENSE, filter->read_ok);
	__set_bit(START_STOP, filter->read_ok);
	__set_bit(GPCMD_VERIFY_10, filter->read_ok);
	__set_bit(VERIFY_16, filter->read_ok);
	__set_bit(REPORT_LUNS, filter->read_ok);
	__set_bit(SERVICE_ACTION_IN, filter->read_ok);
	__set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
	__set_bit(MAINTENANCE_IN, filter->read_ok);
	__set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);

	/* Audio CD commands */
	__set_bit(GPCMD_PLAY_CD, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
	__set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
	__set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);

	/* CD/DVD data reading */
	__set_bit(GPCMD_READ_CD, filter->read_ok);
	__set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
	__set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
	__set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
	__set_bit(GPCMD_READ_HEADER, filter->read_ok);
	__set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
	__set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
	__set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
	__set_bit(GPCMD_REPORT_KEY, filter->read_ok);
	__set_bit(GPCMD_SCAN, filter->read_ok);
	__set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
	__set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
	__set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
	__set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
	__set_bit(GPCMD_SEEK, filter->read_ok);
	__set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);

	/* Basic writing commands */
	__set_bit(WRITE_6, filter->write_ok);
	__set_bit(WRITE_10, filter->write_ok);
	__set_bit(WRITE_VERIFY, filter->write_ok);
	__set_bit(WRITE_12, filter->write_ok);
	__set_bit(WRITE_VERIFY_12, filter->write_ok);
	__set_bit(WRITE_16, filter->write_ok);
	__set_bit(WRITE_LONG, filter->write_ok);
	__set_bit(WRITE_LONG_2, filter->write_ok);
	__set_bit(WRITE_SAME, filter->write_ok);
	__set_bit(WRITE_SAME_16, filter->write_ok);
	__set_bit(WRITE_SAME_32, filter->write_ok);
	__set_bit(ERASE, filter->write_ok);
	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
	__set_bit(MODE_SELECT, filter->write_ok);
	__set_bit(LOG_SELECT, filter->write_ok);
	__set_bit(GPCMD_BLANK, filter->write_ok);
	__set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
	__set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
	__set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
	__set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
	__set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
	__set_bit(GPCMD_SEND_EVENT, filter->write_ok);
	__set_bit(GPCMD_SEND_KEY, filter->write_ok);
	__set_bit(GPCMD_SEND_OPC, filter->write_ok);
	__set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
	__set_bit(GPCMD_SET_SPEED, filter->write_ok);
	__set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
	__set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
	__set_bit(GPCMD_SET_STREAMING, filter->write_ok);
	__set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
}

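/**
 * blk_verify_command - check a CDB against the default command filter
 * @cmd:		the SCSI CDB about to be issued
 * @has_write_perm:	non-zero if the file was opened for writing
 *
 * Returns 0 if the opcode is allowed for this caller, -EPERM otherwise.
 * CAP_SYS_RAWIO bypasses the filter entirely.
 */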
int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm)
{
	struct blk_cmd_filter *filter = &blk_default_cmd_filter;

	/* root can do any command. */
	if (capable(CAP_SYS_RAWIO))
		return 0;

	/* defensive check; the default filter is always present */
	if (!filter)
		return -EPERM;

	/* Anybody who can open the device can do a read-safe command */
	if (test_bit(cmd[0], filter->read_ok))
		return 0;

	/* Write-safe commands additionally require a writable open */
	if (test_bit(cmd[0], filter->write_ok) && has_write_perm)
		return 0;

	return -EPERM;
}
EXPORT_SYMBOL(blk_verify_command);

static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
			     struct sg_io_hdr *hdr, fmode_t mode)
{
	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;
	if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->cmd_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
				 struct bio *bio)
{
	int r, ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->status = rq->errors & 0xff;
	hdr->masked_status = status_byte(rq->errors);
	hdr->msg_status = msg_byte(rq->errors);
	hdr->host_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->resid = rq->resid_len;
	hdr->sb_len_wr = 0;

	if (rq->sense_len && hdr->sbp) {
		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);

		if (!copy_to_user(hdr->sbp, rq->sense, len))
			hdr->sb_len_wr = len;
		else
			ret = -EFAULT;
	}

	r = blk_rq_unmap_user(bio);
	if (!ret)
		ret = r;
	blk_put_request(rq);

	return ret;
}

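/*
 * sg_io - service an SG_IO request against this queue
 *
 * Translates a struct sg_io_hdr (the sg version 3 interface) into a
 * REQ_TYPE_BLOCK_PC request, maps the user buffer (or iovec list), executes
 * the request and copies status, sense data and the residual count back
 * into the header.
 *
 * Illustrative only (not part of this file's build): a userspace caller
 * would typically fill the header roughly like this before issuing
 * ioctl(fd, SG_IO, &io); field names are from <scsi/sg.h>:
 *
 *	struct sg_io_hdr io = { 0 };
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	(INQUIRY)
 *	unsigned char buf[96], sense[32];
 *
 *	io.interface_id    = 'S';
 *	io.cmdp            = cdb;
 *	io.cmd_len         = sizeof(cdb);
 *	io.dxfer_direction = SG_DXFER_FROM_DEV;
 *	io.dxferp          = buf;
 *	io.dxfer_len       = sizeof(buf);
 *	io.sbp             = sense;
 *	io.mx_sb_len       = sizeof(sense);
 *	io.timeout         = 5000;				(milliseconds)
 *
 * After the ioctl returns 0, (io.info & SG_INFO_CHECK) indicates that
 * status and sense data should be examined.
 */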
static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
		 struct sg_io_hdr *hdr, fmode_t mode)
{
	unsigned long start_time;
	int writing = 0, ret = 0;
	struct request *rq;
	char sense[SCSI_SENSE_BUFFERSIZE];
	struct bio *bio;

	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
		return -EIO;

	if (hdr->dxfer_len)
		switch (hdr->dxfer_direction) {
		default:
			return -EINVAL;
		case SG_DXFER_TO_DEV:
			writing = 1;
			break;
		case SG_DXFER_TO_FROM_DEV:
		case SG_DXFER_FROM_DEV:
			break;
		}

	rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	if (hdr->iovec_count) {
		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
		size_t iov_data_len;
		struct sg_iovec *sg_iov;
		struct iovec *iov;
		int i;

		sg_iov = kmalloc(size, GFP_KERNEL);
		if (!sg_iov) {
			ret = -ENOMEM;
			goto out;
		}

		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
			kfree(sg_iov);
			ret = -EFAULT;
			goto out;
		}

		/*
		 * Sum up the length of the iovecs, guarding against
		 * overflow of the running total.
		 */
		iov = (struct iovec *) sg_iov;
		iov_data_len = 0;
		for (i = 0; i < hdr->iovec_count; i++) {
			if (iov_data_len + iov[i].iov_len < iov_data_len) {
				kfree(sg_iov);
				ret = -EINVAL;
				goto out;
			}
			iov_data_len += iov[i].iov_len;
		}

		/* SG_IO howto says that the shorter of the two wins */
		if (hdr->dxfer_len < iov_data_len) {
			hdr->iovec_count = iov_shorten(iov,
						       hdr->iovec_count,
						       hdr->dxfer_len);
			iov_data_len = hdr->dxfer_len;
		}

		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
					  iov_data_len, GFP_KERNEL);
		kfree(sg_iov);
	} else if (hdr->dxfer_len)
		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
				      GFP_KERNEL);

	if (ret)
		goto out;

	bio = rq->bio;
	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->retries = 0;

	start_time = jiffies;

	/*
	 * Ignore the command's outcome here: everything (status, sense,
	 * residual count) is passed back to the caller through the header.
	 * Note that a non-zero SCSI status is not necessarily an error.
	 */
	blk_execute_rq(q, bd_disk, rq, 0);

	hdr->duration = jiffies_to_msecs(jiffies - start_time);

	return blk_complete_sghdr_rq(rq, hdr, bio);
out:
	blk_put_request(rq);
	return ret;
}
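
/**
 * sg_scsi_ioctl - implement the deprecated SCSI_IOCTL_SEND_COMMAND ioctl
 * @q:		request queue the command is sent down
 * @disk:	gendisk the ioctl was issued against
 * @mode:	open mode of the file the ioctl came in on
 * @sic:	userspace buffer: inlen, outlen, then the CDB followed by out-data
 *
 * This is the old "send command" interface; new code should use SG_IO
 * instead.  Data transfers in either direction are limited to PAGE_SIZE,
 * the CDB length is derived from the opcode via COMMAND_SIZE(), and on
 * error at most OMAX_SB_LEN bytes of sense data are copied back over the
 * start of @sic->data.
 */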
#define OMAX_SB_LEN 16	/* max amount of sense data copied back to the caller */
int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
		  struct scsi_ioctl_command __user *sic)
{
	struct request *rq;
	int err;
	unsigned int in_len, out_len, bytes, opcode, cmdlen;
	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];

	if (!sic)
		return -EINVAL;

	/*
	 * get in and out lengths, verify they don't exceed a page worth of data
	 */
	if (get_user(in_len, &sic->inlen))
		return -EFAULT;
	if (get_user(out_len, &sic->outlen))
		return -EFAULT;
	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
		return -EINVAL;
	if (get_user(opcode, sic->data))
		return -EFAULT;

	bytes = max(in_len, out_len);
	if (bytes) {
		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER | __GFP_NOWARN);
		if (!buffer)
			return -ENOMEM;
	}

	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
	if (!rq) {
		kfree(buffer);
		return -ENOMEM;
	}

	cmdlen = COMMAND_SIZE(opcode);

	/*
	 * get command and data to send to device, if any
	 */
	err = -EFAULT;
	rq->cmd_len = cmdlen;
	if (copy_from_user(rq->cmd, sic->data, cmdlen))
		goto error;

	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
		goto error;

	err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
	if (err)
		goto error;

	/* default; may be overridden per opcode below */
	rq->retries = 5;

	switch (opcode) {
	case SEND_DIAGNOSTIC:
	case FORMAT_UNIT:
		rq->timeout = FORMAT_UNIT_TIMEOUT;
		rq->retries = 1;
		break;
	case START_STOP:
		rq->timeout = START_STOP_TIMEOUT;
		break;
	case MOVE_MEDIUM:
		rq->timeout = MOVE_MEDIUM_TIMEOUT;
		break;
	case READ_ELEMENT_STATUS:
		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
		break;
	case READ_DEFECT_DATA:
		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
		rq->retries = 1;
		break;
	default:
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
		break;
	}

	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
		err = DRIVER_ERROR << 24;
		goto error;
	}

	memset(sense, 0, sizeof(sense));
	rq->sense = sense;
	rq->sense_len = 0;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	blk_execute_rq(q, disk, rq, 0);

	err = rq->errors & 0xff;	/* low byte of rq->errors is the SCSI status */
	if (err) {
		if (rq->sense_len && rq->sense) {
			bytes = (OMAX_SB_LEN > rq->sense_len) ?
				rq->sense_len : OMAX_SB_LEN;
			if (copy_to_user(sic->data, rq->sense, bytes))
				err = -EFAULT;
		}
	} else {
		if (copy_to_user(sic->data, buffer, out_len))
			err = -EFAULT;
	}

error:
	kfree(buffer);
	blk_put_request(rq);
	return err;
}
EXPORT_SYMBOL_GPL(sg_scsi_ioctl);

/* Send a basic, fixed six byte command down to the device */
static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
			      int cmd, int data)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, WRITE, __GFP_WAIT);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	rq->cmd[0] = cmd;
	rq->cmd[4] = data;
	rq->cmd_len = 6;
	err = blk_execute_rq(q, bd_disk, rq, 0);
	blk_put_request(rq);

	return err;
}

static inline int blk_send_start_stop(struct request_queue *q,
				      struct gendisk *bd_disk, int data)
{
	return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
}

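/**
 * scsi_cmd_ioctl - dispatch the generic SCSI/SG ioctls for a block queue
 * @q:		request queue the device is attached to
 * @bd_disk:	gendisk the ioctl was issued against
 * @mode:	open mode of the file the ioctl came in on
 * @cmd:	ioctl command number
 * @arg:	userspace argument
 *
 * Handles the sg version 3 ioctls (SG_IO and friends), the legacy
 * SCSI_IOCTL_SEND_COMMAND interface, CDROM_SEND_PACKET and the simple
 * tray open/close ioctls.  Returns -ENOTTY for anything it does not
 * recognise so callers can fall back to their own handling.
 */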
int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
		   unsigned int cmd, void __user *arg)
{
	int err;

	if (!q)
		return -ENXIO;

	switch (cmd) {
	/*
	 * new sgv3 interface
	 */
	case SG_GET_VERSION_NUM:
		err = sg_get_version(arg);
		break;
	case SCSI_IOCTL_GET_IDLUN:
		err = scsi_get_idlun(q, arg);
		break;
	case SCSI_IOCTL_GET_BUS_NUMBER:
		err = scsi_get_bus(q, arg);
		break;
	case SG_SET_TIMEOUT:
		err = sg_set_timeout(q, arg);
		break;
	case SG_GET_TIMEOUT:
		err = sg_get_timeout(q);
		break;
	case SG_GET_RESERVED_SIZE:
		err = sg_get_reserved_size(q, arg);
		break;
	case SG_SET_RESERVED_SIZE:
		err = sg_set_reserved_size(q, arg);
		break;
	case SG_EMULATED_HOST:
		err = sg_emulated_host(q, arg);
		break;
	case SG_IO: {
		struct sg_io_hdr hdr;

		err = -EFAULT;
		if (copy_from_user(&hdr, arg, sizeof(hdr)))
			break;
		err = sg_io(q, bd_disk, &hdr, mode);
		if (err == -EFAULT)
			break;

		if (copy_to_user(arg, &hdr, sizeof(hdr)))
			err = -EFAULT;
		break;
	}
	case CDROM_SEND_PACKET: {
		struct cdrom_generic_command cgc;
		struct sg_io_hdr hdr;

		err = -EFAULT;
		if (copy_from_user(&cgc, arg, sizeof(cgc)))
			break;
		cgc.timeout = clock_t_to_jiffies(cgc.timeout);
		memset(&hdr, 0, sizeof(hdr));
		hdr.interface_id = 'S';
		hdr.cmd_len = sizeof(cgc.cmd);
		hdr.dxfer_len = cgc.buflen;
		err = 0;
		switch (cgc.data_direction) {
		case CGC_DATA_UNKNOWN:
			hdr.dxfer_direction = SG_DXFER_UNKNOWN;
			break;
		case CGC_DATA_WRITE:
			hdr.dxfer_direction = SG_DXFER_TO_DEV;
			break;
		case CGC_DATA_READ:
			hdr.dxfer_direction = SG_DXFER_FROM_DEV;
			break;
		case CGC_DATA_NONE:
			hdr.dxfer_direction = SG_DXFER_NONE;
			break;
		default:
			err = -EINVAL;
		}
		if (err)
			break;

		hdr.dxferp = cgc.buffer;
		hdr.sbp = cgc.sense;
		if (hdr.sbp)
			hdr.mx_sb_len = sizeof(struct request_sense);
		hdr.timeout = jiffies_to_msecs(cgc.timeout);
		hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
		hdr.cmd_len = sizeof(cgc.cmd);

		err = sg_io(q, bd_disk, &hdr, mode);
		if (err == -EFAULT)
			break;

		if (hdr.status)
			err = -EIO;

		cgc.stat = err;
		cgc.buflen = hdr.resid;
		if (copy_to_user(arg, &cgc, sizeof(cgc)))
			err = -EFAULT;

		break;
	}

	/*
	 * old junk scsi send command ioctl
	 */
	case SCSI_IOCTL_SEND_COMMAND:
		printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
		err = -EINVAL;
		if (!arg)
			break;

		err = sg_scsi_ioctl(q, bd_disk, mode, arg);
		break;
	case CDROMCLOSETRAY:
		err = blk_send_start_stop(q, bd_disk, 0x03);	/* LoEj | Start: load */
		break;
	case CDROMEJECT:
		err = blk_send_start_stop(q, bd_disk, 0x02);	/* LoEj: eject */
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}
EXPORT_SYMBOL(scsi_cmd_ioctl);

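/**
 * scsi_verify_blk_ioctl - check whether a SCSI ioctl may target this device
 * @bd:		block device the ioctl was issued against
 * @cmd:	ioctl command number
 *
 * Whole-device ioctls are always allowed.  On a partition only a small
 * set of harmless informational ioctls is permitted, unless the caller
 * has CAP_SYS_RAWIO; everything else gets -ENOIOCTLCMD.
 */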
int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
{
	if (bd && bd == bd->bd_contains)
		return 0;

	/*
	 * Actually none of these is particularly useful on a partition,
	 * but they are safe.
	 */
	switch (cmd) {
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SCSI_IOCTL_GET_PCI:
	case SCSI_IOCTL_PROBE_HOST:
	case SG_GET_VERSION_NUM:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
		return 0;
	case CDROM_GET_CAPABILITY:
		/*
		 * udev sends this to every block device and we do not want
		 * to spam dmesg about it; CD-ROMs do not have partitions,
		 * so we only get here for disks.
		 */
		return -ENOIOCTLCMD;
	default:
		break;
	}

	if (capable(CAP_SYS_RAWIO))
		return 0;

	/* In particular, rule out all resets and host-specific ioctls.  */
	printk_ratelimited(KERN_WARNING
			   "%s: sending ioctl %x to a partition!\n", current->comm, cmd);

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);

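/**
 * scsi_cmd_blk_ioctl - scsi_cmd_ioctl() with a partition-safety check
 * @bd:		block device the ioctl was issued against
 * @mode:	open mode of the file the ioctl came in on
 * @cmd:	ioctl command number
 * @arg:	userspace argument
 *
 * Convenience wrapper used by block drivers: verifies the target with
 * scsi_verify_blk_ioctl() and then hands off to scsi_cmd_ioctl().
 */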
int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
		       unsigned int cmd, void __user *arg)
{
	int ret;

	ret = scsi_verify_blk_ioctl(bd, cmd);
	if (ret < 0)
		return ret;

	return scsi_cmd_ioctl(bd->bd_disk->queue, bd->bd_disk, mode, cmd, arg);
}
EXPORT_SYMBOL(scsi_cmd_blk_ioctl);

static int __init blk_scsi_ioctl_init(void)
{
	blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
	return 0;
}
fs_initcall(blk_scsi_ioctl_init);