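/*
 * viodasd.c - iSeries Virtual DASD block device driver
 *
 * Provides access to virtual disks owned and managed by the OS/400
 * hosting partition.  All disk I/O is performed by exchanging LP events
 * with that partition over the vio path.
 */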
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>
#include <asm/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/vio.h>
#include <asm/firmware.h>

MODULE_DESCRIPTION("iSeries Virtual DASD");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");

#define VIOD_GENHD_NAME		"iseries/vd"

#define VIOD_VERS		"1.64"

#define VIOD_KERN_WARNING	KERN_WARNING "viod: "
#define VIOD_KERN_INFO		KERN_INFO "viod: "
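/*
 * Each disk is given 1 << PARTITION_SHIFT minor numbers (the whole-disk
 * node plus seven partitions), and at most MAX_DISKNO virtual disks are
 * supported.
 */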
enum {
	PARTITION_SHIFT = 3,
	MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
	MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name)
};

static DEFINE_SPINLOCK(viodasd_spinlock);

/* Maximum number of requests we allow outstanding to the hosting partition */
#define VIOMAXREQ		16

/* Index of a viodasd_device within the viodasd_devices[] array */
#define DEVICE_NO(cell)	((struct viodasd_device *)(cell) - &viodasd_devices[0])
/*
 * Used to wait for the response to an open request; the event handler
 * fills in the results and posts the completion.
 */
struct viodasd_waitevent {
	struct completion com;
	int rc;
	u16 sub_result;
	int max_disk;		/* only filled in for open responses */
};

/* Error codes returned by the hosting partition */
static const struct vio_error_entry viodasd_err_table[] = {
	{ 0x0201, EINVAL, "Invalid Range" },
	{ 0x0202, EINVAL, "Invalid Token" },
	{ 0x0203, EIO, "DMA Error" },
	{ 0x0204, EIO, "Use Error" },
	{ 0x0205, EIO, "Release Error" },
	{ 0x0206, EINVAL, "Invalid Disk" },
	{ 0x0207, EBUSY, "Can't Lock" },
	{ 0x0208, EIO, "Already Locked" },
	{ 0x0209, EIO, "Already Unlocked" },
	{ 0x020A, EIO, "Invalid Arg" },
	{ 0x020B, EIO, "Bad IFS File" },
	{ 0x020C, EROFS, "Read Only Device" },
	{ 0x02FF, EIO, "Internal Error" },
	{ 0x0000, 0, NULL },
};
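/* Largest I/O request (in sectors) we accept: one 4K page per DMA element */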
#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
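/* Number of disk I/O requests currently outstanding to the hosting partition */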
static int num_req_outstanding;
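/* Per-disk state, indexed by virtual disk number */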
struct viodasd_device {
	u16 cylinders;
	u16 tracks;
	u16 sectors;
	u16 bytes_per_sector;
	u64 size;
	int read_only;
	spinlock_t q_lock;
	struct gendisk *disk;
	struct device *dev;
} viodasd_devices[MAX_DISKNO];
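/* External open entry point */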
static int viodasd_open(struct block_device *bdev, fmode_t mode)
{
	struct viodasd_device *d = bdev->bd_disk->private_data;
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	u16 flags = 0;

	if (d->read_only) {
		if (mode & FMODE_WRITE)
			return -EROFS;
		flags = vioblockflags_ro;
	}

	init_completion(&we.com);

	/* Send the open event to the hosting partition */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc);
		return -EIO;
	}

	wait_for_completion(&we.com);

	/* Check the return code filled in by the event handler */
	if (we.rc != 0) {
		const struct vio_error_entry *err =
			vio_lookup_rc(viodasd_err_table, we.sub_result);

		printk(VIOD_KERN_WARNING
		       "bad rc opening disk: %d:0x%04x (%s)\n",
		       (int)we.rc, we.sub_result, err->msg);
		return -EIO;
	}

	return 0;
}
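/* External release entry point */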
static int viodasd_release(struct gendisk *disk, fmode_t mode)
{
	struct viodasd_device *d = disk->private_data;
	HvLpEvent_Rc hvrc;

	/* Send the close event; no response is expected */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48),
			0, 0, 0);
	if (hvrc != 0)
		printk(VIOD_KERN_WARNING "HV close call failed %d\n",
		       (int)hvrc);
	return 0;
}
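/*
 * Report the disk geometry, falling back to a synthetic 32-sector,
 * 64-head layout when the hosting partition did not supply one.
 */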
static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct viodasd_device *d = disk->private_data;

	geo->sectors = d->sectors ? d->sectors : 32;
	geo->heads = d->tracks ? d->tracks : 64;
	geo->cylinders = d->cylinders ? d->cylinders :
		get_capacity(disk) / (geo->sectors * geo->heads);

	return 0;
}
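/* Our block device operations */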
static struct block_device_operations viodasd_fops = {
	.owner = THIS_MODULE,
	.open = viodasd_open,
	.release = viodasd_release,
	.getgeo = viodasd_getgeo,
};
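/* Complete num_sectors sectors of the given request */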
static void viodasd_end_request(struct request *req, int error,
		int num_sectors)
{
	__blk_end_request(req, error, num_sectors << 9);
}
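/*
 * Build and send the read/write LP event for a request to the hosting
 * partition.  Returns 0 on success, non-zero if the event could not be sent.
 */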
static int send_request(struct request *req)
{
	u64 start;
	int direction;
	int nsg;
	u16 viocmd;
	HvLpEvent_Rc hvrc;
	struct vioblocklpevent *bevent;
	struct HvLpEvent *hev;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	int sgindex;
	struct viodasd_device *d;
	unsigned long flags;

	start = (u64)req->sector << 9;

	if (rq_data_dir(req) == READ) {
		direction = DMA_FROM_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockread;
	} else {
		direction = DMA_TO_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockwrite;
	}

	d = req->rq_disk->private_data;

	/* Map the request's buffers for DMA */
	sg_init_table(sg, VIOMAXBLOCKDMA);
	nsg = blk_rq_map_sg(req->q, req, sg);
	nsg = dma_map_sg(d->dev, sg, nsg, direction);

	spin_lock_irqsave(&viodasd_spinlock, flags);
	num_req_outstanding++;

	/*
	 * A single DMA segment can be sent with signalLpEventFast;
	 * otherwise build a full event by hand below.
	 */
	if (nsg == 1)
		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
				HvLpEvent_Type_VirtualIo, viocmd,
				HvLpEvent_AckInd_DoAck,
				HvLpEvent_AckType_ImmediateAck,
				viopath_sourceinst(viopath_hostLp),
				viopath_targetinst(viopath_hostLp),
				(u64)(unsigned long)req, VIOVERSION << 16,
				((u64)DEVICE_NO(d) << 48), start,
				((u64)sg_dma_address(&sg[0])) << 32,
				sg_dma_len(&sg[0]));
	else {
		bevent = (struct vioblocklpevent *)
			vio_get_event_buffer(viomajorsubtype_blockio);
		if (bevent == NULL) {
			printk(VIOD_KERN_WARNING
			       "error allocating disk event buffer\n");
			goto error_ret;
		}

		/*
		 * Build up the actual event.  The request pointer goes in
		 * the correlation token so the response can be matched up
		 * with this request later.
		 */
		memset(bevent, 0, sizeof(struct vioblocklpevent));
		hev = &bevent->event;
		hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
			HV_LP_EVENT_INT;
		hev->xType = HvLpEvent_Type_VirtualIo;
		hev->xSubtype = viocmd;
		hev->xSourceLp = HvLpConfig_getLpIndex();
		hev->xTargetLp = viopath_hostLp;
		hev->xSizeMinus1 =
			offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
			(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
		hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
		hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
		hev->xCorrelationToken = (u64)req;
		bevent->version = VIOVERSION;
		bevent->disk = DEVICE_NO(d);
		bevent->u.rw_data.offset = start;

		/* Copy the DMA information from the scatterlist into the event */
		for (sgindex = 0; sgindex < nsg; sgindex++) {
			bevent->u.rw_data.dma_info[sgindex].token =
				sg_dma_address(&sg[sgindex]);
			bevent->u.rw_data.dma_info[sgindex].len =
				sg_dma_len(&sg[sgindex]);
		}

		/* Send the event and free the temporary buffer */
		hvrc = HvCallEvent_signalLpEvent(&bevent->event);
		vio_free_event_buffer(viomajorsubtype_blockio, bevent);
	}

	if (hvrc != HvLpEvent_Rc_Good) {
		printk(VIOD_KERN_WARNING
		       "error sending disk event to OS/400 (rc %d)\n",
		       (int)hvrc);
		goto error_ret;
	}
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	return 0;

error_ret:
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	dma_unmap_sg(d->dev, sg, nsg, direction);
	return -1;
}
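/* Request-queue callback: pull requests off the queue and send them */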
static void do_viodasd_request(struct request_queue *q)
{
	struct request *req;

	/*
	 * Keep dispatching until we hit the limit of requests outstanding
	 * to the hosting partition; the completion handler restarts the
	 * queues as responses come back.
	 */
	while (num_req_outstanding < VIOMAXREQ) {
		req = elv_next_request(q);
		if (req == NULL)
			return;

		blkdev_dequeue_request(req);

		if (!blk_fs_request(req)) {
			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
			continue;
		}

		if (send_request(req) != 0)
			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
	}
}
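/*
 * Probe a single disk: open it on the hosting partition to learn its size
 * and geometry, then create and register the request queue and gendisk.
 */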
static int probe_disk(struct viodasd_device *d)
{
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	int dev_no = DEVICE_NO(d);
	struct gendisk *g;
	struct request_queue *q;
	u16 flags = 0;

retry:
	init_completion(&we.com);

	/* Send the open event to the hosting partition */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc);
		return 0;
	}

	wait_for_completion(&we.com);

	if (we.rc != 0) {
		if (flags != 0)
			return 0;
		/* try again, this time read only */
		flags = vioblockflags_ro;
		goto retry;
	}
	if (we.max_disk > (MAX_DISKNO - 1)) {
		static int warned;

		if (warned == 0) {
			warned++;
			printk(VIOD_KERN_INFO
			       "Only examining the first %d "
			       "of %d disks connected\n",
			       MAX_DISKNO, we.max_disk + 1);
		}
	}

	/* Send the close event; no response is expected */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING
		       "bad rc sending event to OS/400 %d\n", (int)hvrc);
		return 0;
	}

	if (d->dev == NULL) {
		/* No vio device yet: this is a reprobe for new disks */
		if (vio_create_viodasd(dev_no) == NULL) {
			printk(VIOD_KERN_WARNING
			       "cannot allocate virtual device for disk %d\n",
			       dev_no);
			return 0;
		}
		/*
		 * Creating the vio device causes viodasd_probe() to run,
		 * which re-enters this routine with d->dev set.
		 */
		return 1;
	}

	/* Create the request queue and gendisk for this disk */
	spin_lock_init(&d->q_lock);
	q = blk_init_queue(do_viodasd_request, &d->q_lock);
	if (q == NULL) {
		printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n",
		       dev_no);
		return 0;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (g == NULL) {
		printk(VIOD_KERN_WARNING
		       "cannot allocate disk structure for disk %d\n",
		       dev_no);
		blk_cleanup_queue(q);
		return 0;
	}

	d->disk = g;
	blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
	blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
	blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
	g->major = VIODASD_MAJOR;
	g->first_minor = dev_no << PARTITION_SHIFT;
	if (dev_no >= 26)
		snprintf(g->disk_name, sizeof(g->disk_name),
				VIOD_GENHD_NAME "%c%c",
				'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
	else
		snprintf(g->disk_name, sizeof(g->disk_name),
				VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
	g->fops = &viodasd_fops;
	g->queue = q;
	g->private_data = d;
	g->driverfs_dev = d->dev;
	set_capacity(g, d->size >> 9);

	printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) "
	       "CHS=%d/%d/%d sector size %d%s\n",
	       dev_no, (unsigned long)(d->size >> 9),
	       (unsigned long)(d->size >> 20),
	       (int)d->cylinders, (int)d->tracks,
	       (int)d->sectors, (int)d->bytes_per_sector,
	       d->read_only ? " (RO)" : "");

	/* Register the disk with the block layer */
	add_disk(g);
	return 1;
}
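/*
 * Rebuild the scatterlist described by a block LP event; returns the number
 * of entries and stores the total byte count in *total_len.
 */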
static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
		struct scatterlist *sg, int *total_len)
{
	int i, numsg;
	const struct rw_data *rw_data = &bevent->u.rw_data;
	static const int offset =
		offsetof(struct vioblocklpevent, u.rw_data.dma_info);
	static const int element_size = sizeof(rw_data->dma_info[0]);

	numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
	if (numsg > VIOMAXBLOCKDMA)
		numsg = VIOMAXBLOCKDMA;

	*total_len = 0;
	sg_init_table(sg, VIOMAXBLOCKDMA);
	for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
		sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
		sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
		*total_len += rw_data->dma_info[i].len;
	}
	return i;
}
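/*
 * Restart all queues, starting with the one after the disk given, so that
 * every disk gets a fair chance to send its requests.
 */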
static void viodasd_restart_all_queues_starting_from(int first_index)
{
	int i;

	for (i = first_index + 1; i < MAX_DISKNO; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
	for (i = 0; i <= first_index; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
}
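/* Handle the completion of a read or write request */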
static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
{
	int num_sg, num_sect, pci_direction, total_len;
	struct request *req;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	struct HvLpEvent *event = &bevent->event;
	unsigned long irq_flags;
	struct viodasd_device *d;
	int error;
	spinlock_t *qlock;

	num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
	num_sect = total_len >> 9;
	if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
		pci_direction = DMA_FROM_DEVICE;
	else
		pci_direction = DMA_TO_DEVICE;
	req = (struct request *)bevent->event.xCorrelationToken;
	d = req->rq_disk->private_data;

	dma_unmap_sg(d->dev, sg, num_sg, pci_direction);

	/*
	 * This runs in interrupt context, so take the global lock while
	 * updating the outstanding-request count.
	 */
	spin_lock_irqsave(&viodasd_spinlock, irq_flags);
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);

	error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
	if (error) {
		const struct vio_error_entry *err;
		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
		printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
		       event->xRc, bevent->sub_result, err->msg);
		num_sect = req->hard_nr_sectors;
	}
	qlock = req->q->queue_lock;
	spin_lock_irqsave(qlock, irq_flags);
	viodasd_end_request(req, error, num_sect);
	spin_unlock_irqrestore(qlock, irq_flags);

	/* Now that a slot is free, try to get more requests dispatched */
	viodasd_restart_all_queues_starting_from(DEVICE_NO(d));

	return 0;
}
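/* Handle incoming block LP events from the hosting partition */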
static void handle_block_event(struct HvLpEvent *event)
{
	struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
	struct viodasd_waitevent *pwe;

	if (event == NULL)
		/* nothing to do for a NULL event */
		return;

	/* We only expect responses (acks) here, never unsolicited ints */
	if (hvlpevent_is_int(event)) {
		printk(VIOD_KERN_WARNING
		       "Yikes! got an int in viodasd event handler!\n");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
	}

	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
	case vioblockopen:
		/*
		 * Response to an open request.  The correlation token points
		 * at the sender's waitevent structure; copy the return code
		 * and disk geometry there and post the completion to wake up
		 * whoever sent the request.
		 */
		pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
		pwe->rc = event->xRc;
		pwe->sub_result = bevent->sub_result;
		if (event->xRc == HvLpEvent_Rc_Good) {
			const struct open_data *data = &bevent->u.open_data;
			struct viodasd_device *device =
				&viodasd_devices[bevent->disk];
			device->read_only =
				bevent->flags & vioblockflags_ro;
			device->size = data->disk_size;
			device->cylinders = data->cylinders;
			device->tracks = data->tracks;
			device->sectors = data->sectors;
			device->bytes_per_sector = data->bytes_per_sector;
			pwe->max_disk = data->max_disk;
		}
		complete(&pwe->com);
		break;
	case vioblockclose:
		break;
	case vioblockread:
	case vioblockwrite:
		viodasd_handle_read_write(bevent);
		break;

	default:
		printk(VIOD_KERN_WARNING "invalid subtype!\n");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
	}
}
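/* sysfs "probe" attribute: writing to it reprobes for newly added disks */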
static ssize_t probe_disks(struct device_driver *drv, const char *buf,
		size_t count)
{
	struct viodasd_device *d;

	for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
		if (d->disk == NULL)
			probe_disk(d);
	}
	return count;
}
static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
/* vio bus probe: called when a vio device for one of our disks appears */
static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct viodasd_device *d = &viodasd_devices[vdev->unit_address];

	d->dev = &vdev->dev;
	if (!probe_disk(d))
		return -ENODEV;
	return 0;
}

/* vio bus remove: tear down the gendisk and queue for a departing disk */
static int viodasd_remove(struct vio_dev *vdev)
{
	struct viodasd_device *d;

	d = &viodasd_devices[vdev->unit_address];
	if (d->disk) {
		del_gendisk(d->disk);
		blk_cleanup_queue(d->disk->queue);
		put_disk(d->disk);
		d->disk = NULL;
	}
	d->dev = NULL;
	return 0;
}
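/* vio device IDs that this driver claims */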
static struct vio_device_id viodasd_device_table[] __devinitdata = {
	{ "block", "IBM,iSeries-viodasd" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viodasd_device_table);

static struct vio_driver viodasd_driver = {
	.id_table = viodasd_device_table,
	.probe = viodasd_probe,
	.remove = viodasd_remove,
	.driver = {
		.name = "viodasd",
		.owner = THIS_MODULE,
	}
};

/* Set when the sysfs probe attribute was created and must be removed on exit */
static int need_delete_probe;
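/* Module initialization */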
static int __init viodasd_init(void)
{
	int rc;

	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		rc = -ENODEV;
		goto early_fail;
	}

	/* Find the hosting partition if it is not already known */
	if (viopath_hostLp == HvLpIndexInvalid)
		vio_set_hostlp();

	if (viopath_hostLp == HvLpIndexInvalid) {
		printk(VIOD_KERN_WARNING "invalid hosting partition\n");
		rc = -EIO;
		goto early_fail;
	}

	printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
	       viopath_hostLp);

	/* Register the block device major */
	rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
	if (rc) {
		printk(VIOD_KERN_WARNING
		       "Unable to get major number %d for %s\n",
		       VIODASD_MAJOR, VIOD_GENHD_NAME);
		goto early_fail;
	}

	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
			VIOMAXREQ + 2);
	if (rc) {
		printk(VIOD_KERN_WARNING
		       "error opening path to host partition %d\n",
		       viopath_hostLp);
		goto unregister_blk;
	}

	/* Register our handler for block LP events */
	vio_setHandler(viomajorsubtype_blockio, handle_block_event);

	rc = vio_register_driver(&viodasd_driver);
	if (rc) {
		printk(VIOD_KERN_WARNING "vio_register_driver failed\n");
		goto unset_handler;
	}

	/*
	 * If creating the sysfs probe attribute fails we simply lose the
	 * ability to reprobe for new disks; the driver still works for the
	 * disks that are already present.  Remember whether the attribute
	 * needs removing at exit.
	 */
	if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
		need_delete_probe = 1;

	return 0;

unset_handler:
	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
unregister_blk:
	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
early_fail:
	return rc;
}
module_init(viodasd_init);
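/* Module clean-up: undo everything viodasd_init() set up */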
void __exit viodasd_exit(void)
{
	if (need_delete_probe)
		driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
	vio_unregister_driver(&viodasd_driver);
	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
}
module_exit(viodasd_exit);