// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017,2019-2020 NXP
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "vfio_fsl_mc_private.h"

static struct fsl_mc_driver vfio_fsl_mc_driver;

static DEFINE_MUTEX(reflck_lock);
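
/*
 * A reflck is a reference-counted mutex shared by a DPRC (container)
 * and every mc object inside it, so that open() and release() of all
 * devices in the container serialize against each other.
 */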
static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static void vfio_fsl_mc_reflck_release(struct kref *kref)
{
	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
							 struct vfio_fsl_mc_reflck,
							 kref);

	mutex_destroy(&reflck->lock);
	kfree(reflck);
	/* kref_put_mutex() acquired reflck_lock on the final put */
	mutex_unlock(&reflck_lock);
}

static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
}

static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
{
	struct vfio_fsl_mc_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
{
	int ret = 0;

	mutex_lock(&reflck_lock);
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		/* A DPRC (container) gets a fresh reflck of its own */
		vdev->reflck = vfio_fsl_mc_reflck_alloc();
		ret = PTR_ERR_OR_ZERO(vdev->reflck);
	} else {
		/* Child objects share the reflck of their parent DPRC */
		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
		struct vfio_device *device;
		struct vfio_fsl_mc_device *cont_vdev;

		device = vfio_device_get_from_dev(mc_cont_dev);
		if (!device) {
			ret = -ENODEV;
			goto unlock;
		}

		cont_vdev = container_of(device, struct vfio_fsl_mc_device, vdev);
		if (!cont_vdev->reflck) {
			vfio_device_put(device);
			ret = -ENODEV;
			goto unlock;
		}
		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
		vdev->reflck = cont_vdev->reflck;
		vfio_device_put(device);
	}

unlock:
	mutex_unlock(&reflck_lock);
	return ret;
}

static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
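
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */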
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
				VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;

	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}

static int vfio_fsl_mc_open(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	int ret = 0;

	mutex_lock(&vdev->reflck->lock);
	if (!vdev->refcnt) {
		ret = vfio_fsl_mc_regions_init(vdev);
		if (ret)
			goto out;
	}
	vdev->refcnt++;
out:
	mutex_unlock(&vdev->reflck->lock);

	return ret;
}

static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	int ret;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		struct fsl_mc_device *mc_dev = vdev->mc_dev;
		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);

		vfio_fsl_mc_regions_cleanup(vdev);
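
		/* reset the device before cleaning up the interrupts */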
		ret = dprc_reset_container(mc_cont->mc_io, 0,
					   mc_cont->mc_handle,
					   mc_cont->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		if (ret) {
			dev_warn(&mc_cont->dev,
				 "VFIO_FSL_MC: reset device has failed (%d)\n",
				 ret);
			WARN_ON(1);
		}

		vfio_fsl_mc_irqs_cleanup(vdev);

		fsl_mc_cleanup_irq_pool(mc_cont);
	}

	mutex_unlock(&vdev->reflck->lock);
}

static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
			      unsigned int cmd, unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;
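
		/* map offset to the physical address */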
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
							 mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		int ret;
		struct fsl_mc_device *mc_dev = vdev->mc_dev;
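
		/* reset is supported only for the DPRC */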
		if (!is_fsl_mc_bus_dprc(mc_dev))
			return -ENOTTY;

		ret = dprc_reset_container(mc_dev->mc_io, 0,
					   mc_dev->mc_handle,
					   mc_dev->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

static ssize_t vfio_fsl_mc_read(struct vfio_device *core_vdev, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	/* Only the whole 64-byte MC command portal can be read, at offset 0 */
	if (count != 64 || off != 0)
		return -EINVAL;

	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));

	if (copy_to_user(buf, data, 64))
		return -EFAULT;

	return count;
}

#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500

static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
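
	/* Write the command parameters into the portal */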
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));
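
	/* Write the command header last; this hands the command to the MC */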
	writeq(cmd_data[0], ioaddr);
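
	/*
	 * Wait for the response before returning to user space. This can be
	 * optimized in the future to prepare the response early and avoid a
	 * separate read() call.
	 */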
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = le64_to_cpu(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

static ssize_t vfio_fsl_mc_write(struct vfio_device *core_vdev,
				 const char __user *buf, size_t count,
				 loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	/* Only complete 64-byte MC commands at offset 0 are accepted */
	if (count != 64 || off != 0)
		return -EINVAL;

	if (copy_from_user(&data, buf, 64))
		return -EFAULT;

	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;

	return count;
}

static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;

	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}

static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
			    struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev =
		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
	    && (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
	    && (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = mc_dev;

	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name = "vfio-fsl-mc",
	.open = vfio_fsl_mc_open,
	.release = vfio_fsl_mc_release,
	.ioctl = vfio_fsl_mc_ioctl,
	.read = vfio_fsl_mc_read,
	.write = vfio_fsl_mc_write,
	.mmap = vfio_fsl_mc_mmap,
};

static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		   vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);

		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}

	return 0;
}

static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;

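	/* Non-dprc devices share the mc_io of their parent container */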
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}

	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;
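
	/* Open the DPRC and allocate an MC portal for it */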
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}
	return 0;

out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	return ret;
}

static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
{
	int ret;

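	/* Non-dprc devices do not scan for other devices */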
	if (!is_fsl_mc_bus_dprc(mc_dev))
		return 0;
	ret = dprc_scan_container(mc_dev, false);
	if (ret) {
		dev_err(&mc_dev->dev,
			"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
		dprc_remove_devices(mc_dev, NULL, 0);
		return ret;
	}
	return 0;
}

static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	if (!is_fsl_mc_bus_dprc(mc_dev))
		return;

	dprc_cleanup(mc_dev);
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}

static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct iommu_group *group;
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	group = vfio_iommu_group_get(dev);
	if (!group) {
		dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
		return -EINVAL;
	}

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out_group_put;
	}

	vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops);
	vdev->mc_dev = mc_dev;
	mutex_init(&vdev->igate);

	ret = vfio_fsl_mc_reflck_attach(vdev);
	if (ret)
		goto out_kfree;

	ret = vfio_fsl_mc_init_device(vdev);
	if (ret)
		goto out_reflck;

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_device;
	}
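
	/*
	 * Scanning the container populates its child mc devices, which
	 * re-enter vfio_fsl_mc_probe() and look up this DPRC's vfio_device
	 * in vfio_fsl_mc_reflck_attach(); the scan therefore has to run
	 * after vfio_register_group_dev() above.
	 */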
	ret = vfio_fsl_mc_scan_container(mc_dev);
	if (ret)
		goto out_group_dev;
	dev_set_drvdata(dev, vdev);
	return 0;

out_group_dev:
	vfio_unregister_group_dev(&vdev->vdev);
out_device:
	vfio_fsl_uninit_device(vdev);
out_reflck:
	vfio_fsl_mc_reflck_put(vdev->reflck);
out_kfree:
	kfree(vdev);
out_group_put:
	vfio_iommu_group_put(group, dev);
	return ret;
}

static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct device *dev = &mc_dev->dev;
	struct vfio_fsl_mc_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	mutex_destroy(&vdev->igate);

	dprc_remove_devices(mc_dev, NULL, 0);
	vfio_fsl_uninit_device(vdev);
	vfio_fsl_mc_reflck_put(vdev->reflck);

	kfree(vdev);
	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);

	return 0;
}

static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe = vfio_fsl_mc_probe,
	.remove = vfio_fsl_mc_remove,
	.driver = {
		.name = "vfio-fsl-mc",
		.owner = THIS_MODULE,
	},
};

static int __init vfio_fsl_mc_driver_init(void)
{
	return fsl_mc_driver_register(&vfio_fsl_mc_driver);
}

static void __exit vfio_fsl_mc_driver_exit(void)
{
	fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
}

module_init(vfio_fsl_mc_driver_init);
module_exit(vfio_fsl_mc_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");