/*
 * IBM PowerPC Virtual I/O (VIO) bus infrastructure support for pSeries.
 *
 * Provides the vio bus type, device registration from the device tree,
 * IOMMU/DMA setup for vio devices, and Cooperative Memory Overcommit
 * (CMO) entitlement accounting when running on shared-memory LPARs.
 */
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/kobject.h>
#include <linux/kexec.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>

/* The root "vio" device; it acts as the parent of every registered vio device. */
static struct vio_dev vio_bus_device = {
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay a queued balance operation */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out excess entitlement to devices in chunks of this many bytes */
#define VIO_CMO_BALANCE_CHUNK 131072

/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other CMO-enabled devices on the bus
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};

/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock protecting the whole structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory set aside for device hotplug
 * @min: minimum entitlement necessary for system operation
 * @desired: desired entitlement for system operation
 * @curr: bytes currently allocated
 * @high: high water mark of IO data usage
 */
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;
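
/*
 * Note on the pools above: vio_cmo_bus_init() and vio_cmo_balance() keep
 * vio_cmo.entitled == vio_cmo.reserve.size + vio_cmo.excess.size, with the
 * spare entitlement accounted inside the reserve pool.  Allocations charge a
 * device's reserved entitlement first and spill into the excess pool only
 * while a full spare remains available.
 */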

/**
 * vio_cmo_num_OF_devs - Count the number of CMO-capable devices in the
 *                       device tree
 *
 * Returns the number of vdevice nodes with an "ibm,my-dma-window" property,
 * i.e. the devices that will require a minimum entitlement.
 */
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}

/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested, in bytes
 *
 * Allocations come from memory reserved for the device along with any
 * spill-over into the excess pool.  The excess pool is only used while a
 * full spare entitlement remains available for hotplug.
 *
 * Return: 0 on success, -ENOMEM when the allocation can not be fulfilled.
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}

/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 *
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation, in bytes
 *
 * IO memory is returned by the device to the correct memory pools.  When
 * possible, freed entitlement replenishes the spare and grows the reserve
 * pool back toward the bus-wide desired level; any such rebalancing
 * schedules the balance work queue.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare from the reserve pool itself.  This removes
	 * entitlement from the device down to VIO_CMO_MIN_ENT, if needed,
	 * and gives it to the spare pool.  The amount of used memory in
	 * the reserve pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement are used to fulfill the spare entitlement first;
 * the remainder is given to the excess pool.  Decreases, if possible, are
 * taken from the excess pool and from unused device entitlement.
 *
 * Return: 0 on success, -ENOMEM when the new entitlement can not be met.
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill the entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until the entitlement change is served.
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or already
 * allocated to devices, can be distributed to the devices.  The device list
 * is iterated to recalculate the desired entitlement level and to determine
 * how much entitlement above the minimum each device will be given.
 *
 * Small chunks of the available entitlement are handed to devices until
 * their requirements are fulfilled or there is no entitlement left to give;
 * the level at which distribution starts is raised by VIO_CMO_BALANCE_CHUNK
 * on each pass.  The pool sizes are recalculated at the end so that the
 * reserve and excess pools stay consistent with the per-device entitlements.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements to their minimum.
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;

		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				   dma_handle, dev->coherent_dma_mask, flag,
				   dev_to_node(dev));
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	dma_addr_t ret = DMA_MAPPING_ERROR;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
		goto out_fail;
	ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
			     direction, attrs);
	if (unlikely(ret == DMA_MAPPING_ERROR))
		goto out_deallocate;
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return DMA_MAPPING_ERROR;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);

	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	int ret, count;
	size_t alloc_size = 0;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

	if (vio_cmo_alloc(viodev, alloc_size))
		goto out_fail;
	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
			       direction, attrs);
	if (unlikely(!ret))
		goto out_deallocate;

	for_each_sg(sglist, sgl, ret, count)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, alloc_size);
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return 0;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

	ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
	vio_cmo_dealloc(viodev, alloc_size);
}

static const struct dma_map_ops vio_dma_mapping_ops = {
	.alloc = vio_dma_iommu_alloc_coherent,
	.free = vio_dma_iommu_free_coherent,
	.map_sg = vio_dma_iommu_map_sg,
	.unmap_sg = vio_dma_iommu_unmap_sg,
	.map_page = vio_dma_iommu_map_page,
	.unmap_page = vio_dma_iommu_unmap_page,
	.dma_supported = dma_iommu_dma_supported,
	.get_required_mask = dma_iommu_get_required_mask,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};
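
/*
 * These dma_map_ops wrap the regular powerpc IOMMU mapping paths with CMO
 * accounting: each map/alloc charges the rounded-up size against the
 * device's entitlement via vio_cmo_alloc(), each unmap/free refunds it via
 * vio_cmo_dealloc(), and rejected requests bump the cmo.allocs_failed
 * counter so entitlement pressure is visible to the administrator.
 */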

/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for the device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled for some later point in time.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it can not receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move excess entitlement to the excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
					max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: struct vio_dev being probed
 *
 * Determine whether the device is DMA capable, set up its minimum and
 * desired entitlement, and track it on the CMO device list.  A device that
 * is not already accounted for in vio_cmo.min must be able to draw its
 * minimum entitlement from the spare and excess pools.
 *
 * Return: 0 on success, -EINVAL if the driver lacks a get_desired_dma()
 * callback, -ENOMEM if entitlement or memory is not available.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct iommu_table *tbl;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	tbl = get_iommu_table_base(dev);

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
				    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired =
			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they were
	 * last set, the number of devices in the OF tree has been constant
	 * and the IO memory for this is already in the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Updated desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, tmp);
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: struct vio_dev being removed
 *
 * Remove the device from the CMO device list.  The minimum entitlement
 * remains reserved for the device as long as it exists in the device tree;
 * anything above that is returned to the pools, topping up the spare first.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, so its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired.
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save the minimum allocation for the device in the
		 * reserve as long as it exists in the OF tree, as
		 * determined by the minimum entitlement being non-zero.
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is fully removed it keeps a minimum
		 * entitlement; this guarantees that a module unload/load
		 * cycle will succeed.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0; this will
	 * panic() later when the spare is reserved below.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device attributes for devices with an active CMO */
#define viodev_cmo_rd_attr(name)					\
static ssize_t cmo_##name##_show(struct device *dev,			\
				 struct device_attribute *attr,		\
				 char *buf)				\
{									\
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);	\
}

static ssize_t cmo_allocs_failed_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t cmo_allocs_failed_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t cmo_desired_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = kstrtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

static struct attribute *vio_cmo_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	&dev_attr_cmo_entitled.attr,
	&dev_attr_cmo_allocated.attr,
	&dev_attr_cmo_desired.attr,
	&dev_attr_cmo_allocs_failed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);
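
/*
 * The attributes above surface per-device CMO accounting through sysfs.
 * On a running system they would typically appear as
 * /sys/bus/vio/devices/<device>/cmo_{desired,entitled,allocated,allocs_failed};
 * writing a byte count to cmo_desired calls vio_cmo_set_dev_desired(), and
 * writing anything to cmo_allocs_failed clears the failure counter.
 */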

/* sysfs bus attributes and functions for CMO accounting */
#define viobus_cmo_rd_attr(name)					\
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name);			\
}									\
static struct bus_attribute bus_attr_cmo_bus_##name =			\
	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

#define viobus_cmo_pool_rd_attr(name, var)				\
static ssize_t								\
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)		\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name.var);			\
}									\
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", vio_cmo.high);
}

static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
			      size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
	&bus_attr_cmo_bus_entitled.attr,
	&bus_attr_cmo_bus_spare.attr,
	&bus_attr_cmo_bus_min.attr,
	&bus_attr_cmo_bus_desired.attr,
	&bus_attr_cmo_bus_curr.attr,
	&bus_attr_cmo_high.attr,
	&bus_attr_cmo_reserve_size.attr,
	&bus_attr_cmo_excess_size.attr,
	&bus_attr_cmo_excess_free.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_bus);
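
/*
 * These bus-wide attributes report the totals tracked in struct vio_cmo
 * (entitled, spare, min, desired, curr, high and the reserve/excess pool
 * sizes), typically under /sys/bus/vio/.  Writing to cmo_high resets the
 * high-water mark to the current allocation level.
 */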

static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_groups = vio_cmo_dev_groups;
	vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev:	vio device on which the operation is to be performed
 * @op:		pointer to a configured and initialized vio_pfo_op structure
 *
 * Calls the hypervisor H_COP interface to start the operation and retries
 * while the hypervisor reports a busy/resource condition, until the
 * operation completes, fails permanently, or the optional timeout (in ms)
 * expires.  The raw hcall return code is stored in @op->hcall_err.
 *
 * Return: 0 on success, or a negative errno describing the failure.
 */
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
				vdev->resource_id,
				op->in, op->inlen, op->out,
				op->outlen, op->csbcpb);

		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(jiffies, deadline)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
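
/*
 * Illustrative sketch (not part of this file): a PFO driver would typically
 * fill a struct vio_pfo_op with its co-processor request and call
 * vio_h_cop_sync() from its own context.  The field names below are exactly
 * the ones this function consumes; the buffers and flag value are
 * hypothetical.
 *
 *	struct vio_pfo_op op = {
 *		.flags	 = 0,		// request flags passed to H_COP
 *		.in	 = in_dma,	// DMA address of the input buffer
 *		.inlen	 = in_len,
 *		.out	 = out_dma,	// DMA address of the output buffer
 *		.outlen	 = out_len,
 *		.csbcpb	 = csb_dma,	// co-processor status/parameter block
 *		.timeout = 2000,	// give up after 2 seconds
 *	};
 *	int rc = vio_h_cop_sync(vdev, &op);
 *	if (rc)
 *		dev_err(&vdev->dev, "H_COP failed: %d (hcall %ld)\n",
 *			rc, op.hcall_err);
 */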

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const __be32 *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	kref_init(&tbl->it_kref);

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in entries */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = size >> tbl->it_page_shift;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1, 0, 0);
}
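
/*
 * The table built above uses fixed 4K TCE pages and is reference counted
 * through it_kref; the reference taken here is dropped again by
 * iommu_tce_table_put() in vio_dev_release() when the device goes away.
 */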

/**
 * vio_match_device - Tell if a VIO device has a matching VIO device id
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}

/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		viodrv->remove(viodev);

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return 0;
}

static void vio_bus_shutdown(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv;

	if (dev->driver) {
		viodrv = to_vio_driver(dev->driver);
		if (viodrv->shutdown)
			viodrv->shutdown(viodev);
		else if (kexec_in_progress)
			vio_bus_remove(dev);
	}
}

/**
 * __vio_register_driver - Register a new vio driver
 * @viodrv:	the vio_driver structure to be registered
 * @owner:	owning module
 * @mod_name:	module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 */
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	/* The VIO bus only exists on pSeries LPARs */
	if (!machine_is(pseries))
		return -ENODEV;

	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
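
/*
 * Illustrative sketch (not part of this file): a vio driver is normally
 * declared with an id_table plus probe and remove callbacks, and registered
 * through the vio_register_driver() convenience wrapper from <asm/vio.h>,
 * which supplies THIS_MODULE and the module name to __vio_register_driver().
 * The "foo_*" names below are hypothetical.
 *
 *	static const struct vio_device_id foo_ids[] = {
 *		{ "foo-type", "IBM,foo-compatible" },
 *		{ "", "" },
 *	};
 *
 *	static struct vio_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 *	// in the module init path:
 *	//	vio_register_driver(&foo_driver);
 */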

/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:	The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_tce_table_put(tbl);
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}

/**
 * vio_register_device_node - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * @of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev, or NULL if the node has
 * an unrecognized parent or is missing required properties.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const __be32 *prop;
	enum vio_dev_family family;

	/*
	 * Determine if this node is under the /vdevice node or under the
	 * /ibm,platform-facilities node.  This decides the device's family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (of_node_is_type(parent_node, "ibm,platform-facilities"))
			family = PFO;
		else if (of_node_is_type(parent_node, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
					__func__,
					parent_node,
					of_node);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %pOFn.\n",
				__func__, of_node);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
					__func__, of_node);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		unsigned int unit_address;

		viodev->type = of_node_get_device_type(of_node);
		if (!viodev->type) {
			pr_warn("%s: node %pOFn is missing the 'device_type' "
					"property.\n", __func__, of_node);
			goto out;
		}

		prop = of_get_property(of_node, "reg", NULL);
		if (prop == NULL) {
			pr_warn("%s: node %pOFn missing 'reg'\n",
					__func__, of_node);
			goto out;
		}
		unit_address = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		/*
		 * PFO devices need their resource_id for submitting COP_OPs.
		 * This is an optional field for devices, but is required
		 * when performing synchronous ops.
		 */
		prop = of_get_property(of_node, "ibm,resource-id", NULL);
		if (prop != NULL)
			viodev->resource_id = of_read_number(prop, 1);

		dev_set_name(&viodev->dev, "%pOFn", of_node);
		viodev->type = dev_name(&viodev->dev);
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case driver doesn't set it explicitly */
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);

/*
 * vio_bus_scan_register_devices - Scan the OF subtree named @root_name and
 * register a vio device for each child node found there.
 */
static void vio_bus_scan_register_devices(char *root_name)
{
	struct device_node *node_root, *node_child;

	if (!root_name)
		return;

	node_root = of_find_node_by_name(NULL, root_name);
	if (node_root) {
		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree.  Drivers will associate with them later.
		 */
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
	}
}

/**
 * vio_bus_init - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
	int err;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
				__func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	return 0;
}
machine_postcore_initcall(pseries, vio_bus_init);

static int __init vio_device_init(void)
{
	vio_bus_scan_register_devices("vdevice");
	vio_bus_scan_register_devices("ibm,platform-facilities");

	return 0;
}
machine_device_initcall(pseries, vio_device_init);

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn) {
		strcpy(buf, "\n");
		return strlen(buf);
	}
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp) {
		strcpy(buf, "\n");
		return strlen(buf);
	}

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *vio_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_dev);

void vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
	if (viodev->family == VDEVICE)
		irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}

struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_groups = vio_dev_groups,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
	.shutdown = vio_bus_shutdown,
};

/**
 * vio_get_attribute - get attribute for virtual device
 * @vdev:	the vio device to get property from
 * @which:	the property/attribute to be extracted
 * @length:	pointer to length of returned data size (unused if NULL)
 *
 * Calls of_get_property() to return the value of the
 * attribute specified by @which.
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

/*
 * vio_find_name - internal helper: look up a vio_dev by the kobject name it
 * was given when it was registered on the bus.
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which needs to be
 * dropped after use.
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	char kobj_name[20];
	struct device_node *vnode_parent;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	/* construct the kobject name from the device node */
	if (of_node_is_type(vnode_parent, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			goto out;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
	else
		goto out;

	of_node_put(vnode_parent);
	return vio_find_name(kobj_name);
out:
	of_node_put(vnode_parent);
	return NULL;
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);

	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);

	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);

static int __init vio_init(void)
{
	dma_debug_add_bus(&vio_bus_type);
	return 0;
}
machine_fs_initcall(pseries, vio_init);