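/*
 * Real-mode handlers for the sPAPR TCE (IOMMU) hypercalls: H_PUT_TCE,
 * H_PUT_TCE_INDIRECT, H_STUFF_TCE and H_GET_TCE. On HV KVM these run with
 * the MMU off, so they may only touch data that is safe to access in real
 * mode and return H_TOO_HARD whenever the request has to be completed by
 * the virtual-mode handlers instead.
 */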
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG
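
/*
 * Real-mode friendly variant of WARN_ON_ONCE(): reports once per call site
 * via pr_err() and dump_stack() instead of the trap-based WARN machinery.
 */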
#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(".data.unlikely") __warned;	\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif
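
/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */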
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
		unsigned long tce, unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}
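
/*
 * Validates a TCE.
 * Flags and the page mask are checked, and for every attached hardware IOMMU
 * table the guest page must be covered by preregistered memory so that the
 * real-mode handler can translate it without faulting.
 * As the host kernel does not access those addresses (just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as TCE is a guest RAM address or the page
 * was actually allocated).
 */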
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;
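
	/* Allow userspace to poison TCE table */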
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}
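
/*
 * Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(), which returns
 * __va(PFN_PHYS(page_to_pfn(page))), i.e. pure arithmetic that does not
 * access the struct page contents.
 *
 * Theoretically page_address() could be defined differently, but that would
 * require WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL to be enabled, which the
 * #error below guards against.
 */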
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}
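
/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail so kvmppc_rm_tce_validate must be called before it.
 */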
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
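	/*
	 * kvmppc_rm_ioba_validate() allows pages not to be allocated when a
	 * TCE is only being cleared; otherwise it returns H_TOO_HARD and we
	 * never get here with a missing page.
	 */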
	if (!page) {
		WARN_ON_ONCE_RM(tce != 0);
		return;
	}
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
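
/*
 * Checks that the request can be completed in real mode: kvmppc_rm_tce_put()
 * cannot allocate TCE backing pages with the MMU off, so every page the
 * request touches must already be allocated unless the request only clears
 * entries.
 */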
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages, bool clearing)
{
	unsigned long i, idx, sttpage, sttpages;
	unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret)
		return ret;
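	/*
	 * clearing==true means kvmppc_rm_tce_put() will skip missing pages,
	 * so the backing pages do not have to be allocated for the request
	 * to succeed.
	 */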
	if (clearing)
		return H_SUCCESS;

	idx = (ioba >> stt->page_shift) - stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	sttpages = ALIGN(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
			TCES_PER_PAGE;
	for (i = sttpage; i < sttpage + sttpages; ++i)
		if (!stt->pages[i])
			return H_TOO_HARD;

	return H_SUCCESS;
}

static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
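
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still get here a valid UA.
		 */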
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

static void iommu_tce_kill_rm(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	if (tbl->it_ops->tce_kill)
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
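		/* it_userspace allocation might be delayed */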
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
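		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */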
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
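		/* it_userspace allocation might be delayed */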
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
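		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */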
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;
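
	/*
	 * Real-mode H_PUT_TCE handler; returning H_TOO_HARD makes the
	 * hypercall exit to the host so the virtual-mode handler can
	 * complete it.
	 */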
	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		iommu_tce_kill_rm(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_rm_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;
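
	/*
	 * Called in real mode with MSR_EE = 0, so the page-table walk must
	 * not sleep or take sleeping locks. find_kvm_host_pte() walks the
	 * host page table under kvm->mmu_lock (taken by the caller) and the
	 * mmu_seq value guards against concurrent MMU notifier invalidations.
	 */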
	ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
	if (!ptep)
		return -ENXIO;

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte))
		return -ENXIO;

	if (!shift)
		shift = PAGE_SHIFT;
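
	/* Avoid handling anything potentially complicated in realmode */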
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}

long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long mmu_seq;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;
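
	/*
	 * Snapshot mmu_notifier_seq before translating guest addresses;
	 * used to detect invalidations racing with this hypercall.
	 */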
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
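
	/*
	 * The spec says that the maximum size of the list is 512 TCEs
	 * so the whole table addressed resides in 4K page
	 */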
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
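		/*
		 * We get here if guest memory was pre-registered which
		 * is normally VFIO case and gpa->hpa translation does not
		 * depend on hpt.
		 */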
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
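		/*
		 * This is usually a case of a guest with emulated devices only
		 * when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so take kvm->mmu_lock and translate through the host page
		 * table with kvmppc_rm_ua_to_hpa().
		 */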
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry);
				goto invalidate_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
	if (!prereg)
		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
	return ret;
}

long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
	if (ret != H_SUCCESS)
		return ret;
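
	/* Check permission bits only to allow userspace poison TCE for debug */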
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}
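
/* This can be called in either virtual mode or real mode */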
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */