// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, SEV feature is not supported and APIs in
 * this file are not used but this file still gets compiled into the KVM AMD
 * module.
 *
 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
 * misc_res_type {} defined in linux/misc_cgroup.h.
 *
 * Below macros allow compilation to succeed.
 */
#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
#endif

#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#endif /* CONFIG_KVM_AMD_SEV */

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid)
{
	int ret, asid, error = 0;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
	if (asid > max_asid)
		return -EBUSY;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   nr_asids);
	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);

	return true;
}

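/*
 * Reserve an ASID for this guest, charging it to the caller's misc cgroup
 * first so that a failed charge does not leak a bitmap slot.
 */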
static int sev_asid_new(struct kvm_sev_info *sev)
{
	int asid, min_asid, max_asid, ret;
	bool retry = true;
	enum misc_res_type type;

	type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();
	ret = misc_cg_try_charge(type, sev->misc_cg, 1);
	if (ret) {
		put_misc_cg(sev->misc_cg);
		sev->misc_cg = NULL;
		return ret;
	}

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests must use asid from 1 to min_sev_asid - 1.
	 */
	min_asid = sev->es_active ? 1 : min_sev_asid;
	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		ret = -EBUSY;
		goto e_uncharge;
	}

	__set_bit(asid, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return asid;
e_uncharge:
	misc_cg_uncharge(type, sev->misc_cg, 1);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
	return ret;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

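/*
 * Move the ASID to the reclaim bitmap (it is only reusable after a DF_FLUSH),
 * drop any cached per-CPU VMCB pointers for it, and uncharge the misc cgroup.
 */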
static void sev_asid_free(struct kvm_sev_info *sev)
{
	struct svm_cpu_data *sd;
	int cpu;
	enum misc_res_type type;

	mutex_lock(&sev_bitmap_lock);

	__set_bit(sev->asid, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[sev->asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);

	type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	misc_cg_uncharge(type, sev->misc_cg, 1);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
}

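/* Tell the PSP firmware to release the SEV context associated with @handle. */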
static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission decommission;

	if (!handle)
		return;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}

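/*
 * DEACTIVATE the handle (dissociating it from its ASID), then decommission
 * the firmware context entirely.
 */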
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	sev_decommission(handle);
}

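/*
 * KVM_SEV_INIT / KVM_SEV_ES_INIT: allocate an ASID for the guest and
 * initialize the SEV platform context.
 */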
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	bool es_active = argp->id == KVM_SEV_ES_INIT;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	sev->es_active = es_active;
	asid = sev_asid_new(sev);
	if (asid < 0)
		goto e_no_asid;
	sev->asid = asid;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(sev);
	sev->asid = 0;
e_no_asid:
	sev->es_active = false;
	return ret;
}

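/*
 * ACTIVATE associates the guest's firmware handle with its ASID; the handle
 * cannot be used to run the guest until this succeeds.
 */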
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate activate;
	int asid = sev_get_asid(kvm);
	int ret;

	activate.handle = handle;
	activate.asid = asid;
	ret = sev_guest_activate(&activate, error);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

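/*
 * KVM_SEV_LAUNCH_START: create the guest's memory encryption context in the
 * firmware and bind the returned handle to the guest's ASID.
 */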
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&start, 0, sizeof(start));

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob))
			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;
	}

	start.handle = params.handle;
	start.policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	/* return handle to userspace */
	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start.handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
	return ret;
}

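/*
 * Pin @ulen bytes of user memory starting at @uaddr, accounting the pages
 * against RLIMIT_MEMLOCK; returns the pinned page array or an ERR_PTR.
 */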
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

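/*
 * Flush the pages from the CPU caches; a nop when the CPU enforces cache
 * coherency for encrypted mappings (X86_FEATURE_SME_COHERENT).
 */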
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

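/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt the guest's memory in place, batching
 * physically contiguous pages into a single firmware command where possible.
 */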
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages))
		return PTR_ERR(inpages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	data.reserved = 0;
	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data.len = len;
		data.address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
	return ret;
}

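/*
 * Build the initial SEV-ES VMSA from the current VMCB save area and synced
 * GPRs; the result is what LAUNCH_UPDATE_VMSA encrypts as the vCPU's state.
 */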
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct vmcb_save_area *save = &svm->vmcb->save;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
#ifdef CONFIG_X86_64
	save->r8  = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9  = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
#endif
	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss  = svm->vcpu.arch.ia32_xss;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(svm->vmsa, save, sizeof(*save));

	return 0;
}

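/*
 * KVM_SEV_LAUNCH_UPDATE_VMSA: encrypt each vCPU's VMSA in place; after this
 * the register state is protected and no longer visible to KVM.
 */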
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_update_vmsa vmsa;
	struct kvm_vcpu *vcpu;
	int i, ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	vmsa.reserved = 0;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct vcpu_svm *svm = to_svm(vcpu);

		/* Perform some pre-encryption checks against the VMSA */
		ret = sev_es_sync_vmsa(svm);
		if (ret)
			return ret;

		/*
		 * The LAUNCH_UPDATE_VMSA command will perform in-place
		 * encryption of the VMSA memory content (i.e. it will write
		 * the same memory region with the guest's key), so invalidate
		 * it first.
		 */
		clflush_cache_range(svm->vmsa, PAGE_SIZE);

		vmsa.handle = sev->handle;
		vmsa.address = __sme_pa(svm->vmsa);
		vmsa.len = PAGE_SIZE;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
				    &argp->error);
		if (ret)
			return ret;

		svm->vcpu.arch.guest_state_protected = true;
	}

	return 0;
}

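/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement; with a zero length
 * the firmware only reports the required blob size.
 */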
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
	}

cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	/*
	 * If we queried the measurement length, the firmware responded with
	 * the expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;

	return ret;
}

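/* Issue a single DBG_ENCRYPT or DBG_DECRYPT command for the given range. */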
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg data;

	data.reserved = 0;
	data.handle = sev->handle;
	data.dst_addr = dst;
	data.src_addr = src;
	data.len = size;

	return sev_issue_cmd(kvm,
			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			     &data, error);
}

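/*
 * DBG_DECRYPT works on 16-byte aligned addresses and lengths, so widen the
 * request to the surrounding aligned region before issuing it.
 */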
static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked; the caller should ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *vaddr,
				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If destination buffer or length is not aligned then do read-modify-write:
	 * - decrypt destination in an intermediate buffer
	 * - copy the source buffer in an intermediate buffer
	 * - use the intermediate buffer as source buffer
	 */
	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If source is a kernel buffer then use memcpy(), otherwise
		 * copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

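/*
 * KVM_SEV_DBG_{DE,EN}CRYPT: walk the source range one page at a time, pinning
 * the source and destination pages and issuing a debug command per chunk.
 */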
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or
		 * modify the pages; flush the destination too so that future
		 * accesses do not see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     (void __user *)dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

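/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret into one physically contiguous,
 * already pinned guest memory region via LAUNCH_UPDATE_SECRET.
 */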
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region, so verify
	 * that the userspace memory pages are contiguous before issuing the
	 * command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	memset(&data, 0, sizeof(data));

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data.guest_address = __sme_page_pa(pages[0]) + offset;
	data.guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_unpin_memory;
	}

	data.trans_address = __psp_pa(blob);
	data.trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *report = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_attestation_report data;
	struct kvm_sev_attestation_report params;
	void __user *p;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
		memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
	}
cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
	/*
	 * If we queried the report length, the firmware responded with the
	 * expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(report, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	params->session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

	return ret;
}

static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))
		return -EFAULT;

	/* if session_len is zero, userspace wants to query the session length */
	if (!params.session_len)
		return __sev_send_start_query_session_length(kvm, argp,
							     &params);

	/* some sanity checks */
	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* allocate the memory to hold the session data blob */
	session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
	if (!session_data)
		return -ENOMEM;

	/* copy the certificate blobs from userspace */
	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;
	}

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);
		goto e_free_pdh;
	}

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;
	}

	/* populate the FW SEND_START field with system physical address */
	memset(&data, 0, sizeof(data));
	data.pdh_cert_address = __psp_pa(pdh_cert);
	data.pdh_cert_len = params.pdh_cert_len;
	data.plat_certs_address = __psp_pa(plat_certs);
	data.plat_certs_len = params.plat_certs_len;
	data.amd_certs_address = __psp_pa(amd_certs);
	data.amd_certs_len = params.amd_certs_len;
	data.session_address = __psp_pa(session_data);
	data.session_len = params.session_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
				 session_data, params.session_len)) {
		ret = -EFAULT;
		goto e_free_amd_cert;
	}

	params.policy = data.policy;
	params.session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

e_free_amd_cert:
	kfree(amd_certs);
e_free_plat_cert:
	kfree(plat_certs);
e_free_pdh:
	kfree(pdh_cert);
e_free_session:
	kfree(session_data);
	return ret;
}

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	params->hdr_len = data.hdr_len;
	params->trans_len = data.trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))
		ret = -EFAULT;

	return ret;
}

static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_update_data)))
		return -EFAULT;

	/* userspace wants to query either header or trans length */
	if (!params.trans_len || !params.hdr_len)
		return __sev_send_update_data_query_lengths(kvm, argp, &params);

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if ((params.guest_len + offset > PAGE_SIZE))
		return -EINVAL;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page))
		return PTR_ERR(guest_page);

	/* allocate memory for header and transport buffer */
	ret = -ENOMEM;
	hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
	if (!hdr)
		goto e_unpin;

	trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
	if (!trans_data)
		goto e_free_hdr;

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans_data);
	data.trans_len = params.trans_len;

	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	if (ret)
		goto e_free_trans_data;

	/* copy transport buffer to user space */
	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {
		ret = -EFAULT;
		goto e_free_trans_data;
	}

	/* Copy packet header to userspace. */
	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
			 params.hdr_len))
		ret = -EFAULT;

e_free_trans_data:
	kfree(trans_data);
e_free_hdr:
	kfree(hdr);
e_unpin:
	sev_unpin_memory(kvm, guest_page, n);

	return ret;
}

static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}

static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_cancel data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}

static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_start start;
	struct kvm_sev_receive_start params;
	int *error = &argp->error;
	void *session_data;
	void *pdh_data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* Get parameter from the userspace */
	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_start)))
		return -EFAULT;

	/* some sanity checks */
	if (!params.pdh_uaddr || !params.pdh_len ||
	    !params.session_uaddr || !params.session_len)
		return -EINVAL;

	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
	if (IS_ERR(pdh_data))
		return PTR_ERR(pdh_data);

	session_data = psp_copy_user_blob(params.session_uaddr,
					  params.session_len);
	if (IS_ERR(session_data)) {
		ret = PTR_ERR(session_data);
		goto e_free_pdh;
	}

	memset(&start, 0, sizeof(start));
	start.handle = params.handle;
	start.policy = params.policy;
	start.pdh_cert_address = __psp_pa(pdh_data);
	start.pdh_cert_len = params.pdh_len;
	start.session_address = __psp_pa(session_data);
	start.session_len = params.session_len;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
			      error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret)
		goto e_free_session;

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data,
			 &params, sizeof(struct kvm_sev_receive_start))) {
		ret = -EFAULT;
		sev_unbind_asid(kvm, start.handle);
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_data);
e_free_pdh:
	kfree(pdh_data);

	return ret;
}

static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_receive_update_data params;
	struct sev_data_receive_update_data data;
	void *hdr = NULL, *trans = NULL;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_update_data)))
		return -EFAULT;

	if (!params.hdr_uaddr || !params.hdr_len ||
	    !params.guest_uaddr || !params.guest_len ||
	    !params.trans_uaddr || !params.trans_len)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if ((params.guest_len + offset > PAGE_SIZE))
		return -EINVAL;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto e_free_hdr;
	}

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans);
	data.trans_len = params.trans_len;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page)) {
		ret = PTR_ERR(guest_page);
		goto e_free_trans;
	}

	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
			    &argp->error);

	sev_unpin_memory(kvm, guest_page, n);

e_free_trans:
	kfree(trans);
e_free_hdr:
	kfree(hdr);

	return ret;
}

static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}

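/* Top-level dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl. */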
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!sev_enabled)
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	/* enc_context_owner handles all memory enc operations */
	if (is_mirroring_enc_context(kvm)) {
		r = -EINVAL;
		goto out;
	}

	switch (sev_cmd.id) {
	case KVM_SEV_ES_INIT:
		if (!sev_es_enabled) {
			r = -ENOTTY;
			goto out;
		}
		fallthrough;
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_VMSA:
		r = sev_launch_update_vmsa(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	case KVM_SEV_GET_ATTESTATION_REPORT:
		r = sev_get_attestation_report(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_START:
		r = sev_send_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_UPDATE_DATA:
		r = sev_send_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_FINISH:
		r = sev_send_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_CANCEL:
		r = sev_send_cancel(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_START:
		r = sev_receive_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_UPDATE_DATA:
		r = sev_receive_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_FINISH:
		r = sev_receive_finish(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	mutex_lock(&kvm->lock);
	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		mutex_unlock(&kvm->lock);
		goto e_free;
	}

	region->uaddr = range->addr;
	region->size = range->size;

	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Make sure the caches are
	 * flushed to ensure that guest data gets written into memory with
	 * the correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	return ret;

e_free:
	kfree(region);
	return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
	struct file *source_kvm_file;
	struct kvm *source_kvm;
	struct kvm_sev_info *mirror_sev;
	unsigned int asid;
	int ret;

	source_kvm_file = fget(source_fd);
	if (!file_is_kvm(source_kvm_file)) {
		ret = -EBADF;
		goto e_source_put;
	}

	source_kvm = source_kvm_file->private_data;
	mutex_lock(&source_kvm->lock);

	if (!sev_guest(source_kvm)) {
		ret = -EINVAL;
		goto e_source_unlock;
	}

	/* Mirrors of mirrors should work, but let's not get silly */
	if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
		ret = -EINVAL;
		goto e_source_unlock;
	}

	asid = to_kvm_svm(source_kvm)->sev_info.asid;

	/*
	 * The mirror kvm holds an enc_context_owner ref so its asid can't
	 * disappear until we're done with it
	 */
	kvm_get_kvm(source_kvm);

	fput(source_kvm_file);
	mutex_unlock(&source_kvm->lock);
	mutex_lock(&kvm->lock);

	if (sev_guest(kvm)) {
		ret = -EINVAL;
		goto e_mirror_unlock;
	}

	/* Set enc_context_owner and copy its encryption context over */
	mirror_sev = &to_kvm_svm(kvm)->sev_info;
	mirror_sev->enc_context_owner = source_kvm;
	mirror_sev->asid = asid;
	mirror_sev->active = true;

	mutex_unlock(&kvm->lock);
	return 0;

e_mirror_unlock:
	mutex_unlock(&kvm->lock);
	kvm_put_kvm(source_kvm);
	return ret;
e_source_unlock:
	mutex_unlock(&source_kvm->lock);
e_source_put:
	if (source_kvm_file)
		fput(source_kvm_file);
	return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
	if (is_mirroring_enc_context(kvm)) {
		kvm_put_kvm(sev->enc_context_owner);
		return;
	}

	mutex_lock(&kvm->lock);

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
			cond_resched();
		}
	}

	mutex_unlock(&kvm->lock);

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev);
}

void __init sev_set_cpu_caps(void)
{
	if (!sev_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV);
	if (!sev_es_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
}

void __init sev_hardware_setup(void)
{
#ifdef CONFIG_KVM_AMD_SEV
	unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
	bool sev_es_supported = false;
	bool sev_supported = false;

	if (!sev_enabled || !npt_enabled)
		goto out;

	/* Does the CPU support SEV? */
	if (!boot_cpu_has(X86_FEATURE_SEV))
		goto out;

	/* Retrieve SEV CPUID information */
	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	/* Set encryption bit location for SEV-ES guests */
	sev_enc_bit = ebx & 0x3f;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = ecx;
	if (!max_sev_asid)
		goto out;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = edx;
	sev_me_mask = 1UL << (ebx & 0x3f);

	/*
	 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
	 * even though it's never used, so that the bitmap is indexed by the
	 * actual ASID.
	 */
	nr_asids = max_sev_asid + 1;
	sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_asid_bitmap)
		goto out;

	sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap) {
		bitmap_free(sev_asid_bitmap);
		sev_asid_bitmap = NULL;
		goto out;
	}

	sev_asid_count = max_sev_asid - min_sev_asid + 1;
	if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
		goto out;

	pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
	sev_supported = true;

	/* SEV-ES support requested? */
	if (!sev_es_enabled)
		goto out;

	/* Does the CPU support SEV-ES? */
	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
		goto out;

	/* Has the system been allocated ASIDs for SEV-ES? */
	if (min_sev_asid == 1)
		goto out;

	sev_es_asid_count = min_sev_asid - 1;
	if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
		goto out;

	pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
	sev_es_supported = true;

out:
	sev_enabled = sev_supported;
	sev_es_enabled = sev_es_supported;
#endif
}

void sev_hardware_teardown(void)
{
	if (!sev_enabled)
		return;

	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
	sev_flush_asids(1, max_sev_asid);

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
	misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
}

int sev_cpu_init(struct svm_cpu_data *sd)
{
	if (!sev_enabled)
		return 0;

	sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
	if (!sd->sev_vmcbs)
		return -ENOMEM;

	return 0;
}

/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
				   unsigned long len)
{
	/*
	 * If hardware enforced cache coherency for encrypted mappings of the
	 * same physical page is supported, nothing to do.
	 */
	if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
		return;

	/*
	 * If the VM Page Flush MSR is supported, use it to flush the page
	 * (using the page virtual address and the guest ASID).
	 */
	if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
		struct kvm_sev_info *sev;
		unsigned long va_start;
		u64 start, stop;

		/* Align start and stop to page boundaries. */
		va_start = (unsigned long)va;
		start = (u64)va_start & PAGE_MASK;
		stop = PAGE_ALIGN((u64)va_start + len);

		if (start < stop) {
			sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;

			while (start < stop) {
				wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
				       start | sev->asid);

				start += PAGE_SIZE;
			}

			return;
		}

		WARN(1, "Address overflow, using WBINVD\n");
	}

	/*
	 * Hardware should always have one of the above features,
	 * but if not, use WBINVD and issue a warning.
	 */
	WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
	wbinvd_on_all_cpus();
}

void sev_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;

	if (!sev_es_guest(vcpu->kvm))
		return;

	svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
	__free_page(virt_to_page(svm->vmsa));

	if (svm->ghcb_sa_free)
		kfree(svm->ghcb_sa);
}

static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->ghcb;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->ghcb;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be returned:
	 *   GPRs RAX, RBX, RCX, RDX
	 *
	 * Copy their values, even if they may not have been written during the
	 * VM-Exit.  It's the guest's responsibility to not consume random data.
	 */
	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}

static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->ghcb;
	u64 exit_code;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be supplied:
	 *   GPRs RAX, RBX, RCX, RDX
	 *   XCR0
	 *   CPL
	 *
	 * VMMCALL allows the guest to provide extra registers. KVM also
	 * expects RSI for hypercalls, so include that, too.
	 *
	 * Copy their values to the appropriate location if supplied.
	 */
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));

	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);

	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);

	if (ghcb_xcr0_is_valid(ghcb)) {
		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
		kvm_update_cpuid_runtime(vcpu);
	}

	/* Copy the GHCB exit information into the VMCB fields */
	exit_code = ghcb_get_sw_exit_code(ghcb);
	control->exit_code = lower_32_bits(exit_code);
	control->exit_code_hi = upper_32_bits(exit_code);
	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);

	/* Clear the valid entries fields */
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu;
	struct ghcb *ghcb;
	u64 exit_code = 0;

	ghcb = svm->ghcb;

	/* Only GHCB Usage code 0 is supported */
	if (ghcb->ghcb_usage)
		goto vmgexit_err;

	/*
	 * Retrieve the exit code now even though it may not be marked valid
	 * as it could help with debugging.
	 */
	exit_code = ghcb_get_sw_exit_code(ghcb);

	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
	    !ghcb_sw_exit_info_2_is_valid(ghcb))
		goto vmgexit_err;

	switch (ghcb_get_sw_exit_code(ghcb)) {
	case SVM_EXIT_READ_DR7:
		break;
	case SVM_EXIT_WRITE_DR7:
		if (!ghcb_rax_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSC:
		break;
	case SVM_EXIT_RDPMC:
		if (!ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_CPUID:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		if (ghcb_get_rax(ghcb) == 0xd)
			if (!ghcb_xcr0_is_valid(ghcb))
				goto vmgexit_err;
		break;
	case SVM_EXIT_INVD:
		break;
	case SVM_EXIT_IOIO:
		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
			if (!ghcb_sw_scratch_is_valid(ghcb))
				goto vmgexit_err;
		} else {
			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
				if (!ghcb_rax_is_valid(ghcb))
					goto vmgexit_err;
		}
		break;
	case SVM_EXIT_MSR:
		if (!ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		if (ghcb_get_sw_exit_info_1(ghcb)) {
			if (!ghcb_rax_is_valid(ghcb) ||
			    !ghcb_rdx_is_valid(ghcb))
				goto vmgexit_err;
		}
		break;
	case SVM_EXIT_VMMCALL:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_cpl_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSCP:
		break;
	case SVM_EXIT_WBINVD:
		break;
	case SVM_EXIT_MONITOR:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb) ||
		    !ghcb_rdx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_MWAIT:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_MMIO_READ:
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!ghcb_sw_scratch_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
	case SVM_VMGEXIT_AP_HLT_LOOP:
	case SVM_VMGEXIT_AP_JUMP_TABLE:
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		break;
	default:
		goto vmgexit_err;
	}

	return 0;

vmgexit_err:
	vcpu = &svm->vcpu;

	if (ghcb->ghcb_usage) {
		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
			    ghcb->ghcb_usage);
	} else {
		vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
			    exit_code);
		dump_ghcb(svm);
	}

	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;

	return -EINVAL;
}

void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
	if (!svm->ghcb)
		return;

	if (svm->ghcb_sa_free) {
		/*
		 * The scratch area lives outside the GHCB, so there is a
		 * buffer that, depending on the operation performed, may
		 * need to be synced, then freed.
		 */
		if (svm->ghcb_sa_sync) {
			kvm_write_guest(svm->vcpu.kvm,
					ghcb_get_sw_scratch(svm->ghcb),
					svm->ghcb_sa, svm->ghcb_sa_len);
			svm->ghcb_sa_sync = false;
		}

		kfree(svm->ghcb_sa);
		svm->ghcb_sa = NULL;
		svm->ghcb_sa_free = false;
	}

	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);

	sev_es_sync_to_ghcb(svm);

	kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
	svm->ghcb = NULL;
}

void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
	 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->vcpu.arch.last_vmentry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}

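/*
 * Resolve the guest-supplied scratch area: either a slice of the GHCB's
 * shared buffer (used in place) or a separate guest buffer that is copied
 * into a kernel allocation and, for reads, synced back on the next VMRUN.
 */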
#define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct ghcb *ghcb = svm->ghcb;
	u64 ghcb_scratch_beg, ghcb_scratch_end;
	u64 scratch_gpa_beg, scratch_gpa_end;
	void *scratch_va;

	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
	if (!scratch_gpa_beg) {
		pr_err("vmgexit: scratch gpa not provided\n");
		return false;
	}

	scratch_gpa_end = scratch_gpa_beg + len;
	if (scratch_gpa_end < scratch_gpa_beg) {
		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
		       len, scratch_gpa_beg);
		return false;
	}

	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
		/* Scratch area begins within GHCB */
		ghcb_scratch_beg = control->ghcb_gpa +
				   offsetof(struct ghcb, shared_buffer);
		ghcb_scratch_end = control->ghcb_gpa +
				   offsetof(struct ghcb, reserved_1);

		/*
		 * If the scratch area begins within the GHCB, it must be
		 * completely contained in the GHCB shared buffer area.
		 */
		if (scratch_gpa_beg < ghcb_scratch_beg ||
		    scratch_gpa_end > ghcb_scratch_end) {
			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
			       scratch_gpa_beg, scratch_gpa_end);
			return false;
		}

		scratch_va = (void *)svm->ghcb;
		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
	} else {
		/*
		 * The guest memory must be read into a kernel buffer, so
		 * limit the size
		 */
		if (len > GHCB_SCRATCH_AREA_LIMIT) {
			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
			       len, GHCB_SCRATCH_AREA_LIMIT);
			return false;
		}
		scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
		if (!scratch_va)
			return false;

		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
			/* Unable to copy scratch area from guest */
			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");

			kfree(scratch_va);
			return false;
		}

		/*
		 * The scratch area is outside the GHCB. The operation will
		 * dictate whether the buffer needs to be synced before running
		 * the vCPU next time (i.e. a read was requested so the data
		 * must be written back to the guest memory).
		 */
		svm->ghcb_sa_sync = sync;
		svm->ghcb_sa_free = true;
	}

	svm->ghcb_sa = scratch_va;
	svm->ghcb_sa_len = len;

	return true;
}

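/* Helpers for reading and writing bit-fields of the GHCB MSR protocol value. */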
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
			      unsigned int pos)
{
	svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
	svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
}

static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
{
	return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
}

static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
{
	svm->vmcb->control.ghcb_gpa = value;
}

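/*
 * Per the GHCB specification, the MSR protocol packs a request/response code
 * into the low "info" bits of the GHCB MSR and operation-specific data into
 * the remaining upper bits.  As a sketch (field positions are those of the
 * GHCB_MSR_* macros), a CPUID response is composed field by field:
 *
 *	set_ghcb_msr_bits(svm, cpuid_value, GHCB_MSR_CPUID_VALUE_MASK,
 *			  GHCB_MSR_CPUID_VALUE_POS);
 *	set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP, GHCB_MSR_INFO_MASK,
 *			  GHCB_MSR_INFO_POS);
 *
 * i.e. a read-modify-write of individual fields, whereas set_ghcb_msr()
 * overwrites the whole register value at once.
 */
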
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u64 ghcb_info;
	int ret = 1;

	ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;

	trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
					     control->ghcb_gpa);

	switch (ghcb_info) {
	case GHCB_MSR_SEV_INFO_REQ:
		set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
						    GHCB_VERSION_MIN,
						    sev_enc_bit));
		break;
	case GHCB_MSR_CPUID_REQ: {
		u64 cpuid_fn, cpuid_reg, cpuid_value;

		cpuid_fn = get_ghcb_msr_bits(svm,
					     GHCB_MSR_CPUID_FUNC_MASK,
					     GHCB_MSR_CPUID_FUNC_POS);

		/* Initialize the registers needed by the CPUID intercept */
		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
		vcpu->arch.regs[VCPU_REGS_RCX] = 0;

		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
		if (!ret) {
			ret = -EINVAL;
			break;
		}

		cpuid_reg = get_ghcb_msr_bits(svm,
					      GHCB_MSR_CPUID_REG_MASK,
					      GHCB_MSR_CPUID_REG_POS);
		if (cpuid_reg == 0)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
		else if (cpuid_reg == 1)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
		else if (cpuid_reg == 2)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
		else
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];

		set_ghcb_msr_bits(svm, cpuid_value,
				  GHCB_MSR_CPUID_VALUE_MASK,
				  GHCB_MSR_CPUID_VALUE_POS);

		set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
				  GHCB_MSR_INFO_MASK,
				  GHCB_MSR_INFO_POS);
		break;
	}
	case GHCB_MSR_TERM_REQ: {
		u64 reason_set, reason_code;

		reason_set = get_ghcb_msr_bits(svm,
					       GHCB_MSR_TERM_REASON_SET_MASK,
					       GHCB_MSR_TERM_REASON_SET_POS);
		reason_code = get_ghcb_msr_bits(svm,
						GHCB_MSR_TERM_REASON_MASK,
						GHCB_MSR_TERM_REASON_POS);
		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
			reason_set, reason_code);
		fallthrough;
	}
	default:
		ret = -EINVAL;
	}

	trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
					    control->ghcb_gpa, ret);

	return ret;
}

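/*
 * For reference, the guest side of this MSR protocol (a hedged sketch, not
 * code from this file) does roughly the following before it has a usable
 * GHCB page, e.g. to fetch the SEV information:
 *
 *	wrmsrl(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_SEV_INFO_REQ);
 *	VMGEXIT();	(rep; vmmcall -> #VMEXIT(VMGEXIT) into KVM)
 *	rdmsrl(MSR_AMD64_SEV_ES_GHCB, response);
 *
 * Such a request lands in the handler above whenever the GHCB MSR holds
 * request info bits rather than a GHCB GPA.
 */
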
int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_gpa, exit_code;
	struct ghcb *ghcb;
	int ret;

	/* Validate the GHCB */
	ghcb_gpa = control->ghcb_gpa;
	if (ghcb_gpa & GHCB_MSR_INFO_MASK)
		return sev_handle_vmgexit_msr_protocol(svm);

	if (!ghcb_gpa) {
		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
		return -EINVAL;
	}

	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
		/* Unable to map GHCB from guest */
		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
			    ghcb_gpa);
		return -EINVAL;
	}

	svm->ghcb = svm->ghcb_map.hva;
	ghcb = svm->ghcb_map.hva;

	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);

	exit_code = ghcb_get_sw_exit_code(ghcb);

	ret = sev_es_validate_vmgexit(svm);
	if (ret)
		return ret;

	sev_es_sync_from_ghcb(svm);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	ret = -EINVAL;
	switch (exit_code) {
	case SVM_VMGEXIT_MMIO_READ:
		if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
			break;

		ret = kvm_sev_es_mmio_read(vcpu,
					   control->exit_info_1,
					   control->exit_info_2,
					   svm->ghcb_sa);
		break;
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
			break;

		ret = kvm_sev_es_mmio_write(vcpu,
					    control->exit_info_1,
					    control->exit_info_2,
					    svm->ghcb_sa);
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
		break;
	case SVM_VMGEXIT_AP_HLT_LOOP:
		ret = kvm_emulate_ap_reset_hold(vcpu);
		break;
	case SVM_VMGEXIT_AP_JUMP_TABLE: {
		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;

		switch (control->exit_info_1) {
		case 0:
			/* Set AP jump table address */
			sev->ap_jump_table = control->exit_info_2;
			break;
		case 1:
			/* Get AP jump table address */
			ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
			break;
		default:
			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
			       control->exit_info_1);
			ghcb_set_sw_exit_info_1(ghcb, 1);
			ghcb_set_sw_exit_info_2(ghcb,
						X86_TRAP_UD |
						SVM_EVTINJ_TYPE_EXEPT |
						SVM_EVTINJ_VALID);
		}

		ret = 1;
		break;
	}
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		vcpu_unimpl(vcpu,
			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
			    control->exit_info_1, control->exit_info_2);
		break;
	default:
		ret = svm_invoke_exit_handler(vcpu, exit_code);
	}

	return ret;
}

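/*
 * Note on return values (the standard KVM exit-handler convention): a
 * positive return resumes the guest, zero exits to userspace, and a negative
 * value is an error.  For SVM_VMGEXIT_MMIO_READ, setup_vmgexit_scratch() is
 * called with sync == true so that data read into a bounce buffer is written
 * back to the guest's scratch area when the GHCB is unmapped.
 */
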
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
{
	if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
		return -EINVAL;

	return kvm_sev_es_string_io(&svm->vcpu, size, port,
				    svm->ghcb_sa, svm->ghcb_sa_len, in);
}

void sev_es_init_vmcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;

	/*
	 * An SEV-ES guest requires a VMSA area that is a separate page from
	 * the VMCB page. Do not include the encryption mask on the VMSA
	 * physical address since hardware will access it using the guest key.
	 */
	svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);

	/* Can't intercept CR register access, HV can't modify CR registers */
	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
	svm_clr_intercept(svm, INTERCEPT_CR4_READ);
	svm_clr_intercept(svm, INTERCEPT_CR8_READ);
	svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

	svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);

	/* Track EFER/CR register changes */
	svm_set_intercept(svm, TRAP_EFER_WRITE);
	svm_set_intercept(svm, TRAP_CR0_WRITE);
	svm_set_intercept(svm, TRAP_CR4_WRITE);
	svm_set_intercept(svm, TRAP_CR8_WRITE);

	/* No support for enable_vmware_backdoor */
	clr_exception_intercept(svm, GP_VECTOR);

	/* Can't intercept XSETBV, HV can't modify XCR0 directly */
	svm_clr_intercept(svm, INTERCEPT_XSETBV);

	/* Clear intercepts on selected MSRs */
	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

void sev_es_create_vcpu(struct vcpu_svm *svm)
{
	/*
	 * Set the GHCB MSR value as per the GHCB specification when creating
	 * a vCPU for an SEV-ES guest.
	 */
	set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
					    GHCB_VERSION_MIN,
					    sev_enc_bit));
}

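/*
 * Per the GHCB specification's SEV Information response, the value built by
 * GHCB_MSR_SEV_INFO() carries the maximum and minimum supported GHCB
 * protocol versions plus the guest's page-table encryption bit position in
 * the upper GHCBData bits, with the SEV_INFO response code in the low info
 * bits.  This lets the guest negotiate a protocol version before issuing its
 * first VMGEXIT.
 */
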
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	struct vmcb_save_area *hostsa;

	/*
	 * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
	 * of which one step is to perform a VMLOAD. Since hardware does not
	 * perform a VMSAVE on VMRUN, the host save area must be updated.
	 */
	vmsave(__sme_page_pa(sd->save_area));

	/* XCR0 is restored on VMEXIT, save the current host value */
	hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	/* PKRU is restored on VMEXIT, save the current host value */
	hostsa->pkru = read_pkru();

	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
	hostsa->xss = host_xss;
}

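/*
 * On the 0x400 offset above: the additional SEV-ES host state (XCR0, PKRU,
 * XSS) is stored at offset 0x400 of the host save-area page, separate from
 * the state that VMSAVE itself writes at the start of the page; my
 * understanding is that this placement is where hardware expects to find
 * that state when restoring the host on VMEXIT, per the AMD APM.
 */
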
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* First SIPI: Use the values as initially set by the VMM */
	if (!svm->received_first_sipi) {
		svm->received_first_sipi = true;
		return;
	}

	/*
	 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
	 * non-zero value.
	 */
	if (!svm->ghcb)
		return;

	ghcb_set_sw_exit_info_2(svm->ghcb, 1);
}