/* linux/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * xen_vmcall_test
   4 *
   5 * Copyright © 2020 Amazon.com, Inc. or its affiliates.
   6 *
   7 * Userspace hypercall testing
   8 */
   9
  10#include "test_util.h"
  11#include "kvm_util.h"
  12#include "processor.h"
  13
  14#define VCPU_ID         5
  15
  16#define HCALL_REGION_GPA        0xc0000000ULL
  17#define HCALL_REGION_SLOT       10
  18#define PAGE_SIZE               4096
  19
  20static struct kvm_vm *vm;
  21
  22#define INPUTVALUE 17
  23#define ARGVALUE(x) (0xdeadbeef5a5a0000UL + x)
  24#define RETVALUE 0xcafef00dfbfbffffUL
  25
  26#define XEN_HYPERCALL_MSR       0x40000200
  27#define HV_GUEST_OS_ID_MSR      0x40000000
  28#define HV_HYPERCALL_MSR        0x40000001
  29
  30#define HVCALL_SIGNAL_EVENT             0x005d
  31#define HV_STATUS_INVALID_ALIGNMENT     4
  32
static void guest_code(void)
{
	/*
	 * Guest-side test body.  Issues three hypercalls and checks that
	 * userspace (the loop in main()) intercepted them and produced the
	 * expected results.
	 *
	 * Arguments follow the 64-bit Xen hypercall ABI: input value in
	 * %rax, parameters in %rdi, %rsi, %rdx, %r10, %r8, %r9.
	 */
	unsigned long rax = INPUTVALUE;
	unsigned long rdi = ARGVALUE(1);
	unsigned long rsi = ARGVALUE(2);
	unsigned long rdx = ARGVALUE(3);
	unsigned long rcx;
	/*
	 * These must live in their exact architectural registers so the
	 * "r" constraints below pass them where the hypercall ABI expects.
	 */
	register unsigned long r10 __asm__("r10") = ARGVALUE(4);
	register unsigned long r8 __asm__("r8") = ARGVALUE(5);
	register unsigned long r9 __asm__("r9") = ARGVALUE(6);

	/* First a direct invocation of 'vmcall' */
	__asm__ __volatile__("vmcall" :
			     "=a"(rax) :
			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
			     "r"(r10), "r"(r8), "r"(r9));
	/* Userspace stores RETVALUE into run->xen.u.hcall.result */
	GUEST_ASSERT(rax == RETVALUE);

	/*
	 * Fill in the Xen hypercall page: writing the GPA to the
	 * KVM_XEN_HVM_CONFIG-registered MSR makes KVM populate it.
	 */
	__asm__ __volatile__("wrmsr" : : "c" (XEN_HYPERCALL_MSR),
			     "a" (HCALL_REGION_GPA & 0xffffffff),
			     "d" (HCALL_REGION_GPA >> 32));

	/* Set Hyper-V Guest OS ID (required before the hypercall MSR works) */
	__asm__ __volatile__("wrmsr" : : "c" (HV_GUEST_OS_ID_MSR),
			     "a" (0x5a), "d" (0));

	/*
	 * Hyper-V hypercall page: second page of the region; the +1 sets
	 * the Enable bit in the MSR (per the Hyper-V TLFS).
	 */
	u64 msrval = HCALL_REGION_GPA + PAGE_SIZE + 1;
	__asm__ __volatile__("wrmsr" : : "c" (HV_HYPERCALL_MSR),
			     "a" (msrval & 0xffffffff),
			     "d" (msrval >> 32));

	/*
	 * Invoke a Xen hypercall via the hypercall page: entry for
	 * hypercall number INPUTVALUE, at 32 bytes per slot.
	 */
	__asm__ __volatile__("call *%1" : "=a"(rax) :
			     "r"(HCALL_REGION_GPA + INPUTVALUE * 32),
			     "a"(rax), "D"(rdi), "S"(rsi), "d"(rdx),
			     "r"(r10), "r"(r8), "r"(r9));
	GUEST_ASSERT(rax == RETVALUE);

	/*
	 * Invoke a Hyper-V hypercall with a misaligned ingpa; KVM itself
	 * should fail it with HV_STATUS_INVALID_ALIGNMENT.
	 */
	rax = 0;
	rcx = HVCALL_SIGNAL_EVENT;	/* code */
	rdx = 0x5a5a5a5a;		/* ingpa (badly aligned) */
	__asm__ __volatile__("call *%1" : "=a"(rax) :
			     "r"(HCALL_REGION_GPA + PAGE_SIZE),
			     "a"(rax), "c"(rcx), "d"(rdx),
			     "r"(r8));
	GUEST_ASSERT(rax == HV_STATUS_INVALID_ALIGNMENT);

	GUEST_DONE();
}
  85
  86int main(int argc, char *argv[])
  87{
  88        if (!(kvm_check_cap(KVM_CAP_XEN_HVM) &
  89              KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) ) {
  90                print_skip("KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL not available");
  91                exit(KSFT_SKIP);
  92        }
  93
  94        vm = vm_create_default(VCPU_ID, 0, (void *) guest_code);
  95        vcpu_set_hv_cpuid(vm, VCPU_ID);
  96
  97        struct kvm_xen_hvm_config hvmc = {
  98                .flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
  99                .msr = XEN_HYPERCALL_MSR,
 100        };
 101        vm_ioctl(vm, KVM_XEN_HVM_CONFIG, &hvmc);
 102
 103        /* Map a region for the hypercall pages */
 104        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 105                                    HCALL_REGION_GPA, HCALL_REGION_SLOT, 2, 0);
 106        virt_map(vm, HCALL_REGION_GPA, HCALL_REGION_GPA, 2);
 107
 108        for (;;) {
 109                volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
 110                struct ucall uc;
 111
 112                vcpu_run(vm, VCPU_ID);
 113
 114                if (run->exit_reason == KVM_EXIT_XEN) {
 115                        ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
 116                        ASSERT_EQ(run->xen.u.hcall.cpl, 0);
 117                        ASSERT_EQ(run->xen.u.hcall.longmode, 1);
 118                        ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
 119                        ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
 120                        ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
 121                        ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
 122                        ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
 123                        ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
 124                        ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
 125                        run->xen.u.hcall.result = RETVALUE;
 126                        continue;
 127                }
 128
 129                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 130                            "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
 131                            run->exit_reason,
 132                            exit_reason_str(run->exit_reason));
 133
 134                switch (get_ucall(vm, VCPU_ID, &uc)) {
 135                case UCALL_ABORT:
 136                        TEST_FAIL("%s", (const char *)uc.args[0]);
 137                        /* NOT REACHED */
 138                case UCALL_SYNC:
 139                        break;
 140                case UCALL_DONE:
 141                        goto done;
 142                default:
 143                        TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
 144                }
 145        }
 146done:
 147        kvm_vm_free(vm);
 148        return 0;
 149}
 150
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.