linux/arch/x86/kvm/vmx/vmx_ops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>
#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"

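/*
 * Wrap an instruction with KVM's fault-on-reboot handling: a fault taken
 * because VMX was disabled for an (emergency) reboot is reported via
 * kvm_spurious_fault() instead of being treated as fatal.
 */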
#define __ex(x) __kvm_handle_fault_on_reboot(x)

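/*
 * Out-of-line reporters for failed or faulting VMX instructions.
 * vmread_error_trampoline() additionally preserves volatile registers so
 * that __vmcs_readl() can call it from inline asm without a clobber list.
 */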
asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
							 bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

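/*
 * VMCS field encodings use bits 14:13 for the field width (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 to select the high
 * half of a 64-bit field.  Masking with 0x6000 therefore isolates the
 * width, and 0x6001 additionally distinguishes high-half accesses.  The
 * checks below are compile-time only; they fire when a wrongly sized
 * accessor is used with a constant field encoding.
 */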
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

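/*
 * Open-coded VMREAD: "ja" is taken on success (CF and ZF both clear).  A
 * VMfail falls through to the error path, while a fault (e.g. VMX not
 * enabled) is routed via the exception table to the fixup section.  Both
 * paths funnel into vmread_error_trampoline() with @field and @fault on
 * the stack.
 */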
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "push $0\n\t"
		     "push %2\n\t"
		     "2:call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     ".pushsection .fixup, \"ax\"\n\t"
		     "4: push $1\n\t"
		     "push %2\n\t"
		     "jmp 2b\n\t"
		     ".popsection\n\t"
		     _ASM_EXTABLE(1b, 4b)
		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
	return value;
}

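/*
 * Sized read accessors: check the field encoding at compile time and
 * divert to the enlightened VMCS when it is active, since an eVMCS lives
 * in ordinary memory and must not be accessed with VMREAD.
 */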
static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	/* 32-bit host: read the halves separately; field + 1 is the high half. */
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

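/*
 * vmx_asm[12]() emit a VMX instruction with one or two operands via asm
 * goto.  A VMfail (CF or ZF set afterwards, hence "jna") branches to the
 * error label, which invokes the instruction's _error() reporter; a fault
 * is routed through the exception table to the fault label and treated as
 * spurious.  The 0x2e prefix hints that the branch (failure) is not taken.
 */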
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	/* 32-bit host: write the high half into the high field (field + 1). */
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

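/*
 * Read-modify-write helpers for non-64-bit fields, e.g. (illustrative):
 *
 *	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_RDTSC_EXITING);
 *
 * 64-bit fields are rejected since @mask is only 32 bits.
 */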
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

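/*
 * VMCLEAR and VMPTRLD take the VMCS's physical address as a memory
 * operand; vmcs_load() diverts to evmcs_load() when the enlightened VMCS
 * is active.
 */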
static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

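/*
 * INVVPID takes the extent type in a register and a 128-bit descriptor in
 * memory: the VPID in bits 15:0, reserved (zero) bits up to bit 63, and
 * the guest linear address in bits 127:64.
 */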
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

/* INVEPT's descriptor: the target EPTP; callers here pass 0 for @gpa. */
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

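/*
 * VPID flush helpers.  VPID 0 is the host's and is never flushed here;
 * each helper falls back to a wider flush when the requested INVVPID
 * extent isn't supported.
 */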
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

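/*
 * EPT flush helpers: per-EPTP if INVEPT single-context is supported,
 * otherwise flush all contexts.
 */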
static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

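/*
 * Typical call sites (an illustrative sketch; these live in vmx.c, not in
 * this header):
 *
 *	vmcs_load(vmx->loaded_vmcs->vmcs);
 *	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
 *	exit_reason = vmcs_read32(VM_EXIT_REASON);
 */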
#endif /* __KVM_X86_VMX_INSN_H */