linux/arch/x86/include/asm/sev.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#ifndef __ASM_ENCRYPTED_STATE_H
#define __ASM_ENCRYPTED_STATE_H

#include <linux/types.h>
#include <asm/insn.h>
#include <asm/sev-common.h>

#define GHCB_PROTO_OUR          0x0001UL
#define GHCB_PROTOCOL_MAX       1ULL
#define GHCB_DEFAULT_USAGE      0ULL

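/*
 * VMGEXIT is encoded as a REP-prefixed VMMCALL. Under SEV-ES it causes
 * an automatic exit to the hypervisor, which then services the request
 * the guest has placed in the shared GHCB.
 */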
#define VMGEXIT()               { asm volatile("rep; vmmcall\n\r"); }

enum es_result {
        ES_OK,                  /* All good */
        ES_UNSUPPORTED,         /* Requested operation not supported */
        ES_VMM_ERROR,           /* Unexpected state from the VMM */
        ES_DECODE_FAILED,       /* Instruction decoding failed */
        ES_EXCEPTION,           /* Instruction caused exception */
        ES_RETRY,               /* Retry instruction emulation */
};

struct es_fault_info {
        unsigned long vector;           /* Exception vector to raise */
        unsigned long error_code;       /* Error code for that exception */
        unsigned long cr2;              /* Fault address (when vector is #PF) */
};

struct pt_regs;

/* ES instruction emulation context */
struct es_em_ctxt {
        struct pt_regs *regs;           /* Register state of the faulting context */
        struct insn insn;               /* Decoded instruction at regs->ip */
        struct es_fault_info fi;        /* Filled when emulation must raise an exception */
};

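/*
 * Early #VC handler used before a GHCB page has been set up. At that
 * stage the only channel to the hypervisor is the GHCB MSR protocol,
 * so this path can handle little more than the CPUID exits needed
 * during early boot.
 */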
void do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code);

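/*
 * Return the low 'bits' bits of 'val', e.g. lower_bits(0x12345678, 8)
 * yields 0x78. A shift by 64 is undefined in C, so callers are
 * expected to pass bits < 64.
 */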
static inline u64 lower_bits(u64 val, unsigned int bits)
{
        u64 mask = (1ULL << bits) - 1;

        return (val & mask);
}

struct real_mode_header;
enum stack_type;

/* Early IDT entry points for #VC handler */
extern void vc_no_ghcb(void);
extern void vc_boot_ghcb(void);
extern bool handle_vc_boot_ghcb(struct pt_regs *regs);

#ifdef CONFIG_AMD_MEM_ENCRYPT
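/*
 * sev_es_enable_key is set when the kernel runs as an SEV-ES guest;
 * the static branches below patch the wrappers down to NOPs on all
 * other systems.
 */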
extern struct static_key_false sev_es_enable_key;
extern void __sev_es_ist_enter(struct pt_regs *regs);
extern void __sev_es_ist_exit(void);
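/*
 * The #VC handler runs on an IST stack. These hooks are called on NMI
 * entry/exit so that a #VC raised while the NMI is being handled does
 * not clobber the stack of an interrupted #VC handler; see the
 * implementation in arch/x86/kernel/sev.c for details.
 */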
static __always_inline void sev_es_ist_enter(struct pt_regs *regs)
{
        if (static_branch_unlikely(&sev_es_enable_key))
                __sev_es_ist_enter(regs);
}

static __always_inline void sev_es_ist_exit(void)
{
        if (static_branch_unlikely(&sev_es_enable_key))
                __sev_es_ist_exit();
}

extern int sev_es_setup_ap_jump_table(struct real_mode_header *rmh);
extern void __sev_es_nmi_complete(void);
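/*
 * On SEV-ES the hypervisor cannot simply re-enable NMIs: the guest has
 * to signal via a VMGEXIT that NMI handling is complete before the
 * next one may be delivered.
 */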
static __always_inline void sev_es_nmi_complete(void)
{
        if (static_branch_unlikely(&sev_es_enable_key))
                __sev_es_nmi_complete();
}

extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
static inline void sev_es_nmi_complete(void) { }
static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#endif /* __ASM_ENCRYPTED_STATE_H */