linux/arch/arm64/include/asm/spectre.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Interface for managing mitigations for Spectre vulnerabilities.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#ifndef __ASM_SPECTRE_H
#define __ASM_SPECTRE_H

#define BP_HARDEN_EL2_SLOTS 4
#define __BP_HARDEN_HYP_VECS_SZ ((BP_HARDEN_EL2_SLOTS - 1) * SZ_2K)
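/*
 * Each slot is one full 2KiB arm64 exception vector table (16 entries of
 * 128 bytes each). Slot 0 (HYP_VECTOR_DIRECT) is __kvm_hyp_vector itself,
 * which is presumably why __bp_harden_hyp_vecs only needs to provide the
 * remaining BP_HARDEN_EL2_SLOTS - 1 slots.
 */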

#ifndef __ASSEMBLY__

#include <linux/percpu.h>

#include <asm/cpufeature.h>
#include <asm/virt.h>

/* Watch out, ordering is important here. */
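/*
 * The states are ordered from best to worst: when per-CPU results are
 * combined into a system-wide state, a numerically larger (worse) value is
 * expected to take precedence, so SPECTRE_VULNERABLE must compare greater
 * than SPECTRE_MITIGATED and SPECTRE_UNAFFECTED.
 */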
enum mitigation_state {
        SPECTRE_UNAFFECTED,
        SPECTRE_MITIGATED,
        SPECTRE_VULNERABLE,
};

struct task_struct;

/*
 * Note: the order of this enum corresponds to __bp_harden_hyp_vecs and
 * we rely on having the direct vectors first.
 */
enum arm64_hyp_spectre_vector {
        /*
         * Take exceptions directly to __kvm_hyp_vector. This must be
         * 0 so that it is used by default when mitigations are not needed.
         */
        HYP_VECTOR_DIRECT,

        /*
         * Bounce via a slot in the hypervisor text mapping of
         * __bp_harden_hyp_vecs, which contains an SMC call.
         */
        HYP_VECTOR_SPECTRE_DIRECT,

        /*
         * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
         * next to the idmap page.
         */
        HYP_VECTOR_INDIRECT,

        /*
         * Bounce via a slot in a special mapping of __bp_harden_hyp_vecs
         * next to the idmap page, which contains an SMC call.
         */
        HYP_VECTOR_SPECTRE_INDIRECT,
};
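
/*
 * The "indirect" slots appear to exist for Spectre-v3a: taking exceptions
 * via a dedicated mapping placed next to the idmap page, rather than via
 * the normal hypervisor text mapping, keeps the value of VBAR_EL2 from
 * revealing the location of the EL2 text to a guest.
 */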

typedef void (*bp_hardening_cb_t)(void);

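/*
 * Per-CPU branch-predictor hardening state: @slot selects which EL2 vector
 * slot this CPU should use (see enum arm64_hyp_spectre_vector above), and
 * @fn is the CPU- or firmware-specific callback used to invalidate the
 * branch predictor, or NULL when no callback is required.
 */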
struct bp_hardening_data {
        enum arm64_hyp_spectre_vector   slot;
        bp_hardening_cb_t               fn;
};

DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

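/*
 * Run the per-CPU branch-predictor hardening callback, if any. This is
 * meant to be called early on the exception path from userspace, before
 * attacker-influenced predictor state can steer kernel execution; a sketch
 * of one expected call site (the real one lives in the entry/fault code):
 *
 *      if (!is_ttbr0_addr(far))
 *              arm64_apply_bp_hardening();
 */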
static inline void arm64_apply_bp_hardening(void)
{
        struct bp_hardening_data *d;

        if (!cpus_have_const_cap(ARM64_SPECTRE_V2))
                return;

        d = this_cpu_ptr(&bp_hardening_data);
        if (d->fn)
                d->fn();
}

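/*
 * Detection and mitigation hooks for the individual variants. Judging by
 * the signatures, the has_spectre_*() helpers serve as cpufeature/errata
 * "matches" callbacks and the spectre_*_enable_mitigation() helpers as the
 * corresponding cpu_enable callbacks, while the arm64_get_*_state() helpers
 * report the resulting system-wide mitigation state.
 * spectre_v4_enable_task_mitigation() presumably (re)applies a task's
 * per-task Speculative Store Bypass setting when it is scheduled in.
 */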
enum mitigation_state arm64_get_spectre_v2_state(void);
bool has_spectre_v2(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

bool has_spectre_v3a(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

enum mitigation_state arm64_get_spectre_v4_state(void);
bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope);
void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
void spectre_v4_enable_task_mitigation(struct task_struct *tsk);

enum mitigation_state arm64_get_meltdown_state(void);

#endif  /* __ASSEMBLY__ */
#endif  /* __ASM_SPECTRE_H */