linux/arch/arm64/kvm/va_layout.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/kvm_host.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <asm/alternative.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

/*
 * The LSB of the HYP VA tag
 */
static u8 tag_lsb;
/*
 * The HYP VA tag value with the region bit
 */
static u64 tag_val;
static u64 va_mask;

/*
 * Compute HYP VA by using the same computation as kern_hyp_va().
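 *
 * This mirrors, in C, the masking and tag insertion performed by the
 * patched kern_hyp_va() instruction sequence; it is only intended for
 * early boot use and assumes kvm_compute_layout() has already
 * initialised va_mask and tag_val.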
 */
static u64 __early_kern_hyp_va(u64 addr)
{
	addr &= va_mask;
	addr |= tag_val << tag_lsb;
	return addr;
}

/*
 * Store a hyp VA <-> PA offset into an EL2-owned variable.
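 *
 * This lets EL2 code convert between its hyp VAs and PAs with simple
 * arithmetic instead of a page-table walk.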
 */
static void init_hyp_physvirt_offset(void)
{
	u64 kern_va, hyp_va;

	/* Compute the offset from the hyp VA and PA of an arbitrary hyp symbol. */
	kern_va = (u64)lm_alias(__hyp_text_start);
	hyp_va = __early_kern_hyp_va(kern_va);
	hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
}

/*
 * We want to generate a hyp VA with the following format (with V ==
 * vabits_actual):
 *
 *  63 ... V |     V-1    | V-2 .. tag_lsb | tag_lsb - 1 .. 0
 *  ---------------------------------------------------------
 * | 0000000 | hyp_va_msb |   random tag   |  kern linear VA |
 *           |--------- tag_val -----------|----- va_mask ---|
 *
 * which does not conflict with the idmap regions.
 */
__init void kvm_compute_layout(void)
{
	phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
	u64 hyp_va_msb;

	/* Where is my RAM region? */
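	/*
	 * The idmap is mapped at VA == PA, so bit (V - 1) of idmap_addr
	 * tells us which half of the VA space the idmap occupies; flipping
	 * it places the hyp VA range in the other half, which is what keeps
	 * the hyp mappings clear of the idmap.
	 */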
	hyp_va_msb  = idmap_addr & BIT(vabits_actual - 1);
	hyp_va_msb ^= BIT(vabits_actual - 1);

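	/*
	 * tag_lsb is the first bit above all the bits that can differ
	 * between linear-map addresses of RAM, i.e. the width of the
	 * "kern linear VA" field in the diagram above. Everything from
	 * tag_lsb up to bit (V - 2) is then available for the tag.
	 */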
	tag_lsb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
			(u64)(high_memory - 1));

	va_mask = GENMASK_ULL(tag_lsb - 1, 0);
	tag_val = hyp_va_msb;

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && tag_lsb != (vabits_actual - 1)) {
		/* We have some free bits to insert a random tag. */
		tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
	}
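	/*
	 * Keep tag_val right-shifted by tag_lsb: __early_kern_hyp_va()
	 * shifts it back, and compute_instruction() splits it into two
	 * 12-bit ADD immediates.
	 */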
	tag_val >>= tag_lsb;

	init_hyp_physvirt_offset();
}

/*
 * The .hyp.reloc ELF section contains a list of kimg positions that
 * contain kimg VAs but will be accessed only in hyp execution context.
 * Convert them to hyp VAs. See gen-hyprel.c for more details.
 */
__init void kvm_apply_hyp_relocations(void)
{
	int32_t *rel;
	int32_t *begin = (int32_t *)__hyp_reloc_begin;
	int32_t *end = (int32_t *)__hyp_reloc_end;

	for (rel = begin; rel < end; ++rel) {
		uintptr_t *ptr, kimg_va;

		/*
		 * Each entry contains a 32-bit relative offset from itself
		 * to a kimg VA position.
		 */
		ptr = (uintptr_t *)lm_alias((char *)rel + *rel);

		/* Read the kimg VA value at the relocation address. */
		kimg_va = *ptr;

		/* Convert to hyp VA and store back to the relocation address. */
		*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
	}
}

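/*
 * Generate the n-th instruction of the 5-instruction sequence that
 * kvm_update_va_mask() patches in for kern_hyp_va():
 *
 *	and  reg, reg, #va_mask			// keep the kernel linear VA bits
 *	ror  reg, reg, #tag_lsb			// rotate the tag field down to bit 0
 *	add  reg, reg, #(tag_val & 0xfff)	// insert tag bits [11:0]
 *	add  reg, reg, #(tag_val & 0xfff000)	// insert tag bits [23:12] (LSL #12)
 *	ror  reg, reg, #(64 - tag_lsb)		// rotate the tag back into place
 */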
static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

	switch (n) {
	case 0:
		insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_AND,
							  AARCH64_INSN_VARIANT_64BIT,
							  rn, rd, va_mask);
		break;

	case 1:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd,
					     tag_lsb);
		break;

	case 2:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(11, 0),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 3:
		insn = aarch64_insn_gen_add_sub_imm(rd, rn,
						    tag_val & GENMASK(23, 12),
						    AARCH64_INSN_VARIANT_64BIT,
						    AARCH64_INSN_ADSB_ADD);
		break;

	case 4:
		/* ROR is a variant of EXTR with Rm = Rn */
		insn = aarch64_insn_gen_extr(AARCH64_INSN_VARIANT_64BIT,
					     rn, rn, rd, 64 - tag_lsb);
		break;
	}

	return insn;
}

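/*
 * Alternative-patching callback for kern_hyp_va(): rewrite the five
 * placeholder instructions at each call site using the layout computed
 * by kvm_compute_layout().
 */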
void __init kvm_update_va_mask(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	int i;

	BUG_ON(nr_inst != 5);

	for (i = 0; i < nr_inst; i++) {
		u32 rd, rn, insn, oinsn;

		/*
		 * VHE doesn't need any address translation, let's NOP
		 * everything.
		 *
		 * Alternatively, if the tag is zero (because the layout
		 * dictates it and we don't have any spare bits in the
		 * address), NOP everything after masking the kernel VA.
		 */
		if (has_vhe() || (!tag_val && i > 0)) {
			updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
			continue;
		}

		oinsn = le32_to_cpu(origptr[i]);
		rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
		rn = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RN, oinsn);

		insn = compute_instruction(i, rd, rn);
		BUG_ON(insn == AARCH64_BREAK_FAULT);

		updptr[i] = cpu_to_le32(insn);
	}
}

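/*
 * For CPUs affected by Spectre-v3a, redirect the vector branch: build the
 * hyp VA of the matching vector slot in x0 with a movz/movk sequence and
 * branch to it, skipping the vector preamble.
 */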
void kvm_patch_vector_branch(struct alt_instr *alt,
			     __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u64 addr;
	u32 insn;

	BUG_ON(nr_inst != 4);

	if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
		return;

	/*
	 * Compute HYP VA by using the same computation as kern_hyp_va()
	 */
	addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));

	/* Use PC[10:7] to branch to the same vector in KVM */
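	/*
	 * Each vector slot is 0x80 bytes, so bits [10:7] of the original
	 * branch address identify which of the 16 vectors is being patched;
	 * preserving them lands us on the matching slot in the hyp vectors.
	 */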
	addr |= ((u64)origptr & GENMASK_ULL(10, 7));

	/*
	 * Branch over the preamble in order to avoid the initial store on
	 * the stack (which we already perform in the hardening vectors).
	 */
	addr += KVM_VECTOR_PREAMBLE;

	/* movz x0, #(addr & 0xffff) */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)addr,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk x0, #((addr >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
					 (u16)(addr >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* br x0 */
	insn = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_0,
					   AARCH64_INSN_BRANCH_NOLINK);
	*updptr++ = cpu_to_le32(insn);
}

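/*
 * Replace the four patched instructions with a movz/movk/movk/movk
 * sequence that materialises the 64-bit 'val' in the register targeted
 * by the first original instruction.
 */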
static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u32 insn, oinsn, rd;

	BUG_ON(nr_inst != 4);

	/* Compute target register */
	oinsn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);

	/* movz rd, #(val & 0xffff) */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)val,
					 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 16) & 0xffff), lsl #16 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 16),
					 16,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 32) & 0xffff), lsl #32 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 32),
					 32,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);

	/* movk rd, #((val >> 48) & 0xffff), lsl #48 */
	insn = aarch64_insn_gen_movewide(rd,
					 (u16)(val >> 48),
					 48,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_KEEP);
	*updptr++ = cpu_to_le32(insn);
}

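/*
 * Patch in kimage_voffset (the offset between kernel-image VAs and their
 * PAs) so that hyp code gets the value from the instruction stream rather
 * than from a memory load.
 */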
void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}

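/* Patch in the system-wide sanitised value of CTR_EL0 for use by hyp code. */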
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
			       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
		       origptr, updptr, nr_inst);
}