linux/arch/arm64/kvm/hyp/nvhe/setup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

struct hyp_pool hpool;
unsigned long hyp_nr_cpus;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
                         (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;

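/*
 * Carve the hyp memory pool into the regions needed later on: the vmemmap
 * backing hyp's page array, the hyp stage-1 page-table pages, and the host
 * stage-2 page-table pages.
 */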
static int divide_memory_pool(void *virt, unsigned long size)
{
        unsigned long vstart, vend, nr_pages;

        hyp_early_alloc_init(virt, size);

        hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
        nr_pages = (vend - vstart) >> PAGE_SHIFT;
        vmemmap_base = hyp_early_alloc_contig(nr_pages);
        if (!vmemmap_base)
                return -ENOMEM;

        nr_pages = hyp_s1_pgtable_pages();
        hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!hyp_pgt_base)
                return -ENOMEM;

        nr_pages = host_s2_pgtable_pages();
        host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!host_s2_pgt_base)
                return -ENOMEM;

        return 0;
}

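/*
 * Rebuild the hypervisor's stage-1 mappings from scratch: the idmap and
 * vectors, the hyp text/rodata/bss sections, the memory pool itself, and
 * each CPU's per-cpu area and stack.
 */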
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                                 unsigned long *per_cpu_base,
                                 u32 hyp_va_bits)
{
        void *start, *end, *virt = hyp_phys_to_virt(phys);
        unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
        int ret, i;

        /* Recreate the hyp page-table using the early page allocator */
        hyp_early_alloc_init(hyp_pgt_base, pgt_size);
        ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
                                   &hyp_early_alloc_mm_ops);
        if (ret)
                return ret;

        ret = hyp_create_idmap(hyp_va_bits);
        if (ret)
                return ret;

        ret = hyp_map_vectors();
        if (ret)
                return ret;

        ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__start_rodata, __end_rodata, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
        if (ret)
                return ret;

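        /* Map each CPU's per-cpu area, plus one private stack page per CPU. */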
        for (i = 0; i < hyp_nr_cpus; i++) {
                start = (void *)kern_hyp_va(per_cpu_base[i]);
                end = start + PAGE_ALIGN(hyp_percpu_size);
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;

                end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
                start = end - PAGE_SIZE;
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;
        }

        return 0;
}

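/*
 * Point each CPU's boot parameters at the new hyp PGD, and clean the
 * updated structures to the PoC as they are read with the MMU off.
 */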
static void update_nvhe_init_params(void)
{
        struct kvm_nvhe_init_params *params;
        unsigned long i;

        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
                dcache_clean_inval_poc((unsigned long)params,
                                       (unsigned long)params + sizeof(*params));
        }
}

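/* kvm_pgtable_mm_ops callbacks backed by the hyp_pool allocator. */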
static void *hyp_zalloc_hyp_page(void *arg)
{
        return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
        hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
        hyp_put_page(&hpool, addr);
}

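/*
 * Second stage of init, run once the new page-tables are live: swap the
 * early allocator for the hyp_pool one, prepare the host stage-2, then
 * return to the host.
 */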
void __noreturn __pkvm_init_finalise(void)
{
        struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
        struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
        unsigned long nr_pages, reserved_pages, pfn;
        int ret;

        /* Now that the vmemmap is backed, install the full-fledged allocator */
        pfn = hyp_virt_to_pfn(hyp_pgt_base);
        nr_pages = hyp_s1_pgtable_pages();
        reserved_pages = hyp_early_alloc_nr_used_pages();
        ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
        if (ret)
                goto out;

        ret = kvm_host_prepare_stage2(host_s2_pgt_base);
        if (ret)
                goto out;

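        /*
         * From now on, the hyp stage-1 page-table grows using pages from
         * the hyp_pool instead of the early allocator.
         */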
        pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_page = hyp_zalloc_hyp_page,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .get_page = hpool_get_page,
                .put_page = hpool_put_page,
        };
        pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

out:
        /*
         * We tail-called to here from handle___pkvm_init() and will not return,
         * so make sure to propagate the return value to the host.
         */
        cpu_reg(host_ctxt, 1) = ret;

        __host_enter(host_ctxt);
}

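/*
 * First stage of init, run on the boot page-tables: divide up the memory
 * pool, recreate the hyp mappings, then jump via the idmap to switch onto
 * the new page-tables and finalise in __pkvm_init_finalise().
 */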
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
                unsigned long *per_cpu_base, u32 hyp_va_bits)
{
        struct kvm_nvhe_init_params *params;
        void *virt = hyp_phys_to_virt(phys);
        void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
        int ret;

        if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
                return -EINVAL;

        hyp_spin_lock_init(&pkvm_pgd_lock);
        hyp_nr_cpus = nr_cpus;

        ret = divide_memory_pool(virt, size);
        if (ret)
                return ret;

        ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
        if (ret)
                return ret;

        update_nvhe_init_params();

        /* Jump into the idmap page to switch to the new page-tables */
        params = this_cpu_ptr(&kvm_init_params);
        fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
        fn(__hyp_pa(params), __pkvm_init_finalise);

        unreachable();
}