linux/tools/testing/selftests/kvm/aarch64/get-reg-list.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Check for KVM_GET_REG_LIST regressions.
 *
 * Copyright (C) 2020, Red Hat, Inc.
 *
 * When attempting to migrate from a host with an older kernel to a host
 * with a newer kernel we allow the newer kernel on the destination to
 * list new registers with get-reg-list. We assume they'll be unused, at
 * least until the guest reboots, and so they're relatively harmless.
 * However, if the destination host with the newer kernel is missing
 * registers which the source host with the older kernel has, then that's
 * a regression in get-reg-list. This test checks for that regression by
 * checking the current list against a blessed list. We should never have
 * missing registers, but if new ones appear then they can probably be
 * added to the blessed list. A completely new blessed list can be created
 * by running the test with the --list command line argument.
 *
 * Note, the blessed list should be created from the oldest possible
 * kernel. We can't go older than v4.15, though, because that's the first
 * release to expose the ID system registers in KVM_GET_REG_LIST, see
 * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
 * from guests"). Also, one must use the --core-reg-fixup command line
 * option when running on an older kernel that doesn't include df205b5c6328
 * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
 */
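
/*
 * Example invocations (illustrative only; the configuration names come
 * from the vcpu_configs[] table at the bottom of this file):
 *
 *   ./get-reg-list                        # test all vcpu configs
 *   ./get-reg-list --config=sve --list    # print a new blessed list
 *   ./get-reg-list --core-reg-fixup       # for pre-df205b5c6328 kernels
 */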
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

static struct kvm_reg_list *reg_list;
static __u64 *blessed_reg, blessed_n;

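/*
 * A vcpu_config is a named selection of register sublists. Each sublist
 * pairs a blessed register array with the KVM capability and vcpu feature
 * bit it depends on, whether the feature must be finalized with
 * KVM_ARM_VCPU_FINALIZE, and any registers whose set is expected to be
 * rejected after finalization.
 */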
struct reg_sublist {
        const char *name;
        long capability;
        int feature;
        bool finalize;
        __u64 *regs;
        __u64 regs_n;
        __u64 *rejects_set;
        __u64 rejects_set_n;
};

struct vcpu_config {
        char *name;
        struct reg_sublist sublists[];
};

static struct vcpu_config *vcpu_configs[];
static int vcpu_configs_n;

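/*
 * Iterators over a config's sublists and over the register list returned
 * by KVM_GET_REG_LIST. The "filtered" variant skips registers we can't
 * bless (see filter_reg()), "missing" walks blessed registers absent from
 * the current list, and "new" walks current registers absent from the
 * blessed list.
 */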
#define for_each_sublist(c, s)                                                  \
        for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define for_each_reg(i)                                                         \
        for ((i) = 0; (i) < reg_list->n; ++(i))

#define for_each_reg_filtered(i)                                                \
        for_each_reg(i)                                                         \
                if (!filter_reg(reg_list->reg[i]))

#define for_each_missing_reg(i)                                                 \
        for ((i) = 0; (i) < blessed_n; ++(i))                                   \
                if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))

#define for_each_new_reg(i)                                                     \
        for_each_reg_filtered(i)                                                \
                if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))

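/*
 * Build a human readable name for the config by joining its sublist names
 * with '+', e.g. "vregs+pmu". The "base" sublist is implied for every
 * config, so it is left out of the name.
 */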
static const char *config_name(struct vcpu_config *c)
{
        struct reg_sublist *s;
        int len = 0;

        if (c->name)
                return c->name;

        for_each_sublist(c, s)
                len += strlen(s->name) + 1;

        c->name = malloc(len);

        /* strcat() below needs a NUL-terminated destination to append to. */
        c->name[0] = '\0';

        len = 0;
        for_each_sublist(c, s) {
                if (!strcmp(s->name, "base"))
                        continue;
                strcat(c->name + len, s->name);
                len += strlen(s->name) + 1;
                c->name[len - 1] = '+';
        }
        c->name[len - 1] = '\0';

        return c->name;
}

static bool has_cap(struct vcpu_config *c, long capability)
{
        struct reg_sublist *s;

        for_each_sublist(c, s)
                if (s->capability == capability)
                        return true;
        return false;
}

static bool filter_reg(__u64 reg)
{
        /*
         * DEMUX register presence depends on the host's CLIDR_EL1.
         * This means there's no set of them that we can bless.
         */
        if ((reg & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return true;

        return false;
}

static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
{
        int i;

        for (i = 0; i < nr_regs; ++i)
                if (reg == regs[i])
                        return true;
        return false;
}

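/*
 * Return a copy of the template with the "##" placeholder replaced by the
 * given index, e.g. "regs.regs[##]" -> "regs.regs[5]". The replacement is
 * done in place in a strdup()ed buffer, so the printed index must not need
 * more characters than the two-character placeholder it replaces. The
 * strings are never freed, which is fine for a short-lived test.
 */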
static const char *str_with_index(const char *template, __u64 index)
{
        char *str, *p;
        int n;

        str = strdup(template);
        p = strstr(str, "##");
        n = sprintf(p, "%lld", index);
        strcat(p + n, strstr(template, "##") + 2);

        return (const char *)str;
}

#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)

#define CORE_REGS_XX_NR_WORDS   2
#define CORE_SPSR_XX_NR_WORDS   2
#define CORE_FPREGS_XX_NR_WORDS 4

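/*
 * Translate a CORE register id back into the KVM_REG_ARM_CORE_REG()
 * expression that produces it. The id's low bits encode an offset, in
 * 32-bit words, into struct kvm_regs; the *_NR_WORDS constants above give
 * the width of each element so the offset can be turned into an array
 * index.
 */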
static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
{
        __u64 core_off = id & ~REG_MASK, idx;

        /*
         * core_off is the offset into struct kvm_regs
         */
        switch (core_off) {
        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
             KVM_REG_ARM_CORE_REG(regs.regs[30]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
                TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
                return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
        case KVM_REG_ARM_CORE_REG(regs.sp):
                return "KVM_REG_ARM_CORE_REG(regs.sp)";
        case KVM_REG_ARM_CORE_REG(regs.pc):
                return "KVM_REG_ARM_CORE_REG(regs.pc)";
        case KVM_REG_ARM_CORE_REG(regs.pstate):
                return "KVM_REG_ARM_CORE_REG(regs.pstate)";
        case KVM_REG_ARM_CORE_REG(sp_el1):
                return "KVM_REG_ARM_CORE_REG(sp_el1)";
        case KVM_REG_ARM_CORE_REG(elr_el1):
                return "KVM_REG_ARM_CORE_REG(elr_el1)";
        case KVM_REG_ARM_CORE_REG(spsr[0]) ...
             KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
                TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
                return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
                TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
                return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
                return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
        }

        TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
        return NULL;
}

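/*
 * Translate an SVE register id back into its KVM_REG_ARM64_SVE_* form.
 * The id packs the slice index in its low 5 bits and the register number
 * in the next 5; only slice 0 is expected today.
 */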
static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
{
        __u64 sve_off, n, i;

        if (id == KVM_REG_ARM64_SVE_VLS)
                return "KVM_REG_ARM64_SVE_VLS";

        sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
        i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

        TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);

        switch (sve_off) {
        case KVM_REG_ARM64_SVE_ZREG_BASE ...
             KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
                n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
                            "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
                return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
        case KVM_REG_ARM64_SVE_PREG_BASE ...
             KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
                n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
                            "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
                return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
        case KVM_REG_ARM64_SVE_FFR_BASE:
                TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
                            "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
                return "KVM_REG_ARM64_SVE_FFR(0)";
        }

        TEST_FAIL("%s: Unknown SVE reg id: 0x%llx", config_name(c), id);
        return NULL;
}

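/*
 * Print a register id as the source-level expression that produces it, in
 * the same form used by the blessed lists below, so the output of --list
 * can be pasted straight back into this file.
 */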
static void print_reg(struct vcpu_config *c, __u64 id)
{
        unsigned op0, op1, crn, crm, op2;
        const char *reg_size = NULL;

        TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
                    "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);

        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U8:
                reg_size = "KVM_REG_SIZE_U8";
                break;
        case KVM_REG_SIZE_U16:
                reg_size = "KVM_REG_SIZE_U16";
                break;
        case KVM_REG_SIZE_U32:
                reg_size = "KVM_REG_SIZE_U32";
                break;
        case KVM_REG_SIZE_U64:
                reg_size = "KVM_REG_SIZE_U64";
                break;
        case KVM_REG_SIZE_U128:
                reg_size = "KVM_REG_SIZE_U128";
                break;
        case KVM_REG_SIZE_U256:
                reg_size = "KVM_REG_SIZE_U256";
                break;
        case KVM_REG_SIZE_U512:
                reg_size = "KVM_REG_SIZE_U512";
                break;
        case KVM_REG_SIZE_U1024:
                reg_size = "KVM_REG_SIZE_U1024";
                break;
        case KVM_REG_SIZE_U2048:
                reg_size = "KVM_REG_SIZE_U2048";
                break;
        default:
                TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
                          config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
        }

        switch (id & KVM_REG_ARM_COPROC_MASK) {
        case KVM_REG_ARM_CORE:
                printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
                break;
        case KVM_REG_ARM_DEMUX:
                TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
                            "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
                printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
                       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
                break;
        case KVM_REG_ARM64_SYSREG:
                op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
                op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
                crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
                crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
                op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
                TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
                            "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
                printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
                break;
        case KVM_REG_ARM_FW:
                TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
                            "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
                printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
                break;
        case KVM_REG_ARM64_SVE:
                if (has_cap(c, KVM_CAP_ARM_SVE))
                        printf("\t%s,\n", sve_id_to_str(c, id));
                else
                        TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
                break;
        default:
                TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
                          config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
        }
}

/*
 * Older kernels listed each 32-bit word of CORE registers separately.
 * For 64 and 128-bit registers we need to ignore the extra words. We
 * also need to fixup the sizes, because the older kernels stated all
 * registers were 64-bit, even when they weren't.
 */
static void core_reg_fixup(void)
{
        struct kvm_reg_list *tmp;
        __u64 id, core_off;
        int i;

        tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));

        for (i = 0; i < reg_list->n; ++i) {
                id = reg_list->reg[i];

                if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
                        tmp->reg[tmp->n++] = id;
                        continue;
                }

                core_off = id & ~REG_MASK;

                switch (core_off) {
                case 0x52: case 0xd2: case 0xd6:
                        /*
                         * These offsets are pointing at padding.
                         * We need to ignore them too.
                         */
                        continue;
                case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
                     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
                        if (core_off & 3)
                                continue;
                        id &= ~KVM_REG_SIZE_MASK;
                        id |= KVM_REG_SIZE_U128;
                        tmp->reg[tmp->n++] = id;
                        continue;
                case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
                case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
                        id &= ~KVM_REG_SIZE_MASK;
                        id |= KVM_REG_SIZE_U32;
                        tmp->reg[tmp->n++] = id;
                        continue;
                default:
                        if (core_off & 1)
                                continue;
                        tmp->reg[tmp->n++] = id;
                        break;
                }
        }

        free(reg_list);
        reg_list = tmp;
}

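/*
 * Request the vcpu feature bit of every capability-gated sublist at vcpu
 * init, and run KVM_ARM_VCPU_FINALIZE for the sublists (such as SVE) that
 * require it before the register list is read.
 */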
static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
{
        struct reg_sublist *s;

        for_each_sublist(c, s)
                if (s->capability)
                        init->features[s->feature / 32] |= 1 << (s->feature % 32);
}

static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid, struct vcpu_config *c)
{
        struct reg_sublist *s;
        int feature;

        for_each_sublist(c, s) {
                if (s->finalize) {
                        feature = s->feature;
                        vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
                }
        }
}

static void check_supported(struct vcpu_config *c)
{
        struct reg_sublist *s;

        for_each_sublist(c, s) {
                if (s->capability && !kvm_check_cap(s->capability)) {
                        fprintf(stderr, "%s: %s not available, skipping tests\n", config_name(c), s->name);
                        exit(KSFT_SKIP);
                }
        }
}

static bool print_list;
static bool print_filtered;
static bool fixup_core_regs;

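/*
 * For one vcpu config: create a VM with the config's features enabled,
 * fetch KVM_GET_REG_LIST, optionally just print it, otherwise try to get
 * and set every listed register and then diff the list against the
 * blessed registers, failing on anything missing or inaccessible.
 */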
static void run_test(struct vcpu_config *c)
{
        struct kvm_vcpu_init init = { .target = -1, };
        int new_regs = 0, missing_regs = 0, i, n;
        int failed_get = 0, failed_set = 0, failed_reject = 0;
        struct kvm_vm *vm;
        struct reg_sublist *s;

        check_supported(c);

        vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
        prepare_vcpu_init(c, &init);
        aarch64_vcpu_add_default(vm, 0, &init, NULL);
        finalize_vcpu(vm, 0, c);

        reg_list = vcpu_get_reg_list(vm, 0);

        if (fixup_core_regs)
                core_reg_fixup();

        if (print_list || print_filtered) {
                putchar('\n');
                for_each_reg(i) {
                        __u64 id = reg_list->reg[i];
                        if ((print_list && !filter_reg(id)) ||
                            (print_filtered && filter_reg(id)))
                                print_reg(c, id);
                }
                putchar('\n');
                return;
        }

        /*
         * We only test that we can get the register and then write back the
         * same value. Some registers may allow other values to be written
         * back, but others only allow some bits to be changed, and at least
         * for ID registers set will fail if the value does not exactly match
         * what was returned by get. If registers that allow other values to
         * be written need to have the other values tested, then we should
         * create a new set of tests for those in a new independent test
         * executable.
         */
        for_each_reg(i) {
                uint8_t addr[2048 / 8];
                struct kvm_one_reg reg = {
                        .id = reg_list->reg[i],
                        .addr = (__u64)&addr,
                };
                bool reject_reg = false;
                int ret;

                ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, &reg);
                if (ret) {
                        printf("%s: Failed to get ", config_name(c));
                        print_reg(c, reg.id);
                        putchar('\n');
                        ++failed_get;
                }

                /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
                for_each_sublist(c, s) {
                        if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
                                reject_reg = true;
                                ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
                                if (ret != -1 || errno != EPERM) {
                                        printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
                                        print_reg(c, reg.id);
                                        putchar('\n');
                                        ++failed_reject;
                                }
                                break;
                        }
                }

                if (!reject_reg) {
                        ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, &reg);
                        if (ret) {
                                printf("%s: Failed to set ", config_name(c));
                                print_reg(c, reg.id);
                                putchar('\n');
                                ++failed_set;
                        }
                }
        }

        for_each_sublist(c, s)
                blessed_n += s->regs_n;
        blessed_reg = calloc(blessed_n, sizeof(__u64));

        n = 0;
        for_each_sublist(c, s) {
                for (i = 0; i < s->regs_n; ++i)
                        blessed_reg[n++] = s->regs[i];
        }

        for_each_new_reg(i)
                ++new_regs;

        for_each_missing_reg(i)
                ++missing_regs;

        if (new_regs || missing_regs) {
                printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
                printf("%s: Number registers:         %5lld\n", config_name(c), reg_list->n);
        }

        if (new_regs) {
                printf("\n%s: There are %d new registers.\n"
                       "Consider adding them to the blessed reg "
                       "list with the following lines:\n\n", config_name(c), new_regs);
                for_each_new_reg(i)
                        print_reg(c, reg_list->reg[i]);
                putchar('\n');
        }

        if (missing_regs) {
                printf("\n%s: There are %d missing registers.\n"
                       "The following lines are missing registers:\n\n", config_name(c), missing_regs);
                for_each_missing_reg(i)
                        print_reg(c, blessed_reg[i]);
                putchar('\n');
        }

        TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
                    "%s: There are %d missing registers; "
                    "%d registers failed get; %d registers failed set; %d registers failed reject",
                    config_name(c), missing_regs, failed_get, failed_set, failed_reject);

        pr_info("%s: PASS\n", config_name(c));
        blessed_n = 0;
        free(blessed_reg);
        free(reg_list);
        kvm_vm_free(vm);
}

static void help(void)
{
        struct vcpu_config *c;
        int i;

        printf(
        "\n"
        "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
        " --config=<selection>        Used to select a specific vcpu configuration for the test/listing\n"
        "                             '<selection>' may be\n");

        for (i = 0; i < vcpu_configs_n; ++i) {
                c = vcpu_configs[i];
                printf(
        "                               '%s'\n", config_name(c));
        }

        printf(
        "\n"
        " --list                      Print the register list rather than test it (requires --config)\n"
        " --list-filtered             Print registers that would normally be filtered out (requires --config)\n"
        " --core-reg-fixup            Needed when running on old kernels with broken core reg listings\n"
        "\n"
        );
}

static struct vcpu_config *parse_config(const char *config)
{
        struct vcpu_config *c;
        int i;

        if (config[8] != '=')
                help(), exit(1);

        for (i = 0; i < vcpu_configs_n; ++i) {
                c = vcpu_configs[i];
                if (strcmp(config_name(c), &config[9]) == 0)
                        break;
        }

        if (i == vcpu_configs_n)
                help(), exit(1);

        return c;
}

int main(int ac, char **av)
{
        struct vcpu_config *c, *sel = NULL;
        int i, ret = 0;
        pid_t pid;

        for (i = 1; i < ac; ++i) {
                if (strcmp(av[i], "--core-reg-fixup") == 0)
                        fixup_core_regs = true;
                else if (strncmp(av[i], "--config", 8) == 0)
                        sel = parse_config(av[i]);
                else if (strcmp(av[i], "--list") == 0)
                        print_list = true;
                else if (strcmp(av[i], "--list-filtered") == 0)
                        print_filtered = true;
                else if (strcmp(av[i], "--help") == 0 || strcmp(av[i], "-h") == 0)
                        help(), exit(0);
                else
                        help(), exit(1);
        }

        if (print_list || print_filtered) {
                /*
                 * We only want to print the register list of a single config.
                 */
                if (!sel)
                        help(), exit(1);
        }

        for (i = 0; i < vcpu_configs_n; ++i) {
                c = vcpu_configs[i];
                if (sel && c != sel)
                        continue;

                pid = fork();

                if (!pid) {
                        run_test(c);
                        exit(0);
                } else {
                        int wstatus;
                        pid_t wpid = wait(&wstatus);
                        TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
                        if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
                                ret = KSFT_FAIL;
                }
        }

        return ret;
}

/*
 * The current blessed list was primed with the output of kernel version
 * v4.15 with --core-reg-fixup and then later updated with new registers.
 *
 * The blessed list is up to date with kernel version v5.13-rc3
 */
static __u64 base_regs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
        KVM_REG_ARM_FW_REG(0),
        KVM_REG_ARM_FW_REG(1),
        KVM_REG_ARM_FW_REG(2),
        ARM64_SYS_REG(3, 3, 14, 3, 1),  /* CNTV_CTL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 3, 2),  /* CNTV_CVAL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 0, 2),
        ARM64_SYS_REG(3, 0, 0, 0, 0),   /* MIDR_EL1 */
        ARM64_SYS_REG(3, 0, 0, 0, 6),   /* REVIDR_EL1 */
        ARM64_SYS_REG(3, 1, 0, 0, 1),   /* CLIDR_EL1 */
        ARM64_SYS_REG(3, 1, 0, 0, 7),   /* AIDR_EL1 */
        ARM64_SYS_REG(3, 3, 0, 0, 1),   /* CTR_EL0 */
        ARM64_SYS_REG(2, 0, 0, 0, 4),
        ARM64_SYS_REG(2, 0, 0, 0, 5),
        ARM64_SYS_REG(2, 0, 0, 0, 6),
        ARM64_SYS_REG(2, 0, 0, 0, 7),
        ARM64_SYS_REG(2, 0, 0, 1, 4),
        ARM64_SYS_REG(2, 0, 0, 1, 5),
        ARM64_SYS_REG(2, 0, 0, 1, 6),
        ARM64_SYS_REG(2, 0, 0, 1, 7),
        ARM64_SYS_REG(2, 0, 0, 2, 0),   /* MDCCINT_EL1 */
        ARM64_SYS_REG(2, 0, 0, 2, 2),   /* MDSCR_EL1 */
        ARM64_SYS_REG(2, 0, 0, 2, 4),
        ARM64_SYS_REG(2, 0, 0, 2, 5),
        ARM64_SYS_REG(2, 0, 0, 2, 6),
        ARM64_SYS_REG(2, 0, 0, 2, 7),
        ARM64_SYS_REG(2, 0, 0, 3, 4),
        ARM64_SYS_REG(2, 0, 0, 3, 5),
        ARM64_SYS_REG(2, 0, 0, 3, 6),
        ARM64_SYS_REG(2, 0, 0, 3, 7),
        ARM64_SYS_REG(2, 0, 0, 4, 4),
        ARM64_SYS_REG(2, 0, 0, 4, 5),
        ARM64_SYS_REG(2, 0, 0, 4, 6),
        ARM64_SYS_REG(2, 0, 0, 4, 7),
        ARM64_SYS_REG(2, 0, 0, 5, 4),
        ARM64_SYS_REG(2, 0, 0, 5, 5),
        ARM64_SYS_REG(2, 0, 0, 5, 6),
        ARM64_SYS_REG(2, 0, 0, 5, 7),
        ARM64_SYS_REG(2, 0, 0, 6, 4),
        ARM64_SYS_REG(2, 0, 0, 6, 5),
        ARM64_SYS_REG(2, 0, 0, 6, 6),
        ARM64_SYS_REG(2, 0, 0, 6, 7),
        ARM64_SYS_REG(2, 0, 0, 7, 4),
        ARM64_SYS_REG(2, 0, 0, 7, 5),
        ARM64_SYS_REG(2, 0, 0, 7, 6),
        ARM64_SYS_REG(2, 0, 0, 7, 7),
        ARM64_SYS_REG(2, 0, 0, 8, 4),
        ARM64_SYS_REG(2, 0, 0, 8, 5),
        ARM64_SYS_REG(2, 0, 0, 8, 6),
        ARM64_SYS_REG(2, 0, 0, 8, 7),
        ARM64_SYS_REG(2, 0, 0, 9, 4),
        ARM64_SYS_REG(2, 0, 0, 9, 5),
        ARM64_SYS_REG(2, 0, 0, 9, 6),
        ARM64_SYS_REG(2, 0, 0, 9, 7),
        ARM64_SYS_REG(2, 0, 0, 10, 4),
        ARM64_SYS_REG(2, 0, 0, 10, 5),
        ARM64_SYS_REG(2, 0, 0, 10, 6),
        ARM64_SYS_REG(2, 0, 0, 10, 7),
        ARM64_SYS_REG(2, 0, 0, 11, 4),
        ARM64_SYS_REG(2, 0, 0, 11, 5),
        ARM64_SYS_REG(2, 0, 0, 11, 6),
        ARM64_SYS_REG(2, 0, 0, 11, 7),
        ARM64_SYS_REG(2, 0, 0, 12, 4),
        ARM64_SYS_REG(2, 0, 0, 12, 5),
        ARM64_SYS_REG(2, 0, 0, 12, 6),
        ARM64_SYS_REG(2, 0, 0, 12, 7),
        ARM64_SYS_REG(2, 0, 0, 13, 4),
        ARM64_SYS_REG(2, 0, 0, 13, 5),
        ARM64_SYS_REG(2, 0, 0, 13, 6),
        ARM64_SYS_REG(2, 0, 0, 13, 7),
        ARM64_SYS_REG(2, 0, 0, 14, 4),
        ARM64_SYS_REG(2, 0, 0, 14, 5),
        ARM64_SYS_REG(2, 0, 0, 14, 6),
        ARM64_SYS_REG(2, 0, 0, 14, 7),
        ARM64_SYS_REG(2, 0, 0, 15, 4),
        ARM64_SYS_REG(2, 0, 0, 15, 5),
        ARM64_SYS_REG(2, 0, 0, 15, 6),
        ARM64_SYS_REG(2, 0, 0, 15, 7),
        ARM64_SYS_REG(2, 4, 0, 7, 0),   /* DBGVCR32_EL2 */
        ARM64_SYS_REG(3, 0, 0, 0, 5),   /* MPIDR_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 0),   /* ID_PFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 1),   /* ID_PFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 2),   /* ID_DFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 3),   /* ID_AFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 4),   /* ID_MMFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 5),   /* ID_MMFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 6),   /* ID_MMFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 1, 7),   /* ID_MMFR3_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 0),   /* ID_ISAR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 1),   /* ID_ISAR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 2),   /* ID_ISAR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 3),   /* ID_ISAR3_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 4),   /* ID_ISAR4_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 5),   /* ID_ISAR5_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 6),   /* ID_MMFR4_EL1 */
        ARM64_SYS_REG(3, 0, 0, 2, 7),   /* ID_ISAR6_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 0),   /* MVFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 1),   /* MVFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 2),   /* MVFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 3),
        ARM64_SYS_REG(3, 0, 0, 3, 4),   /* ID_PFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 5),   /* ID_DFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 6),   /* ID_MMFR5_EL1 */
        ARM64_SYS_REG(3, 0, 0, 3, 7),
        ARM64_SYS_REG(3, 0, 0, 4, 0),   /* ID_AA64PFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 1),   /* ID_AA64PFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 2),
        ARM64_SYS_REG(3, 0, 0, 4, 3),
        ARM64_SYS_REG(3, 0, 0, 4, 4),   /* ID_AA64ZFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 4, 5),
        ARM64_SYS_REG(3, 0, 0, 4, 6),
        ARM64_SYS_REG(3, 0, 0, 4, 7),
        ARM64_SYS_REG(3, 0, 0, 5, 0),   /* ID_AA64DFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 1),   /* ID_AA64DFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 2),
        ARM64_SYS_REG(3, 0, 0, 5, 3),
        ARM64_SYS_REG(3, 0, 0, 5, 4),   /* ID_AA64AFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 5),   /* ID_AA64AFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 5, 6),
        ARM64_SYS_REG(3, 0, 0, 5, 7),
        ARM64_SYS_REG(3, 0, 0, 6, 0),   /* ID_AA64ISAR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 6, 1),   /* ID_AA64ISAR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 6, 2),
        ARM64_SYS_REG(3, 0, 0, 6, 3),
        ARM64_SYS_REG(3, 0, 0, 6, 4),
        ARM64_SYS_REG(3, 0, 0, 6, 5),
        ARM64_SYS_REG(3, 0, 0, 6, 6),
        ARM64_SYS_REG(3, 0, 0, 6, 7),
        ARM64_SYS_REG(3, 0, 0, 7, 0),   /* ID_AA64MMFR0_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 1),   /* ID_AA64MMFR1_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 2),   /* ID_AA64MMFR2_EL1 */
        ARM64_SYS_REG(3, 0, 0, 7, 3),
        ARM64_SYS_REG(3, 0, 0, 7, 4),
        ARM64_SYS_REG(3, 0, 0, 7, 5),
        ARM64_SYS_REG(3, 0, 0, 7, 6),
        ARM64_SYS_REG(3, 0, 0, 7, 7),
        ARM64_SYS_REG(3, 0, 1, 0, 0),   /* SCTLR_EL1 */
        ARM64_SYS_REG(3, 0, 1, 0, 1),   /* ACTLR_EL1 */
        ARM64_SYS_REG(3, 0, 1, 0, 2),   /* CPACR_EL1 */
        ARM64_SYS_REG(3, 0, 2, 0, 0),   /* TTBR0_EL1 */
        ARM64_SYS_REG(3, 0, 2, 0, 1),   /* TTBR1_EL1 */
        ARM64_SYS_REG(3, 0, 2, 0, 2),   /* TCR_EL1 */
        ARM64_SYS_REG(3, 0, 5, 1, 0),   /* AFSR0_EL1 */
        ARM64_SYS_REG(3, 0, 5, 1, 1),   /* AFSR1_EL1 */
        ARM64_SYS_REG(3, 0, 5, 2, 0),   /* ESR_EL1 */
        ARM64_SYS_REG(3, 0, 6, 0, 0),   /* FAR_EL1 */
        ARM64_SYS_REG(3, 0, 7, 4, 0),   /* PAR_EL1 */
        ARM64_SYS_REG(3, 0, 10, 2, 0),  /* MAIR_EL1 */
        ARM64_SYS_REG(3, 0, 10, 3, 0),  /* AMAIR_EL1 */
        ARM64_SYS_REG(3, 0, 12, 0, 0),  /* VBAR_EL1 */
        ARM64_SYS_REG(3, 0, 12, 1, 1),  /* DISR_EL1 */
        ARM64_SYS_REG(3, 0, 13, 0, 1),  /* CONTEXTIDR_EL1 */
        ARM64_SYS_REG(3, 0, 13, 0, 4),  /* TPIDR_EL1 */
        ARM64_SYS_REG(3, 0, 14, 1, 0),  /* CNTKCTL_EL1 */
        ARM64_SYS_REG(3, 2, 0, 0, 0),   /* CSSELR_EL1 */
        ARM64_SYS_REG(3, 3, 13, 0, 2),  /* TPIDR_EL0 */
        ARM64_SYS_REG(3, 3, 13, 0, 3),  /* TPIDRRO_EL0 */
        ARM64_SYS_REG(3, 4, 3, 0, 0),   /* DACR32_EL2 */
        ARM64_SYS_REG(3, 4, 5, 0, 1),   /* IFSR32_EL2 */
        ARM64_SYS_REG(3, 4, 5, 3, 0),   /* FPEXC32_EL2 */
};

static __u64 pmu_regs[] = {
        ARM64_SYS_REG(3, 0, 9, 14, 1),  /* PMINTENSET_EL1 */
        ARM64_SYS_REG(3, 0, 9, 14, 2),  /* PMINTENCLR_EL1 */
        ARM64_SYS_REG(3, 3, 9, 12, 0),  /* PMCR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 1),  /* PMCNTENSET_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 2),  /* PMCNTENCLR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 3),  /* PMOVSCLR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 4),  /* PMSWINC_EL0 */
        ARM64_SYS_REG(3, 3, 9, 12, 5),  /* PMSELR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 13, 0),  /* PMCCNTR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 14, 0),  /* PMUSERENR_EL0 */
        ARM64_SYS_REG(3, 3, 9, 14, 3),  /* PMOVSSET_EL0 */
        ARM64_SYS_REG(3, 3, 14, 8, 0),
        ARM64_SYS_REG(3, 3, 14, 8, 1),
        ARM64_SYS_REG(3, 3, 14, 8, 2),
        ARM64_SYS_REG(3, 3, 14, 8, 3),
        ARM64_SYS_REG(3, 3, 14, 8, 4),
        ARM64_SYS_REG(3, 3, 14, 8, 5),
        ARM64_SYS_REG(3, 3, 14, 8, 6),
        ARM64_SYS_REG(3, 3, 14, 8, 7),
        ARM64_SYS_REG(3, 3, 14, 9, 0),
        ARM64_SYS_REG(3, 3, 14, 9, 1),
        ARM64_SYS_REG(3, 3, 14, 9, 2),
        ARM64_SYS_REG(3, 3, 14, 9, 3),
        ARM64_SYS_REG(3, 3, 14, 9, 4),
        ARM64_SYS_REG(3, 3, 14, 9, 5),
        ARM64_SYS_REG(3, 3, 14, 9, 6),
        ARM64_SYS_REG(3, 3, 14, 9, 7),
        ARM64_SYS_REG(3, 3, 14, 10, 0),
        ARM64_SYS_REG(3, 3, 14, 10, 1),
        ARM64_SYS_REG(3, 3, 14, 10, 2),
        ARM64_SYS_REG(3, 3, 14, 10, 3),
        ARM64_SYS_REG(3, 3, 14, 10, 4),
        ARM64_SYS_REG(3, 3, 14, 10, 5),
        ARM64_SYS_REG(3, 3, 14, 10, 6),
        ARM64_SYS_REG(3, 3, 14, 10, 7),
        ARM64_SYS_REG(3, 3, 14, 11, 0),
        ARM64_SYS_REG(3, 3, 14, 11, 1),
        ARM64_SYS_REG(3, 3, 14, 11, 2),
        ARM64_SYS_REG(3, 3, 14, 11, 3),
        ARM64_SYS_REG(3, 3, 14, 11, 4),
        ARM64_SYS_REG(3, 3, 14, 11, 5),
        ARM64_SYS_REG(3, 3, 14, 11, 6),
        ARM64_SYS_REG(3, 3, 14, 12, 0),
        ARM64_SYS_REG(3, 3, 14, 12, 1),
        ARM64_SYS_REG(3, 3, 14, 12, 2),
        ARM64_SYS_REG(3, 3, 14, 12, 3),
        ARM64_SYS_REG(3, 3, 14, 12, 4),
        ARM64_SYS_REG(3, 3, 14, 12, 5),
        ARM64_SYS_REG(3, 3, 14, 12, 6),
        ARM64_SYS_REG(3, 3, 14, 12, 7),
        ARM64_SYS_REG(3, 3, 14, 13, 0),
        ARM64_SYS_REG(3, 3, 14, 13, 1),
        ARM64_SYS_REG(3, 3, 14, 13, 2),
        ARM64_SYS_REG(3, 3, 14, 13, 3),
        ARM64_SYS_REG(3, 3, 14, 13, 4),
        ARM64_SYS_REG(3, 3, 14, 13, 5),
        ARM64_SYS_REG(3, 3, 14, 13, 6),
        ARM64_SYS_REG(3, 3, 14, 13, 7),
        ARM64_SYS_REG(3, 3, 14, 14, 0),
        ARM64_SYS_REG(3, 3, 14, 14, 1),
        ARM64_SYS_REG(3, 3, 14, 14, 2),
        ARM64_SYS_REG(3, 3, 14, 14, 3),
        ARM64_SYS_REG(3, 3, 14, 14, 4),
        ARM64_SYS_REG(3, 3, 14, 14, 5),
        ARM64_SYS_REG(3, 3, 14, 14, 6),
        ARM64_SYS_REG(3, 3, 14, 14, 7),
        ARM64_SYS_REG(3, 3, 14, 15, 0),
        ARM64_SYS_REG(3, 3, 14, 15, 1),
        ARM64_SYS_REG(3, 3, 14, 15, 2),
        ARM64_SYS_REG(3, 3, 14, 15, 3),
        ARM64_SYS_REG(3, 3, 14, 15, 4),
        ARM64_SYS_REG(3, 3, 14, 15, 5),
        ARM64_SYS_REG(3, 3, 14, 15, 6),
        ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
};

static __u64 vregs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
};

static __u64 sve_regs[] = {
        KVM_REG_ARM64_SVE_VLS,
        KVM_REG_ARM64_SVE_ZREG(0, 0),
        KVM_REG_ARM64_SVE_ZREG(1, 0),
        KVM_REG_ARM64_SVE_ZREG(2, 0),
        KVM_REG_ARM64_SVE_ZREG(3, 0),
        KVM_REG_ARM64_SVE_ZREG(4, 0),
        KVM_REG_ARM64_SVE_ZREG(5, 0),
        KVM_REG_ARM64_SVE_ZREG(6, 0),
        KVM_REG_ARM64_SVE_ZREG(7, 0),
        KVM_REG_ARM64_SVE_ZREG(8, 0),
        KVM_REG_ARM64_SVE_ZREG(9, 0),
        KVM_REG_ARM64_SVE_ZREG(10, 0),
        KVM_REG_ARM64_SVE_ZREG(11, 0),
        KVM_REG_ARM64_SVE_ZREG(12, 0),
        KVM_REG_ARM64_SVE_ZREG(13, 0),
        KVM_REG_ARM64_SVE_ZREG(14, 0),
        KVM_REG_ARM64_SVE_ZREG(15, 0),
        KVM_REG_ARM64_SVE_ZREG(16, 0),
        KVM_REG_ARM64_SVE_ZREG(17, 0),
        KVM_REG_ARM64_SVE_ZREG(18, 0),
        KVM_REG_ARM64_SVE_ZREG(19, 0),
        KVM_REG_ARM64_SVE_ZREG(20, 0),
        KVM_REG_ARM64_SVE_ZREG(21, 0),
        KVM_REG_ARM64_SVE_ZREG(22, 0),
        KVM_REG_ARM64_SVE_ZREG(23, 0),
        KVM_REG_ARM64_SVE_ZREG(24, 0),
        KVM_REG_ARM64_SVE_ZREG(25, 0),
        KVM_REG_ARM64_SVE_ZREG(26, 0),
        KVM_REG_ARM64_SVE_ZREG(27, 0),
        KVM_REG_ARM64_SVE_ZREG(28, 0),
        KVM_REG_ARM64_SVE_ZREG(29, 0),
        KVM_REG_ARM64_SVE_ZREG(30, 0),
        KVM_REG_ARM64_SVE_ZREG(31, 0),
        KVM_REG_ARM64_SVE_PREG(0, 0),
        KVM_REG_ARM64_SVE_PREG(1, 0),
        KVM_REG_ARM64_SVE_PREG(2, 0),
        KVM_REG_ARM64_SVE_PREG(3, 0),
        KVM_REG_ARM64_SVE_PREG(4, 0),
        KVM_REG_ARM64_SVE_PREG(5, 0),
        KVM_REG_ARM64_SVE_PREG(6, 0),
        KVM_REG_ARM64_SVE_PREG(7, 0),
        KVM_REG_ARM64_SVE_PREG(8, 0),
        KVM_REG_ARM64_SVE_PREG(9, 0),
        KVM_REG_ARM64_SVE_PREG(10, 0),
        KVM_REG_ARM64_SVE_PREG(11, 0),
        KVM_REG_ARM64_SVE_PREG(12, 0),
        KVM_REG_ARM64_SVE_PREG(13, 0),
        KVM_REG_ARM64_SVE_PREG(14, 0),
        KVM_REG_ARM64_SVE_PREG(15, 0),
        KVM_REG_ARM64_SVE_FFR(0),
        ARM64_SYS_REG(3, 0, 1, 2, 0),   /* ZCR_EL1 */
};

static __u64 sve_rejects_set[] = {
        KVM_REG_ARM64_SVE_VLS,
};

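/*
 * Sublists are composed into the vcpu configs below; a config is skipped
 * at runtime if any capability its sublists require is unavailable.
 * KVM_REG_ARM64_SVE_VLS can no longer be set after KVM_ARM_VCPU_FINALIZE,
 * so it is listed as expected-to-be-rejected.
 */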
#define BASE_SUBLIST \
        { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
        { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
#define PMU_SUBLIST \
        { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
          .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
#define SVE_SUBLIST \
        { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
          .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
          .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }

static struct vcpu_config vregs_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
        {0},
        },
};
static struct vcpu_config vregs_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        VREGS_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};
static struct vcpu_config sve_config = {
        .sublists = {
        BASE_SUBLIST,
        SVE_SUBLIST,
        {0},
        },
};
static struct vcpu_config sve_pmu_config = {
        .sublists = {
        BASE_SUBLIST,
        SVE_SUBLIST,
        PMU_SUBLIST,
        {0},
        },
};

static struct vcpu_config *vcpu_configs[] = {
        &vregs_config,
        &vregs_pmu_config,
        &sve_config,
        &sve_pmu_config,
};
static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);