linux/arch/mips/kvm/vz.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#ifdef CONFIG_CPU_LOONGSON64
#include "loongson_regs.h"
#endif

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
        if (sizeof(long) == 8 && cpu_has_ebase_wg)
                return read_gc0_ebase_64();
        else
                return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
        /*
         * First write with WG=1 to write upper bits, then write again in case
         * WG should be left at 0.
         * write_gc0_ebase_64() is no longer UNDEFINED since R6.
         */
        if (sizeof(long) == 8 &&
            (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
                write_gc0_ebase_64(v | MIPS_EBASE_WG);
                write_gc0_ebase_64(v);
        } else {
                write_gc0_ebase(v | MIPS_EBASE_WG);
                write_gc0_ebase(v);
        }
}

/*
 * These Config bits may be writable by the guest:
 * Config:      [K23, KU] (!TLB), K0
 * Config1:     (none)
 * Config2:     [TU, SU] (impl)
 * Config3:     ISAOnExc
 * Config4:     FTLBPageSize
 * Config5:     K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
        /* no need to be exact */
        return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

        /* Permit MSAEn changes if MSA supported and enabled */
        if (kvm_mips_guest_has_msa(&vcpu->arch))
                mask |= MIPS_CONF5_MSAEN;

        /*
         * Permit guest FPU mode changes if FPU is enabled and the relevant
         * feature exists according to FIR register.
         */
        if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
                if (cpu_has_ufr)
                        mask |= MIPS_CONF5_UFR;
                if (cpu_has_fre)
                        mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
        }

        return mask;
}

static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu)
{
        return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:      M, [MT]
 * Config1:     M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:     M
 * Config3:     M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *              VInt, SP, CDMM, MT, SM, TL]
 * Config4:     M, [VTLBSizeExt, MMUSizeExt]
 * Config5:     MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

        /* Permit FPU to be present if FPU is supported */
        if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
                mask |= MIPS_CONF1_FP;

        return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
        unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
                MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

        /* Permit MSA to be present if MSA is supported */
        if (kvm_mips_guest_can_have_msa(&vcpu->arch))
                mask |= MIPS_CONF3_MSA;

        return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu)
{
        return kvm_vz_config6_guest_wrmask(vcpu) |
                LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
        /* VZ guest has already converted gva to gpa */
        return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
        clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
        clear_bit(priority, &vcpu->arch.pending_exceptions);
        set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
        /*
         * timer expiry is asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
        /*
         * timer expiry is asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
                                   struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;

        /*
         * interrupts are asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
                                     struct kvm_mips_interrupt *irq)
{
        int intr = (int)irq->irq;

        /*
         * interrupts are asynchronous to vcpu execution therefore defer guest
         * cp0 accesses
         */
        kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                                 u32 cause)
{
        u32 irq = (priority < MIPS_EXC_MAX) ?
                kvm_priority_to_irq[priority] : 0;

        switch (priority) {
        case MIPS_EXC_INT_TIMER:
                set_gc0_cause(C_TI);
                break;

        case MIPS_EXC_INT_IO_1:
        case MIPS_EXC_INT_IO_2:
        case MIPS_EXC_INT_IPI_1:
        case MIPS_EXC_INT_IPI_2:
                if (cpu_has_guestctl2)
                        set_c0_guestctl2(irq);
                else
                        set_gc0_cause(irq);
                break;

        default:
                break;
        }

        clear_bit(priority, &vcpu->arch.pending_exceptions);
        return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                               u32 cause)
{
        u32 irq = (priority < MIPS_EXC_MAX) ?
                kvm_priority_to_irq[priority] : 0;

        switch (priority) {
        case MIPS_EXC_INT_TIMER:
                /*
                 * Explicitly clear irq associated with Cause.IP[IPTI]
                 * if GuestCtl2 virtual interrupt register not
                 * supported or if not using GuestCtl2 Hardware Clear.
                 */
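                /*
                 * Note: in this layout the hardware-clear (HC) enable bits
                 * sit 14 bits above the corresponding Cause.IP positions,
                 * hence the irq << 14 test below.
                 */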
                if (cpu_has_guestctl2) {
                        if (!(read_c0_guestctl2() & (irq << 14)))
                                clear_c0_guestctl2(irq);
                } else {
                        clear_gc0_cause(irq);
                }
                break;

        case MIPS_EXC_INT_IO_1:
        case MIPS_EXC_INT_IO_2:
        case MIPS_EXC_INT_IPI_1:
        case MIPS_EXC_INT_IPI_2:
                /* Clear GuestCtl2.VIP irq if not using Hardware Clear */
                if (cpu_has_guestctl2) {
                        if (!(read_c0_guestctl2() & (irq << 14)))
                                clear_c0_guestctl2(irq);
                } else {
                        clear_gc0_cause(irq);
                }
                break;

        default:
                break;
        }

        clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
        return 1;
}

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:       Virtual CPU.
 *
 * Returns:     true if the VZ GTOffset & real guest CP0_Count should be used
 *              instead of software emulation of guest timer.
 *              false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
        if (kvm_mips_count_disabled(vcpu))
                return false;

        /* Chosen frequency must match real frequency */
        if (mips_hpt_frequency != vcpu->arch.count_hz)
                return false;

        /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
        if (current_cpu_data.gtoffset_mask != 0xffffffff)
                return false;

        return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:       Virtual CPU.
 * @compare:    CP0_Compare register value, restored by caller.
 * @cause:      CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
                                   u32 cause)
{
        /*
         * Avoid spurious counter interrupts by setting Guest CP0_Count to just
         * after Guest CP0_Compare.
         */
        write_c0_gtoffset(compare - read_c0_count());
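        /*
         * Note: Guest CP0_Count reads as Root.Count + GTOffset, so this
         * lands the guest count at (or just past) Compare, i.e. a full
         * count period away from the next Compare match.
         */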

        back_to_back_c0_hazard();
        write_gc0_cause(cause);
}

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:       Virtual CPU.
 * @compare:    CP0_Compare register value, restored by caller.
 * @cause:      CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
                                   u32 compare, u32 cause)
{
        u32 start_count, after_count;
        ktime_t freeze_time;
        unsigned long flags;

        /*
         * Freeze the soft-timer and sync the guest CP0_Count with it. We do
         * this with interrupts disabled to avoid latency.
         */
        local_irq_save(flags);
        freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
        write_c0_gtoffset(start_count - read_c0_count());
        local_irq_restore(flags);

        /* restore guest CP0_Cause, as TI may already be set */
        back_to_back_c0_hazard();
        write_gc0_cause(cause);

        /*
         * The above sequence isn't atomic and would result in lost timer
         * interrupts if we're not careful. Detect if a timer interrupt is due
         * and assert it.
         */
        back_to_back_c0_hazard();
        after_count = read_gc0_count();
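        /*
         * All of this arithmetic is modulo 2^32, so the comparison below
         * holds exactly when Compare fell within (start_count, after_count],
         * i.e. a timer interrupt became due while state was being restored.
         */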
        if (after_count - start_count > compare - start_count - 1)
                kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:       Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 cause, compare;

        compare = kvm_read_sw_gc0_compare(cop0);
        cause = kvm_read_sw_gc0_cause(cop0);

        write_gc0_compare(compare);
        _kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:       Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
        u32 gctl0;

        gctl0 = read_c0_guestctl0();
        if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
                /* enable guest access to hard timer */
                write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

                _kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
                                       read_gc0_cause());
        }
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:       Virtual CPU.
 * @compare:    Pointer to write compare value to.
 * @cause:      Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
                                u32 *out_compare, u32 *out_cause)
{
        u32 cause, compare, before_count, end_count;
        ktime_t before_time;

        compare = read_gc0_compare();
        *out_compare = compare;

        before_time = ktime_get();

        /*
         * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
         * at which no pending timer interrupt is missing.
         */
        before_count = read_gc0_count();
        back_to_back_c0_hazard();
        cause = read_gc0_cause();
        *out_cause = cause;

        /*
         * Record a final CP0_Count which we will transfer to the soft-timer.
         * This is recorded *after* saving CP0_Cause, so we don't get any timer
         * interrupts from just after the final CP0_Count point.
         */
        back_to_back_c0_hazard();
        end_count = read_gc0_count();

        /*
         * The above sequence isn't atomic, so we could miss a timer interrupt
         * between reading CP0_Cause and end_count. Detect and record any timer
         * interrupt due between before_count and end_count.
         */
        if (end_count - before_count > compare - before_count - 1)
                kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

        /*
         * Restore soft-timer, ignoring a small amount of negative drift due to
         * delay between freeze_hrtimer and setting CP0_GTOffset.
         */
        kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:       Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 gctl0, compare, cause;

        gctl0 = read_c0_guestctl0();
        if (gctl0 & MIPS_GCTL0_GT) {
                /* disable guest use of hard timer */
                write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

                /* save hard timer state */
                _kvm_vz_save_htimer(vcpu, &compare, &cause);
        } else {
                compare = read_gc0_compare();
                cause = read_gc0_cause();
        }

        /* save timer-related state to VCPU context */
        kvm_write_sw_gc0_cause(cop0, cause);
        kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:       Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
        u32 gctl0, compare, cause;

        preempt_disable();
        gctl0 = read_c0_guestctl0();
        if (gctl0 & MIPS_GCTL0_GT) {
                /* disable guest use of timer */
                write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

                /* switch to soft timer */
                _kvm_vz_save_htimer(vcpu, &compare, &cause);

                /* leave soft timer in usable state */
                _kvm_vz_restore_stimer(vcpu, compare, cause);
        }
        preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:       32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:     Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
        if (inst.spec3_format.opcode != spec3_op)
                return false;

        switch (inst.spec3_format.func) {
        case lwle_op:
        case lwre_op:
        case cachee_op:
        case sbe_op:
        case she_op:
        case sce_op:
        case swe_op:
        case swle_op:
        case swre_op:
        case prefe_op:
        case lbue_op:
        case lhue_op:
        case lbe_op:
        case lhe_op:
        case lle_op:
        case lwe_op:
                return true;
        default:
                return false;
        }
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:       KVM VCPU state.
 * @am:         3-bit encoded access mode.
 * @eu:         Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:     Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
        u32 am_lookup;
        int err;

        /*
         * Interpret access control mode. We assume address errors will already
         * have been caught by the guest, leaving us with:
         *      AM      UM  SM  KM  31..24 23..16
         * UK    0 000          Unm   0      0
         * MK    1 001          TLB   1
         * MSK   2 010      TLB TLB   1
         * MUSK  3 011  TLB TLB TLB   1
         * MUSUK 4 100  TLB TLB Unm   0      1
         * USK   5 101      Unm Unm   0      0
         * -     6 110                0      0
         * UUSK  7 111  Unm Unm Unm   0      0
         *
         * We shift a magic value by AM across the sign bit to find if always
         * TLB mapped, and if not shift by 8 again to find if it depends on KM.
         */
        am_lookup = 0x70080000 << am;
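        /*
         * Worked example: the 0x70 byte has bits 30..28 set, so the shift
         * sets the sign bit exactly for AM 1..3 (MK, MSK, MUSK); the 0x0008
         * byte (bit 19) only reaches the sign bit after the further shift
         * by 8 for AM 4 (MUSUK).
         */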
        if ((s32)am_lookup < 0) {
                /*
                 * MK, MSK, MUSK
                 * Always TLB mapped, unless SegCtl.EU && ERL
                 */
                if (!eu || !(read_gc0_status() & ST0_ERL))
                        return true;
        } else {
                am_lookup <<= 8;
                if ((s32)am_lookup < 0) {
                        union mips_instruction inst;
                        unsigned int status;
                        u32 *opc;

                        /*
                         * MUSUK
                         * TLB mapped if not in kernel mode
                         */
                        status = read_gc0_status();
                        if (!(status & (ST0_EXL | ST0_ERL)) &&
                            (status & ST0_KSU))
                                return true;
                        /*
                         * EVA access instructions in kernel
                         * mode access user address space.
                         */
                        opc = (u32 *)vcpu->arch.pc;
                        if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
                                opc += 1;
                        err = kvm_get_badinstr(opc, vcpu, &inst.word);
                        if (!err && is_eva_access(inst))
                                return true;
                }
        }

        return false;
}

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:       KVM VCPU state.
 * @gva:        Guest virtual address to convert.
 * @gpa:        Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:     0 on success.
 *              -errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
                             unsigned long *gpa)
{
        u32 gva32 = gva;
        unsigned long segctl;

        if ((long)gva == (s32)gva32) {
                /* Handle canonical 32-bit virtual address */
                if (cpu_guest_has_segments) {
                        unsigned long mask, pa;

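                        /*
                         * Each SegCtl register holds two 16-bit segment
                         * configurations; the >> 16 cases below select the
                         * upper half for the odd-numbered CFGs.
                         */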
                        switch (gva32 >> 29) {
                        case 0:
                        case 1: /* CFG5 (1GB) */
                                segctl = read_gc0_segctl2() >> 16;
                                mask = (unsigned long)0xfc0000000ull;
                                break;
                        case 2:
                        case 3: /* CFG4 (1GB) */
                                segctl = read_gc0_segctl2();
                                mask = (unsigned long)0xfc0000000ull;
                                break;
                        case 4: /* CFG3 (512MB) */
                                segctl = read_gc0_segctl1() >> 16;
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 5: /* CFG2 (512MB) */
                                segctl = read_gc0_segctl1();
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 6: /* CFG1 (512MB) */
                                segctl = read_gc0_segctl0() >> 16;
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        case 7: /* CFG0 (512MB) */
                                segctl = read_gc0_segctl0();
                                mask = (unsigned long)0xfe0000000ull;
                                break;
                        default:
                                /*
                                 * GCC 4.9 isn't smart enough to figure out that
                                 * segctl and mask are always initialised.
                                 */
                                unreachable();
                        }

                        if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
                                             segctl & 0x0008))
                                goto tlb_mapped;

                        /* Unmapped, find guest physical address */
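                        /*
                         * The PA field of each 16-bit segment configuration
                         * occupies bits 15:9, so shifting left by 20 aligns
                         * it with physical address bits 35:29 before masking
                         * to the segment size.
                         */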
                        pa = (segctl << 20) & mask;
                        pa |= gva32 & ~mask;
                        *gpa = pa;
                        return 0;
                } else if ((s32)gva32 < (s32)0xc0000000) {
                        /* legacy unmapped KSeg0 or KSeg1 */
                        *gpa = gva32 & 0x1fffffff;
                        return 0;
                }
#ifdef CONFIG_64BIT
        } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
                /* XKPHYS */
                if (cpu_guest_has_segments) {
                        /*
                         * Each of the 8 regions can be overridden by SegCtl2.XR
                         * to use SegCtl1.XAM.
                         */
                        segctl = read_gc0_segctl2();
                        if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
                                segctl = read_gc0_segctl1();
                                if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
                                                     0))
                                        goto tlb_mapped;
                        }

                }
                /*
                 * Traditionally fully unmapped.
                 * Bits 61:59 specify the CCA, which we can just mask off here.
                 * Bits 58:PABITS should be zero, but we shouldn't have got here
                 * if it wasn't.
                 */
                *gpa = gva & 0x07ffffffffffffff;
                return 0;
#endif
        }

tlb_mapped:
        return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:       KVM VCPU state.
 * @badvaddr:   Root BadVAddr.
 * @gpa:        Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:     0 on success.
 *              -errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
                                  unsigned long *gpa)
{
        unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
                                 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

        /* If BadVAddr is GPA, then all is well in the world */
        if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
                *gpa = badvaddr;
                return 0;
        }

        /* Otherwise we'd expect it to be GVA ... */
        if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
                 "Unexpected gexccode %#x\n", gexccode))
                return -EINVAL;

        /* ... and we need to perform the GVA->GPA translation in software */
        return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
        u32 *opc = (u32 *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 inst = 0;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                exccode, opc, inst, badvaddr,
                read_gc0_status());
        kvm_arch_vcpu_dump_regs(vcpu);
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return RESUME_HOST;
}

static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
        /* Mask off unused bits */
        unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

        if (read_gc0_pagegrain() & PG_ELPA)
                mask |= 0x00ffffff00000000ull;
        if (cpu_guest_has_mvh)
                mask |= MIPS_MAAR_VH;

        /* Set or clear VH */
        if (op == mtc_op) {
                /* clear VH */
                val &= ~MIPS_MAAR_VH;
        } else if (op == dmtc_op) {
                /* set VH to match VL */
                val &= ~MIPS_MAAR_VH;
                if (val & MIPS_MAAR_VL)
                        val |= MIPS_MAAR_VH;
        }

        return val & mask;
}

static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        val &= MIPS_MAARI_INDEX;
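        /*
         * An all-ones index selects the highest implemented MAAR pair,
         * matching the architectural behaviour of MAARI; other
         * out-of-range writes are simply ignored.
         */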
        if (val == MIPS_MAARI_INDEX)
                kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
        else if (val < ARRAY_SIZE(vcpu->arch.maar))
                kvm_write_sw_gc0_maari(cop0, val);
}

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
                                              u32 *opc, u32 cause,
                                              struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        u32 rt, rd, sel;
        unsigned long curr_pc;
        unsigned long val;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        if (inst.co_format.co) {
                switch (inst.co_format.func) {
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                default:
                        er = EMULATE_FAIL;
                }
        } else {
                rt = inst.c0r_format.rt;
                rd = inst.c0r_format.rd;
                sel = inst.c0r_format.sel;

                switch (inst.c0r_format.rs) {
                case dmfc_op:
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        if (rd == MIPS_CP0_COUNT &&
                            sel == 0) {                 /* Count */
                                val = kvm_mips_read_count(vcpu);
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                val = read_gc0_compare();
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 0) {          /* LLAddr */
                                if (cpu_guest_has_rw_llb)
                                        val = read_gc0_lladdr() &
                                                MIPS_LLADDR_LLB;
                                else
                                        val = 0;
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 1 &&          /* MAAR */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                /* MAARI must be in range */
                                BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
                                                ARRAY_SIZE(vcpu->arch.maar));
                                val = vcpu->arch.maar[
                                        kvm_read_sw_gc0_maari(cop0)];
                        } else if ((rd == MIPS_CP0_PRID &&
                                    (sel == 0 ||        /* PRid */
                                     sel == 2 ||        /* CDMMBase */
                                     sel == 3)) ||      /* CMGCRBase */
                                   (rd == MIPS_CP0_STATUS &&
                                    (sel == 2 ||        /* SRSCtl */
                                     sel == 3)) ||      /* SRSMap */
                                   (rd == MIPS_CP0_CONFIG &&
                                    (sel == 6 ||        /* Config6 */
                                     sel == 7)) ||      /* Config7 */
                                   (rd == MIPS_CP0_LLADDR &&
                                    (sel == 2) &&       /* MAARI */
                                    cpu_guest_has_maar &&
                                    !cpu_guest_has_dyn_maar) ||
                                   (rd == MIPS_CP0_ERRCTL &&
                                    (sel == 0))) {      /* ErrCtl */
                                val = cop0->reg[rd][sel];
#ifdef CONFIG_CPU_LOONGSON64
                        } else if (rd == MIPS_CP0_DIAG &&
                                   (sel == 0)) {        /* Diag */
                                val = cop0->reg[rd][sel];
#endif
                        } else {
                                val = 0;
                                er = EMULATE_FAIL;
                        }

                        if (er != EMULATE_FAIL) {
                                /* Sign extend */
                                if (inst.c0r_format.rs == mfc_op)
                                        val = (int)val;
                                vcpu->arch.gprs[rt] = val;
                        }

                        trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
                                        KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
                                      KVM_TRACE_COP0(rd, sel), val);
                        break;

                case dmtc_op:
                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        val = vcpu->arch.gprs[rt];
                        trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
                                        KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
                                      KVM_TRACE_COP0(rd, sel), val);

                        if (rd == MIPS_CP0_COUNT &&
                            sel == 0) {                 /* Count */
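                                /*
                                 * Count writes go through the soft timer
                                 * model, so fold any hard timer state back
                                 * into it first.
                                 */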
                                kvm_vz_lose_htimer(vcpu);
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                        } else if (rd == MIPS_CP0_COMPARE &&
                                   sel == 0) {          /* Compare */
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt],
                                                       true);
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 0) {          /* LLAddr */
                                /*
                                 * P5600 generates GPSI on guest MTC0 LLAddr.
                                 * Only allow the guest to clear LLB.
                                 */
                                if (cpu_guest_has_rw_llb &&
                                    !(val & MIPS_LLADDR_LLB))
                                        write_gc0_lladdr(0);
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   sel == 1 &&          /* MAAR */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                val = mips_process_maar(inst.c0r_format.rs,
                                                        val);

                                /* MAARI must be in range */
                                BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
                                                ARRAY_SIZE(vcpu->arch.maar));
                                vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
                                                                        val;
                        } else if (rd == MIPS_CP0_LLADDR &&
                                   (sel == 2) &&        /* MAARI */
                                   cpu_guest_has_maar &&
                                   !cpu_guest_has_dyn_maar) {
                                kvm_write_maari(vcpu, val);
                        } else if (rd == MIPS_CP0_CONFIG &&
                                   (sel == 6)) {
                                cop0->reg[rd][sel] = (int)val;
                        } else if (rd == MIPS_CP0_ERRCTL &&
                                   (sel == 0)) {        /* ErrCtl */
                                /* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
                        } else if (rd == MIPS_CP0_DIAG &&
                                   (sel == 0)) {        /* Diag */
                                unsigned long flags;

                                local_irq_save(flags);
                                if (val & LOONGSON_DIAG_BTB) {
                                        /* Flush BTB */
                                        set_c0_diag(LOONGSON_DIAG_BTB);
                                }
                                if (val & LOONGSON_DIAG_ITLB) {
                                        /* Flush ITLB */
                                        set_c0_diag(LOONGSON_DIAG_ITLB);
                                }
                                if (val & LOONGSON_DIAG_DTLB) {
                                        /* Flush DTLB */
                                        set_c0_diag(LOONGSON_DIAG_DTLB);
                                }
                                if (val & LOONGSON_DIAG_VTLB) {
                                        /* Flush VTLB */
                                        kvm_loongson_clear_guest_vtlb();
                                }
                                if (val & LOONGSON_DIAG_FTLB) {
                                        /* Flush FTLB */
                                        kvm_loongson_clear_guest_ftlb();
                                }
                                local_irq_restore(flags);
#endif
                        } else {
                                er = EMULATE_FAIL;
                        }
                        break;

                default:
                        er = EMULATE_FAIL;
                        break;
                }
        }
        /* Rollback PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL) {
                kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
                        curr_pc, __func__, inst.word);

                vcpu->arch.pc = curr_pc;
        }

        return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
                                               u32 *opc, u32 cause,
                                               struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        u32 cache, op_inst, op, base;
        s16 offset;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long va, curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        base = inst.i_format.rs;
        op_inst = inst.i_format.rt;
        if (cpu_has_mips_r6)
                offset = inst.spec3_format.simmediate;
        else
                offset = inst.i_format.simmediate;
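        /*
         * MIPSr6 moved CACHE into the SPEC3 opcode space with a narrower
         * 9-bit signed offset, hence the different field decode above.
         */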
        cache = op_inst & CacheOp_Cache;
        op = op_inst & CacheOp_Op;

        va = arch->gprs[base] + offset;

        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                  cache, op, base, arch->gprs[base], offset);

        /* Secondary or tertiary cache ops ignored */
        if (cache != Cache_I && cache != Cache_D)
                return EMULATE_DONE;

        switch (op_inst) {
        case Index_Invalidate_I:
                flush_icache_line_indexed(va);
                return EMULATE_DONE;
        case Index_Writeback_Inv_D:
                flush_dcache_line_indexed(va);
                return EMULATE_DONE;
        case Hit_Invalidate_I:
        case Hit_Invalidate_D:
        case Hit_Writeback_Inv_D:
                if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
                        /* We can just flush entire icache */
                        local_flush_icache_range(0, 0);
                        return EMULATE_DONE;
                }

                /* So far, other platforms support guest hit cache ops */
                break;
        default:
                break;
        }

        kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
                offset);
        /* Rollback PC */
        vcpu->arch.pc = curr_pc;

        return EMULATE_FAIL;
}

#ifdef CONFIG_CPU_LOONGSON64
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
                                              u32 *opc, u32 cause,
                                              struct kvm_vcpu *vcpu)
{
        unsigned int rs, rd;
        unsigned int hostcfg;
        unsigned long curr_pc;
        enum emulation_result er = EMULATE_DONE;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        rs = inst.loongson3_lscsr_format.rs;
        rd = inst.loongson3_lscsr_format.rd;
        switch (inst.loongson3_lscsr_format.fr) {
        case 0x8:  /* Read CPUCFG */
                ++vcpu->stat.vz_cpucfg_exits;
                hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);

                switch (vcpu->arch.gprs[rs]) {
                case LOONGSON_CFG0:
                        vcpu->arch.gprs[rd] = 0x14c000;
                        break;
                case LOONGSON_CFG1:
                        hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
                                    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
                                    LOONGSON_CFG1_SFBP);
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                case LOONGSON_CFG2:
                        hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
                                    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                case LOONGSON_CFG3:
                        vcpu->arch.gprs[rd] = hostcfg;
                        break;
                default:
                        /* Don't export any other advanced features to guest */
                        vcpu->arch.gprs[rd] = 0;
                        break;
                }
                break;

        default:
                kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
                        inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
                er = EMULATE_FAIL;
                break;
        }

        /* Rollback PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL) {
                kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
                        curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);

                vcpu->arch.pc = curr_pc;
        }

        return er;
}
#endif

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
                                                     struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        union mips_instruction inst;
        int rd, rt, sel;
        int err;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err)
                return EMULATE_FAIL;

        switch (inst.r_format.opcode) {
        case cop0_op:
                er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
                break;
#ifndef CONFIG_CPU_MIPSR6
        case cache_op:
                trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
                er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
                break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
        case lwc2_op:
                er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu);
                break;
#endif
        case spec3_op:
                switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
                case cache6_op:
                        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
                        er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
                        break;
#endif
                case rdhwr_op:
                        if (inst.r_format.rs || (inst.r_format.re >> 3))
                                goto unknown;

                        rd = inst.r_format.rd;
                        rt = inst.r_format.rt;
                        sel = inst.r_format.re & 0x7;

                        switch (rd) {
                        case MIPS_HWR_CC:       /* Read count register */
                                arch->gprs[rt] =
                                        (long)(int)kvm_mips_read_count(vcpu);
                                break;
                        default:
                                trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
                                              KVM_TRACE_HWR(rd, sel), 0);
                                goto unknown;
                        }

                        trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
                                      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

                        er = update_pc(vcpu, cause);
                        break;
                default:
                        goto unknown;
                }
                break;
unknown:

        default:
                kvm_err("GPSI exception not supported (%p/%#x)\n",
                                opc, inst.word);
                kvm_arch_vcpu_dump_regs(vcpu);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
                                                     struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        union mips_instruction inst;
        int err;

        /*
         *  Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err)
                return EMULATE_FAIL;

        /* complete MTC0 on behalf of guest and advance EPC */
        if (inst.c0r_format.opcode == cop0_op &&
            inst.c0r_format.rs == mtc_op &&
            inst.c0r_format.z == 0) {
                int rt = inst.c0r_format.rt;
                int rd = inst.c0r_format.rd;
                int sel = inst.c0r_format.sel;
                unsigned int val = arch->gprs[rt];
                unsigned int old_val, change;

                trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
                              val);

                if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                        /* FR bit should read as zero if no FPU */
                        if (!kvm_mips_guest_has_fpu(&vcpu->arch))
                                val &= ~(ST0_CU1 | ST0_FR);

                        /*
                         * Also don't allow FR to be set if host doesn't support
                         * it.
                         */
                        if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
                                val &= ~ST0_FR;

                        old_val = read_gc0_status();
                        change = val ^ old_val;

                        if (change & ST0_FR) {
                                /*
                                 * FPU and Vector register state is made
                                 * UNPREDICTABLE by a change of FR, so don't
                                 * even bother saving it.
                                 */
                                kvm_drop_fpu(vcpu);
                        }

                        /*
                         * If MSA state is already live, it is undefined how it
                         * interacts with FR=0 FPU state, and we don't want to
                         * hit reserved instruction exceptions trying to save
                         * the MSA state later when CU=1 && FR=1, so play it
                         * safe and save it first.
                         */
                        if (change & ST0_CU1 && !(val & ST0_FR) &&
                            vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
                                kvm_lose_fpu(vcpu);

                        write_gc0_status(val);
                } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
                        u32 old_cause = read_gc0_cause();
                        u32 change = old_cause ^ val;

                        /* DC bit enabling/disabling timer? */
                        if (change & CAUSEF_DC) {
                                if (val & CAUSEF_DC) {
                                        kvm_vz_lose_htimer(vcpu);
                                        kvm_mips_count_disable_cause(vcpu);
                                } else {
                                        kvm_mips_count_enable_cause(vcpu);
                                }
                        }

                        /* Only certain bits are RW to the guest */
                        change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
                                   CAUSEF_IP0 | CAUSEF_IP1);

                        /* WP can only be cleared */
                        change &= ~CAUSEF_WP | old_cause;
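                        /*
                         * i.e. a WP change survives the mask only when the
                         * old value had WP set, so the guest can clear WP
                         * but never set it.
                         */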
1381
1382                        write_gc0_cause(old_cause ^ change);
1383                } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
1384                        write_gc0_intctl(val);
1385                } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1386                        old_val = read_gc0_config5();
1387                        change = val ^ old_val;
1388                        /* Handle changes in FPU/MSA modes */
1389                        preempt_disable();
1390
1391                        /*
1392                         * Propagate FRE changes immediately if the FPU
1393                         * context is already loaded.
1394                         */
1395                        if (change & MIPS_CONF5_FRE &&
1396                            vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1397                                change_c0_config5(MIPS_CONF5_FRE, val);
1398
1399                        preempt_enable();
1400
1401                        val = old_val ^
1402                                (change & kvm_vz_config5_guest_wrmask(vcpu));
1403                        write_gc0_config5(val);
1404                } else {
1405                        kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
1406                            opc, inst.word);
1407                        er = EMULATE_FAIL;
1408                }
1409
1410                if (er != EMULATE_FAIL)
1411                        er = update_pc(vcpu, cause);
1412        } else {
1413                kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
1414                        opc, inst.word);
1415                er = EMULATE_FAIL;
1416        }
1417
1418        return er;
1419}
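
/*
 * The Cause write in the GSFC path above works by XORing in a mask of
 * permitted bit toggles. A worked example of the "WP can only be cleared"
 * rule, with illustrative values (not taken from the source):
 *
 *	old_cause = CAUSEF_WP | CAUSEF_IP1;	// WP=1, IP1=1
 *	val       = CAUSEF_IP0;			// guest clears WP/IP1, sets IP0
 *	change    = old_cause ^ val;		// WP, IP0 and IP1 all toggle
 *	change   &= CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
 *		    CAUSEF_IP0 | CAUSEF_IP1;	// drop non-writable toggles
 *	change   &= ~CAUSEF_WP | old_cause;	// WP toggle kept: old WP was 1
 *	write_gc0_cause(old_cause ^ change);	// result: WP=0, IP0=1, IP1=0
 *
 * Had old_cause.WP been 0, the (~CAUSEF_WP | old_cause) mask would have a
 * zero in the WP position, discarding any attempt to set WP.
 */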
1420
1421static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
1422                                                     struct kvm_vcpu *vcpu)
1423{
1424        /*
1425         * Presumably this is due to MC (guest mode change), so let's trace some
1426         * relevant info.
1427         */
1428        trace_kvm_guest_mode_change(vcpu);
1429
1430        return EMULATE_DONE;
1431}
1432
1433static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
1434                                                   struct kvm_vcpu *vcpu)
1435{
1436        enum emulation_result er;
1437        union mips_instruction inst;
1438        unsigned long curr_pc;
1439        int err;
1440
1441        if (cause & CAUSEF_BD)
1442                opc += 1;
1443        err = kvm_get_badinstr(opc, vcpu, &inst.word);
1444        if (err)
1445                return EMULATE_FAIL;
1446
1447        /*
1448         * Update PC and hold onto current PC in case there is
1449         * an error and we want to rollback the PC
1450         */
1451        curr_pc = vcpu->arch.pc;
1452        er = update_pc(vcpu, cause);
1453        if (er == EMULATE_FAIL)
1454                return er;
1455
1456        er = kvm_mips_emul_hypcall(vcpu, inst);
1457        if (er == EMULATE_FAIL)
1458                vcpu->arch.pc = curr_pc;
1459
1460        return er;
1461}
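
/*
 * This exit is raised by the VZ HYPCALL instruction executing in guest
 * mode; kvm_mips_emul_hypcall() decides which HYPCALL code fields are
 * accepted. A minimal guest-side sketch, assuming a toolchain with the VZ
 * ASE and the register convention consumed by kvm_mips_handle_hypcall()
 * (service number in $v0, arguments in $a0..$a3, result back in $v0);
 * kvm_hypercall1() is an illustrative name, not an API defined here:
 *
 *	static inline long kvm_hypercall1(long num, long arg0)
 *	{
 *		register long v0 asm("$2") = num;	// $v0
 *		register long a0 asm("$4") = arg0;	// $a0
 *
 *		asm volatile(".set push; .set virt; hypcall; .set pop"
 *			     : "+r" (v0)
 *			     : "r" (a0)
 *			     : "memory");
 *		return v0;
 *	}
 */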
1462
1463static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
1464                                                        u32 cause,
1465                                                        u32 *opc,
1466                                                        struct kvm_vcpu *vcpu)
1467{
1468        u32 inst;
1469
1470        /*
1471         *  Fetch the instruction.
1472         */
1473        if (cause & CAUSEF_BD)
1474                opc += 1;
1475        kvm_get_badinstr(opc, vcpu, &inst);
1476
1477        kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x  Status: %#x\n",
1478                gexccode, opc, inst, read_gc0_status());
1479
1480        return EMULATE_FAIL;
1481}
1482
1483static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
1484{
1485        u32 *opc = (u32 *) vcpu->arch.pc;
1486        u32 cause = vcpu->arch.host_cp0_cause;
1487        enum emulation_result er = EMULATE_DONE;
1488        u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
1489                        MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
1490        int ret = RESUME_GUEST;
1491
1492        trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
1493        switch (gexccode) {
1494        case MIPS_GCTL0_GEXC_GPSI:
1495                ++vcpu->stat.vz_gpsi_exits;
1496                er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
1497                break;
1498        case MIPS_GCTL0_GEXC_GSFC:
1499                ++vcpu->stat.vz_gsfc_exits;
1500                er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
1501                break;
1502        case MIPS_GCTL0_GEXC_HC:
1503                ++vcpu->stat.vz_hc_exits;
1504                er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
1505                break;
1506        case MIPS_GCTL0_GEXC_GRR:
1507                ++vcpu->stat.vz_grr_exits;
1508                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1509                                                       vcpu);
1510                break;
1511        case MIPS_GCTL0_GEXC_GVA:
1512                ++vcpu->stat.vz_gva_exits;
1513                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1514                                                       vcpu);
1515                break;
1516        case MIPS_GCTL0_GEXC_GHFC:
1517                ++vcpu->stat.vz_ghfc_exits;
1518                er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
1519                break;
1520        case MIPS_GCTL0_GEXC_GPA:
1521                ++vcpu->stat.vz_gpa_exits;
1522                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1523                                                       vcpu);
1524                break;
1525        default:
1526                ++vcpu->stat.vz_resvd_exits;
1527                er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1528                                                       vcpu);
1529                break;
1530
1531        }
1532
1533        if (er == EMULATE_DONE) {
1534                ret = RESUME_GUEST;
1535        } else if (er == EMULATE_HYPERCALL) {
1536                ret = kvm_mips_handle_hypcall(vcpu);
1537        } else {
1538                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1539                ret = RESUME_HOST;
1540        }
1541        return ret;
1542}
1543
1544/**
1545 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1546 * @vcpu:       Virtual CPU context.
1547 *
1548 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
1549 * by the root context.
1550 */
1551static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
1552{
1553        u32 cause = vcpu->arch.host_cp0_cause;
1554        enum emulation_result er = EMULATE_FAIL;
1555        int ret = RESUME_GUEST;
1556
1557        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
1558                /*
1559                 * If guest FPU not present, the FPU operation should have been
1560                 * treated as a reserved instruction!
1561                 * If FPU already in use, we shouldn't get this at all.
1562                 */
1563                if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1564                            vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1565                        preempt_enable();
1566                        return EMULATE_FAIL;
1567                }
1568
1569                kvm_own_fpu(vcpu);
1570                er = EMULATE_DONE;
1571        }
1572        /* other coprocessors not handled */
1573
1574        switch (er) {
1575        case EMULATE_DONE:
1576                ret = RESUME_GUEST;
1577                break;
1578
1579        case EMULATE_FAIL:
1580                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1581                ret = RESUME_HOST;
1582                break;
1583
1584        default:
1585                BUG();
1586        }
1587        return ret;
1588}
1589
1590/**
1591 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1592 * @vcpu:       Virtual CPU context.
1593 *
1594 * Handle when the guest attempts to use MSA when it is disabled in the root
1595 * context.
1596 */
1597static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
1598{
1599        /*
1600         * If MSA not present or not exposed to guest or FR=0, the MSA operation
1601         * should have been treated as a reserved instruction!
1602         * Same if CU1=1, FR=0.
1603         * If MSA already in use, we shouldn't get this at all.
1604         */
1605        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1606            (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
1607            !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
1608            vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1609                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1610                return RESUME_HOST;
1611        }
1612
1613        kvm_own_msa(vcpu);
1614
1615        return RESUME_GUEST;
1616}
1617
1618static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
1619{
1620        struct kvm_run *run = vcpu->run;
1621        u32 *opc = (u32 *) vcpu->arch.pc;
1622        u32 cause = vcpu->arch.host_cp0_cause;
1623        ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1624        union mips_instruction inst;
1625        enum emulation_result er = EMULATE_DONE;
1626        int err, ret = RESUME_GUEST;
1627
1628        if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
1629                /* A code fetch fault doesn't count as an MMIO */
1630                if (kvm_is_ifetch_fault(&vcpu->arch)) {
1631                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1632                        return RESUME_HOST;
1633                }
1634
1635                /* Fetch the instruction */
1636                if (cause & CAUSEF_BD)
1637                        opc += 1;
1638                err = kvm_get_badinstr(opc, vcpu, &inst.word);
1639                if (err) {
1640                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1641                        return RESUME_HOST;
1642                }
1643
1644                /* Treat as MMIO */
1645                er = kvm_mips_emulate_load(inst, cause, vcpu);
1646                if (er == EMULATE_FAIL) {
1647                        kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1648                                opc, badvaddr);
1649                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1650                }
1651        }
1652
1653        if (er == EMULATE_DONE) {
1654                ret = RESUME_GUEST;
1655        } else if (er == EMULATE_DO_MMIO) {
1656                run->exit_reason = KVM_EXIT_MMIO;
1657                ret = RESUME_HOST;
1658        } else {
1659                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1660                ret = RESUME_HOST;
1661        }
1662        return ret;
1663}
1664
1665static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
1666{
1667        struct kvm_run *run = vcpu->run;
1668        u32 *opc = (u32 *) vcpu->arch.pc;
1669        u32 cause = vcpu->arch.host_cp0_cause;
1670        ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1671        union mips_instruction inst;
1672        enum emulation_result er = EMULATE_DONE;
1673        int err;
1674        int ret = RESUME_GUEST;
1675
1676        /* Just try the access again if we couldn't do the translation */
1677        if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
1678                return RESUME_GUEST;
1679        vcpu->arch.host_cp0_badvaddr = badvaddr;
1680
1681        if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
1682                /* Fetch the instruction */
1683                if (cause & CAUSEF_BD)
1684                        opc += 1;
1685                err = kvm_get_badinstr(opc, vcpu, &inst.word);
1686                if (err) {
1687                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1688                        return RESUME_HOST;
1689                }
1690
1691                /* Treat as MMIO */
1692                er = kvm_mips_emulate_store(inst, cause, vcpu);
1693                if (er == EMULATE_FAIL) {
1694                        kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1695                                opc, badvaddr);
1696                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1697                }
1698        }
1699
1700        if (er == EMULATE_DONE) {
1701                ret = RESUME_GUEST;
1702        } else if (er == EMULATE_DO_MMIO) {
1703                run->exit_reason = KVM_EXIT_MMIO;
1704                ret = RESUME_HOST;
1705        } else {
1706                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1707                ret = RESUME_HOST;
1708        }
1709        return ret;
1710}
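
/*
 * When the load/store handlers above return EMULATE_DO_MMIO, KVM_RUN
 * returns to userspace with KVM_EXIT_MMIO and the access described in
 * vcpu->run->mmio. A minimal userspace sketch of the completion loop;
 * handle_mmio_read()/handle_mmio_write() are hypothetical VMM helpers:
 *
 *	struct kvm_run *run = vcpu_run_mmap;	// assumed mmap of the vcpu fd
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			handle_mmio_write(run->mmio.phys_addr,
 *					  run->mmio.data, run->mmio.len);
 *		else
 *			handle_mmio_read(run->mmio.phys_addr,
 *					 run->mmio.data, run->mmio.len);
 *		ioctl(vcpu_fd, KVM_RUN, 0);	// read data consumed on re-entry
 *	}
 */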
1711
1712static u64 kvm_vz_get_one_regs[] = {
1713        KVM_REG_MIPS_CP0_INDEX,
1714        KVM_REG_MIPS_CP0_ENTRYLO0,
1715        KVM_REG_MIPS_CP0_ENTRYLO1,
1716        KVM_REG_MIPS_CP0_CONTEXT,
1717        KVM_REG_MIPS_CP0_PAGEMASK,
1718        KVM_REG_MIPS_CP0_PAGEGRAIN,
1719        KVM_REG_MIPS_CP0_WIRED,
1720        KVM_REG_MIPS_CP0_HWRENA,
1721        KVM_REG_MIPS_CP0_BADVADDR,
1722        KVM_REG_MIPS_CP0_COUNT,
1723        KVM_REG_MIPS_CP0_ENTRYHI,
1724        KVM_REG_MIPS_CP0_COMPARE,
1725        KVM_REG_MIPS_CP0_STATUS,
1726        KVM_REG_MIPS_CP0_INTCTL,
1727        KVM_REG_MIPS_CP0_CAUSE,
1728        KVM_REG_MIPS_CP0_EPC,
1729        KVM_REG_MIPS_CP0_PRID,
1730        KVM_REG_MIPS_CP0_EBASE,
1731        KVM_REG_MIPS_CP0_CONFIG,
1732        KVM_REG_MIPS_CP0_CONFIG1,
1733        KVM_REG_MIPS_CP0_CONFIG2,
1734        KVM_REG_MIPS_CP0_CONFIG3,
1735        KVM_REG_MIPS_CP0_CONFIG4,
1736        KVM_REG_MIPS_CP0_CONFIG5,
1737        KVM_REG_MIPS_CP0_CONFIG6,
1738#ifdef CONFIG_64BIT
1739        KVM_REG_MIPS_CP0_XCONTEXT,
1740#endif
1741        KVM_REG_MIPS_CP0_ERROREPC,
1742
1743        KVM_REG_MIPS_COUNT_CTL,
1744        KVM_REG_MIPS_COUNT_RESUME,
1745        KVM_REG_MIPS_COUNT_HZ,
1746};
1747
1748static u64 kvm_vz_get_one_regs_contextconfig[] = {
1749        KVM_REG_MIPS_CP0_CONTEXTCONFIG,
1750#ifdef CONFIG_64BIT
1751        KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
1752#endif
1753};
1754
1755static u64 kvm_vz_get_one_regs_segments[] = {
1756        KVM_REG_MIPS_CP0_SEGCTL0,
1757        KVM_REG_MIPS_CP0_SEGCTL1,
1758        KVM_REG_MIPS_CP0_SEGCTL2,
1759};
1760
1761static u64 kvm_vz_get_one_regs_htw[] = {
1762        KVM_REG_MIPS_CP0_PWBASE,
1763        KVM_REG_MIPS_CP0_PWFIELD,
1764        KVM_REG_MIPS_CP0_PWSIZE,
1765        KVM_REG_MIPS_CP0_PWCTL,
1766};
1767
1768static u64 kvm_vz_get_one_regs_kscratch[] = {
1769        KVM_REG_MIPS_CP0_KSCRATCH1,
1770        KVM_REG_MIPS_CP0_KSCRATCH2,
1771        KVM_REG_MIPS_CP0_KSCRATCH3,
1772        KVM_REG_MIPS_CP0_KSCRATCH4,
1773        KVM_REG_MIPS_CP0_KSCRATCH5,
1774        KVM_REG_MIPS_CP0_KSCRATCH6,
1775};
1776
1777static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
1778{
1779        unsigned long ret;
1780
1781        ret = ARRAY_SIZE(kvm_vz_get_one_regs);
1782        if (cpu_guest_has_userlocal)
1783                ++ret;
1784        if (cpu_guest_has_badinstr)
1785                ++ret;
1786        if (cpu_guest_has_badinstrp)
1787                ++ret;
1788        if (cpu_guest_has_contextconfig)
1789                ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1790        if (cpu_guest_has_segments)
1791                ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1792        if (cpu_guest_has_htw || cpu_guest_has_ldpte)
1793                ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1794        if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
1795                ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
1796        ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
1797
1798        return ret;
1799}
1800
1801static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
1802{
1803        u64 index;
1804        unsigned int i;
1805
1806        if (copy_to_user(indices, kvm_vz_get_one_regs,
1807                         sizeof(kvm_vz_get_one_regs)))
1808                return -EFAULT;
1809        indices += ARRAY_SIZE(kvm_vz_get_one_regs);
1810
1811        if (cpu_guest_has_userlocal) {
1812                index = KVM_REG_MIPS_CP0_USERLOCAL;
1813                if (copy_to_user(indices, &index, sizeof(index)))
1814                        return -EFAULT;
1815                ++indices;
1816        }
1817        if (cpu_guest_has_badinstr) {
1818                index = KVM_REG_MIPS_CP0_BADINSTR;
1819                if (copy_to_user(indices, &index, sizeof(index)))
1820                        return -EFAULT;
1821                ++indices;
1822        }
1823        if (cpu_guest_has_badinstrp) {
1824                index = KVM_REG_MIPS_CP0_BADINSTRP;
1825                if (copy_to_user(indices, &index, sizeof(index)))
1826                        return -EFAULT;
1827                ++indices;
1828        }
1829        if (cpu_guest_has_contextconfig) {
1830                if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
1831                                 sizeof(kvm_vz_get_one_regs_contextconfig)))
1832                        return -EFAULT;
1833                indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1834        }
1835        if (cpu_guest_has_segments) {
1836                if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
1837                                 sizeof(kvm_vz_get_one_regs_segments)))
1838                        return -EFAULT;
1839                indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1840        }
1841        if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
1842                if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
1843                                 sizeof(kvm_vz_get_one_regs_htw)))
1844                        return -EFAULT;
1845                indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1846        }
1847        if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
1848                for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
1849                        index = KVM_REG_MIPS_CP0_MAAR(i);
1850                        if (copy_to_user(indices, &index, sizeof(index)))
1851                                return -EFAULT;
1852                        ++indices;
1853                }
1854
1855                index = KVM_REG_MIPS_CP0_MAARI;
1856                if (copy_to_user(indices, &index, sizeof(index)))
1857                        return -EFAULT;
1858                ++indices;
1859        }
1860        for (i = 0; i < 6; ++i) {
1861                if (!cpu_guest_has_kscr(i + 2))
1862                        continue;
1863
1864                if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
1865                                 sizeof(kvm_vz_get_one_regs_kscratch[i])))
1866                        return -EFAULT;
1867                ++indices;
1868        }
1869
1870        return 0;
1871}
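
/*
 * kvm_vz_num_regs() and kvm_vz_copy_reg_indices() back the standard
 * KVM_GET_REG_LIST ioctl. A minimal userspace sketch of the usual
 * two-call protocol (vcpu_fd is assumed; error handling omitted):
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// fails with E2BIG,
 *							// but sets probe.n
 *	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);		// fills list->reg[]
 */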
1872
1873static inline s64 entrylo_kvm_to_user(unsigned long v)
1874{
1875        s64 mask, ret = v;
1876
1877        if (BITS_PER_LONG == 32) {
1878                /*
1879                 * KVM API exposes 64-bit version of the register, so move the
1880                 * RI/XI bits up into place.
1881                 */
1882                mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1883                ret &= ~mask;
1884                ret |= ((s64)v & mask) << 32;
1885        }
1886        return ret;
1887}
1888
1889static inline unsigned long entrylo_user_to_kvm(s64 v)
1890{
1891        unsigned long mask, ret = v;
1892
1893        if (BITS_PER_LONG == 32) {
1894                /*
1895         * KVM API exposes 64-bit version of the register, so move the
1896                 * RI/XI bits down into place.
1897                 */
1898                mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1899                ret &= ~mask;
1900                ret |= (v >> 32) & mask;
1901        }
1902        return ret;
1903}
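
/*
 * Worked example of the conversion above on a 32-bit host: the hardware
 * EntryLo has RI/XI at bits 31:30, while the KVM register API always uses
 * the 64-bit layout with RI/XI at bits 63:62.
 *
 *	entrylo_kvm_to_user(0x80000007)		// RI=1, D/V/G set
 *		== 0x8000000000000007		// RI moved up to bit 63
 *
 * On 64-bit hosts the two layouts already agree, so values pass through
 * unchanged.
 */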
1904
1905static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
1906                              const struct kvm_one_reg *reg,
1907                              s64 *v)
1908{
1909        struct mips_coproc *cop0 = vcpu->arch.cop0;
1910        unsigned int idx;
1911
1912        switch (reg->id) {
1913        case KVM_REG_MIPS_CP0_INDEX:
1914                *v = (long)read_gc0_index();
1915                break;
1916        case KVM_REG_MIPS_CP0_ENTRYLO0:
1917                *v = entrylo_kvm_to_user(read_gc0_entrylo0());
1918                break;
1919        case KVM_REG_MIPS_CP0_ENTRYLO1:
1920                *v = entrylo_kvm_to_user(read_gc0_entrylo1());
1921                break;
1922        case KVM_REG_MIPS_CP0_CONTEXT:
1923                *v = (long)read_gc0_context();
1924                break;
1925        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1926                if (!cpu_guest_has_contextconfig)
1927                        return -EINVAL;
1928                *v = read_gc0_contextconfig();
1929                break;
1930        case KVM_REG_MIPS_CP0_USERLOCAL:
1931                if (!cpu_guest_has_userlocal)
1932                        return -EINVAL;
1933                *v = read_gc0_userlocal();
1934                break;
1935#ifdef CONFIG_64BIT
1936        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1937                if (!cpu_guest_has_contextconfig)
1938                        return -EINVAL;
1939                *v = read_gc0_xcontextconfig();
1940                break;
1941#endif
1942        case KVM_REG_MIPS_CP0_PAGEMASK:
1943                *v = (long)read_gc0_pagemask();
1944                break;
1945        case KVM_REG_MIPS_CP0_PAGEGRAIN:
1946                *v = (long)read_gc0_pagegrain();
1947                break;
1948        case KVM_REG_MIPS_CP0_SEGCTL0:
1949                if (!cpu_guest_has_segments)
1950                        return -EINVAL;
1951                *v = read_gc0_segctl0();
1952                break;
1953        case KVM_REG_MIPS_CP0_SEGCTL1:
1954                if (!cpu_guest_has_segments)
1955                        return -EINVAL;
1956                *v = read_gc0_segctl1();
1957                break;
1958        case KVM_REG_MIPS_CP0_SEGCTL2:
1959                if (!cpu_guest_has_segments)
1960                        return -EINVAL;
1961                *v = read_gc0_segctl2();
1962                break;
1963        case KVM_REG_MIPS_CP0_PWBASE:
1964                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1965                        return -EINVAL;
1966                *v = read_gc0_pwbase();
1967                break;
1968        case KVM_REG_MIPS_CP0_PWFIELD:
1969                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1970                        return -EINVAL;
1971                *v = read_gc0_pwfield();
1972                break;
1973        case KVM_REG_MIPS_CP0_PWSIZE:
1974                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1975                        return -EINVAL;
1976                *v = read_gc0_pwsize();
1977                break;
1978        case KVM_REG_MIPS_CP0_WIRED:
1979                *v = (long)read_gc0_wired();
1980                break;
1981        case KVM_REG_MIPS_CP0_PWCTL:
1982                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
1983                        return -EINVAL;
1984                *v = read_gc0_pwctl();
1985                break;
1986        case KVM_REG_MIPS_CP0_HWRENA:
1987                *v = (long)read_gc0_hwrena();
1988                break;
1989        case KVM_REG_MIPS_CP0_BADVADDR:
1990                *v = (long)read_gc0_badvaddr();
1991                break;
1992        case KVM_REG_MIPS_CP0_BADINSTR:
1993                if (!cpu_guest_has_badinstr)
1994                        return -EINVAL;
1995                *v = read_gc0_badinstr();
1996                break;
1997        case KVM_REG_MIPS_CP0_BADINSTRP:
1998                if (!cpu_guest_has_badinstrp)
1999                        return -EINVAL;
2000                *v = read_gc0_badinstrp();
2001                break;
2002        case KVM_REG_MIPS_CP0_COUNT:
2003                *v = kvm_mips_read_count(vcpu);
2004                break;
2005        case KVM_REG_MIPS_CP0_ENTRYHI:
2006                *v = (long)read_gc0_entryhi();
2007                break;
2008        case KVM_REG_MIPS_CP0_COMPARE:
2009                *v = (long)read_gc0_compare();
2010                break;
2011        case KVM_REG_MIPS_CP0_STATUS:
2012                *v = (long)read_gc0_status();
2013                break;
2014        case KVM_REG_MIPS_CP0_INTCTL:
2015                *v = read_gc0_intctl();
2016                break;
2017        case KVM_REG_MIPS_CP0_CAUSE:
2018                *v = (long)read_gc0_cause();
2019                break;
2020        case KVM_REG_MIPS_CP0_EPC:
2021                *v = (long)read_gc0_epc();
2022                break;
2023        case KVM_REG_MIPS_CP0_PRID:
2024                switch (boot_cpu_type()) {
2025                case CPU_CAVIUM_OCTEON3:
2026                        /* Octeon III has a read-only guest.PRid */
2027                        *v = read_gc0_prid();
2028                        break;
2029                default:
2030                        *v = (long)kvm_read_c0_guest_prid(cop0);
2031                        break;
2032                }
2033                break;
2034        case KVM_REG_MIPS_CP0_EBASE:
2035                *v = kvm_vz_read_gc0_ebase();
2036                break;
2037        case KVM_REG_MIPS_CP0_CONFIG:
2038                *v = read_gc0_config();
2039                break;
2040        case KVM_REG_MIPS_CP0_CONFIG1:
2041                if (!cpu_guest_has_conf1)
2042                        return -EINVAL;
2043                *v = read_gc0_config1();
2044                break;
2045        case KVM_REG_MIPS_CP0_CONFIG2:
2046                if (!cpu_guest_has_conf2)
2047                        return -EINVAL;
2048                *v = read_gc0_config2();
2049                break;
2050        case KVM_REG_MIPS_CP0_CONFIG3:
2051                if (!cpu_guest_has_conf3)
2052                        return -EINVAL;
2053                *v = read_gc0_config3();
2054                break;
2055        case KVM_REG_MIPS_CP0_CONFIG4:
2056                if (!cpu_guest_has_conf4)
2057                        return -EINVAL;
2058                *v = read_gc0_config4();
2059                break;
2060        case KVM_REG_MIPS_CP0_CONFIG5:
2061                if (!cpu_guest_has_conf5)
2062                        return -EINVAL;
2063                *v = read_gc0_config5();
2064                break;
2065        case KVM_REG_MIPS_CP0_CONFIG6:
2066                *v = kvm_read_sw_gc0_config6(cop0);
2067                break;
2068        case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2069                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2070                        return -EINVAL;
2071                idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2072                if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2073                        return -EINVAL;
2074                *v = vcpu->arch.maar[idx];
2075                break;
2076        case KVM_REG_MIPS_CP0_MAARI:
2077                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2078                        return -EINVAL;
2079                *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
2080                break;
2081#ifdef CONFIG_64BIT
2082        case KVM_REG_MIPS_CP0_XCONTEXT:
2083                *v = read_gc0_xcontext();
2084                break;
2085#endif
2086        case KVM_REG_MIPS_CP0_ERROREPC:
2087                *v = (long)read_gc0_errorepc();
2088                break;
2089        case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2090                idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2091                if (!cpu_guest_has_kscr(idx))
2092                        return -EINVAL;
2093                switch (idx) {
2094                case 2:
2095                        *v = (long)read_gc0_kscratch1();
2096                        break;
2097                case 3:
2098                        *v = (long)read_gc0_kscratch2();
2099                        break;
2100                case 4:
2101                        *v = (long)read_gc0_kscratch3();
2102                        break;
2103                case 5:
2104                        *v = (long)read_gc0_kscratch4();
2105                        break;
2106                case 6:
2107                        *v = (long)read_gc0_kscratch5();
2108                        break;
2109                case 7:
2110                        *v = (long)read_gc0_kscratch6();
2111                        break;
2112                }
2113                break;
2114        case KVM_REG_MIPS_COUNT_CTL:
2115                *v = vcpu->arch.count_ctl;
2116                break;
2117        case KVM_REG_MIPS_COUNT_RESUME:
2118                *v = ktime_to_ns(vcpu->arch.count_resume);
2119                break;
2120        case KVM_REG_MIPS_COUNT_HZ:
2121                *v = vcpu->arch.count_hz;
2122                break;
2123        default:
2124                return -EINVAL;
2125        }
2126        return 0;
2127}
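
/*
 * Individual registers from the switch above are accessed through the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A minimal userspace sketch
 * (vcpu_fd is assumed; error handling omitted):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_CP0_COUNT,
 *		.addr = (__u64)(unsigned long)&val,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// kvm_vz_get_one_reg() path
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// kvm_vz_set_one_reg() below
 */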
2128
2129static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
2130                              const struct kvm_one_reg *reg,
2131                              s64 v)
2132{
2133        struct mips_coproc *cop0 = vcpu->arch.cop0;
2134        unsigned int idx;
2135        int ret = 0;
2136        unsigned int cur, change;
2137
2138        switch (reg->id) {
2139        case KVM_REG_MIPS_CP0_INDEX:
2140                write_gc0_index(v);
2141                break;
2142        case KVM_REG_MIPS_CP0_ENTRYLO0:
2143                write_gc0_entrylo0(entrylo_user_to_kvm(v));
2144                break;
2145        case KVM_REG_MIPS_CP0_ENTRYLO1:
2146                write_gc0_entrylo1(entrylo_user_to_kvm(v));
2147                break;
2148        case KVM_REG_MIPS_CP0_CONTEXT:
2149                write_gc0_context(v);
2150                break;
2151        case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
2152                if (!cpu_guest_has_contextconfig)
2153                        return -EINVAL;
2154                write_gc0_contextconfig(v);
2155                break;
2156        case KVM_REG_MIPS_CP0_USERLOCAL:
2157                if (!cpu_guest_has_userlocal)
2158                        return -EINVAL;
2159                write_gc0_userlocal(v);
2160                break;
2161#ifdef CONFIG_64BIT
2162        case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
2163                if (!cpu_guest_has_contextconfig)
2164                        return -EINVAL;
2165                write_gc0_xcontextconfig(v);
2166                break;
2167#endif
2168        case KVM_REG_MIPS_CP0_PAGEMASK:
2169                write_gc0_pagemask(v);
2170                break;
2171        case KVM_REG_MIPS_CP0_PAGEGRAIN:
2172                write_gc0_pagegrain(v);
2173                break;
2174        case KVM_REG_MIPS_CP0_SEGCTL0:
2175                if (!cpu_guest_has_segments)
2176                        return -EINVAL;
2177                write_gc0_segctl0(v);
2178                break;
2179        case KVM_REG_MIPS_CP0_SEGCTL1:
2180                if (!cpu_guest_has_segments)
2181                        return -EINVAL;
2182                write_gc0_segctl1(v);
2183                break;
2184        case KVM_REG_MIPS_CP0_SEGCTL2:
2185                if (!cpu_guest_has_segments)
2186                        return -EINVAL;
2187                write_gc0_segctl2(v);
2188                break;
2189        case KVM_REG_MIPS_CP0_PWBASE:
2190                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2191                        return -EINVAL;
2192                write_gc0_pwbase(v);
2193                break;
2194        case KVM_REG_MIPS_CP0_PWFIELD:
2195                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2196                        return -EINVAL;
2197                write_gc0_pwfield(v);
2198                break;
2199        case KVM_REG_MIPS_CP0_PWSIZE:
2200                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2201                        return -EINVAL;
2202                write_gc0_pwsize(v);
2203                break;
2204        case KVM_REG_MIPS_CP0_WIRED:
2205                change_gc0_wired(MIPSR6_WIRED_WIRED, v);
2206                break;
2207        case KVM_REG_MIPS_CP0_PWCTL:
2208                if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
2209                        return -EINVAL;
2210                write_gc0_pwctl(v);
2211                break;
2212        case KVM_REG_MIPS_CP0_HWRENA:
2213                write_gc0_hwrena(v);
2214                break;
2215        case KVM_REG_MIPS_CP0_BADVADDR:
2216                write_gc0_badvaddr(v);
2217                break;
2218        case KVM_REG_MIPS_CP0_BADINSTR:
2219                if (!cpu_guest_has_badinstr)
2220                        return -EINVAL;
2221                write_gc0_badinstr(v);
2222                break;
2223        case KVM_REG_MIPS_CP0_BADINSTRP:
2224                if (!cpu_guest_has_badinstrp)
2225                        return -EINVAL;
2226                write_gc0_badinstrp(v);
2227                break;
2228        case KVM_REG_MIPS_CP0_COUNT:
2229                kvm_mips_write_count(vcpu, v);
2230                break;
2231        case KVM_REG_MIPS_CP0_ENTRYHI:
2232                write_gc0_entryhi(v);
2233                break;
2234        case KVM_REG_MIPS_CP0_COMPARE:
2235                kvm_mips_write_compare(vcpu, v, false);
2236                break;
2237        case KVM_REG_MIPS_CP0_STATUS:
2238                write_gc0_status(v);
2239                break;
2240        case KVM_REG_MIPS_CP0_INTCTL:
2241                write_gc0_intctl(v);
2242                break;
2243        case KVM_REG_MIPS_CP0_CAUSE:
2244                /*
2245                 * If the timer is stopped or started (DC bit) it must look
2246                 * atomic with changes to the timer interrupt pending bit (TI).
2247                 * A timer interrupt should not happen in between.
2248                 */
2249                if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
2250                        if (v & CAUSEF_DC) {
2251                                /* disable timer first */
2252                                kvm_mips_count_disable_cause(vcpu);
2253                                change_gc0_cause((u32)~CAUSEF_DC, v);
2254                        } else {
2255                                /* enable timer last */
2256                                change_gc0_cause((u32)~CAUSEF_DC, v);
2257                                kvm_mips_count_enable_cause(vcpu);
2258                        }
2259                } else {
2260                        write_gc0_cause(v);
2261                }
2262                break;
2263        case KVM_REG_MIPS_CP0_EPC:
2264                write_gc0_epc(v);
2265                break;
2266        case KVM_REG_MIPS_CP0_PRID:
2267                switch (boot_cpu_type()) {
2268                case CPU_CAVIUM_OCTEON3:
2269                        /* Octeon III has a guest.PRid, but it's read-only */
2270                        break;
2271                default:
2272                        kvm_write_c0_guest_prid(cop0, v);
2273                        break;
2274                }
2275                break;
2276        case KVM_REG_MIPS_CP0_EBASE:
2277                kvm_vz_write_gc0_ebase(v);
2278                break;
2279        case KVM_REG_MIPS_CP0_CONFIG:
2280                cur = read_gc0_config();
2281                change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
2282                if (change) {
2283                        v = cur ^ change;
2284                        write_gc0_config(v);
2285                }
2286                break;
2287        case KVM_REG_MIPS_CP0_CONFIG1:
2288                if (!cpu_guest_has_conf1)
2289                        break;
2290                cur = read_gc0_config1();
2291                change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
2292                if (change) {
2293                        v = cur ^ change;
2294                        write_gc0_config1(v);
2295                }
2296                break;
2297        case KVM_REG_MIPS_CP0_CONFIG2:
2298                if (!cpu_guest_has_conf2)
2299                        break;
2300                cur = read_gc0_config2();
2301                change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
2302                if (change) {
2303                        v = cur ^ change;
2304                        write_gc0_config2(v);
2305                }
2306                break;
2307        case KVM_REG_MIPS_CP0_CONFIG3:
2308                if (!cpu_guest_has_conf3)
2309                        break;
2310                cur = read_gc0_config3();
2311                change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
2312                if (change) {
2313                        v = cur ^ change;
2314                        write_gc0_config3(v);
2315                }
2316                break;
2317        case KVM_REG_MIPS_CP0_CONFIG4:
2318                if (!cpu_guest_has_conf4)
2319                        break;
2320                cur = read_gc0_config4();
2321                change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
2322                if (change) {
2323                        v = cur ^ change;
2324                        write_gc0_config4(v);
2325                }
2326                break;
2327        case KVM_REG_MIPS_CP0_CONFIG5:
2328                if (!cpu_guest_has_conf5)
2329                        break;
2330                cur = read_gc0_config5();
2331                change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
2332                if (change) {
2333                        v = cur ^ change;
2334                        write_gc0_config5(v);
2335                }
2336                break;
2337        case KVM_REG_MIPS_CP0_CONFIG6:
2338                cur = kvm_read_sw_gc0_config6(cop0);
2339                change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu);
2340                if (change) {
2341                        v = cur ^ change;
2342                        kvm_write_sw_gc0_config6(cop0, (int)v);
2343                }
2344                break;
2345        case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2346                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2347                        return -EINVAL;
2348                idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2349                if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2350                        return -EINVAL;
2351                vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
2352                break;
2353        case KVM_REG_MIPS_CP0_MAARI:
2354                if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2355                        return -EINVAL;
2356                kvm_write_maari(vcpu, v);
2357                break;
2358#ifdef CONFIG_64BIT
2359        case KVM_REG_MIPS_CP0_XCONTEXT:
2360                write_gc0_xcontext(v);
2361                break;
2362#endif
2363        case KVM_REG_MIPS_CP0_ERROREPC:
2364                write_gc0_errorepc(v);
2365                break;
2366        case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2367                idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2368                if (!cpu_guest_has_kscr(idx))
2369                        return -EINVAL;
2370                switch (idx) {
2371                case 2:
2372                        write_gc0_kscratch1(v);
2373                        break;
2374                case 3:
2375                        write_gc0_kscratch2(v);
2376                        break;
2377                case 4:
2378                        write_gc0_kscratch3(v);
2379                        break;
2380                case 5:
2381                        write_gc0_kscratch4(v);
2382                        break;
2383                case 6:
2384                        write_gc0_kscratch5(v);
2385                        break;
2386                case 7:
2387                        write_gc0_kscratch6(v);
2388                        break;
2389                }
2390                break;
2391        case KVM_REG_MIPS_COUNT_CTL:
2392                ret = kvm_mips_set_count_ctl(vcpu, v);
2393                break;
2394        case KVM_REG_MIPS_COUNT_RESUME:
2395                ret = kvm_mips_set_count_resume(vcpu, v);
2396                break;
2397        case KVM_REG_MIPS_COUNT_HZ:
2398                ret = kvm_mips_set_count_hz(vcpu, v);
2399                break;
2400        default:
2401                return -EINVAL;
2402        }
2403        return ret;
2404}
2405
2406#define guestid_cache(cpu)      (cpu_data[cpu].guestid_cache)
2407static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
2408{
2409        unsigned long guestid = guestid_cache(cpu);
2410
2411        if (!(++guestid & GUESTID_MASK)) {
2412                if (cpu_has_vtag_icache)
2413                        flush_icache_all();
2414
2415                if (!guestid)           /* fix version if needed */
2416                        guestid = GUESTID_FIRST_VERSION;
2417
2418                ++guestid;              /* guestid 0 reserved for root */
2419
2420                /* start new guestid cycle */
2421                kvm_vz_local_flush_roottlb_all_guests();
2422                kvm_vz_local_flush_guesttlb_all();
2423        }
2424
2425        guestid_cache(cpu) = guestid;
2426}
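
/*
 * Worked example of the GuestID allocator above: the low GUESTID_MASK bits
 * are the hardware GuestID and the bits above them act as a software
 * "version". Assuming, purely for illustration, an 8-bit GuestID field
 * (GUESTID_MASK == 0xff) and guestid_cache(cpu) == 0x1ff:
 *
 *	++guestid	-> 0x200, low bits wrapped to 0
 *	++guestid	-> 0x201, skipping ID 0 (reserved for root)
 *	both TLBs flushed, so stale 0x1xx GuestIDs can no longer hit
 *
 * Any VCPU still holding a 0x1xx value fails the GUESTID_VERSION_MASK
 * check in kvm_vz_vcpu_load_tlb() below and is handed a fresh GuestID.
 */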
2427
2428/* Returns 1 if the guest TLB may be clobbered */
2429static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
2430{
2431        int ret = 0;
2432        int i;
2433
2434        if (!kvm_request_pending(vcpu))
2435                return 0;
2436
2437        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2438                if (cpu_has_guestid) {
2439                        /* Drop all GuestIDs for this VCPU */
2440                        for_each_possible_cpu(i)
2441                                vcpu->arch.vzguestid[i] = 0;
2442                        /* This will clobber guest TLB contents too */
2443                        ret = 1;
2444                }
2445                /*
2446                 * For Root ASID Dealias (RAD) we don't do anything here, but we
2447                 * still need the request to ensure we recheck asid_flush_mask.
2448                 * We can still return 0 as only the root TLB will be affected
2449                 * by a root ASID flush.
2450                 */
2451        }
2452
2453        return ret;
2454}
2455
2456static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
2457{
2458        unsigned int wired = read_gc0_wired();
2459        struct kvm_mips_tlb *tlbs;
2460        int i;
2461
2462        /* Expand the wired TLB array if necessary */
2463        wired &= MIPSR6_WIRED_WIRED;
2464        if (wired > vcpu->arch.wired_tlb_limit) {
2465                tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2466                                sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2467                if (WARN_ON(!tlbs)) {
2468                        /* Save whatever we can */
2469                        wired = vcpu->arch.wired_tlb_limit;
2470                } else {
2471                        vcpu->arch.wired_tlb = tlbs;
2472                        vcpu->arch.wired_tlb_limit = wired;
2473                }
2474        }
2475
2476        if (wired)
2477                /* Save wired entries from the guest TLB */
2478                kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2479        /* Invalidate any dropped entries since last time */
2480        for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2481                vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2482                vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2483                vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2484                vcpu->arch.wired_tlb[i].tlb_mask = 0;
2485        }
2486        vcpu->arch.wired_tlb_used = wired;
2487}
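
/*
 * Note on the invalidation loop above: entries that were wired last time
 * but no longer are get their saved copy overwritten with
 * UNIQUE_GUEST_ENTRYHI(i), a per-slot EntryHi value that cannot match any
 * real guest virtual address, with the lo/mask words zeroed. When
 * kvm_vz_vcpu_load_wired() later reloads the array, those slots become
 * harmless non-matching entries instead of stale translations.
 */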
2488
2489static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
2490{
2491        /* Load wired entries into the guest TLB */
2492        if (vcpu->arch.wired_tlb)
2493                kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2494                                     vcpu->arch.wired_tlb_used);
2495}
2496
2497static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
2498{
2499        struct kvm *kvm = vcpu->kvm;
2500        struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2501        bool migrated;
2502
2503        /*
2504         * Are we entering guest context on a different CPU to last time?
2505         * If so, the VCPU's guest TLB state on this CPU may be stale.
2506         */
2507        migrated = (vcpu->arch.last_exec_cpu != cpu);
2508        vcpu->arch.last_exec_cpu = cpu;
2509
2510        /*
2511         * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
2512         * remains set until another vcpu is loaded in.  As a rule GuestRID
2513         * remains zeroed when in root context unless the kernel is busy
2514         * manipulating guest tlb entries.
2515         */
2516        if (cpu_has_guestid) {
2517                /*
2518                 * Check if our GuestID is of an older version and thus invalid.
2519                 *
2520                 * We also discard the stored GuestID if we've executed on
2521                 * another CPU, as the guest mappings may have changed without
2522                 * hypervisor knowledge.
2523                 */
2524                if (migrated ||
2525                    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2526                                        GUESTID_VERSION_MASK) {
2527                        kvm_vz_get_new_guestid(cpu, vcpu);
2528                        vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2529                        trace_kvm_guestid_change(vcpu,
2530                                                 vcpu->arch.vzguestid[cpu]);
2531                }
2532
2533                /* Restore GuestID */
2534                change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
2535        } else {
2536                /*
2537                 * The Guest TLB only stores a single guest's TLB state, so
2538                 * flush it if another VCPU has executed on this CPU.
2539                 *
2540                 * We also flush if we've executed on another CPU, as the guest
2541                 * mappings may have changed without hypervisor knowledge.
2542                 */
2543                if (migrated || last_exec_vcpu[cpu] != vcpu)
2544                        kvm_vz_local_flush_guesttlb_all();
2545                last_exec_vcpu[cpu] = vcpu;
2546
2547                /*
2548                 * Root ASID dealiases guest GPA mappings in the root TLB.
2549                 * Allocate new root ASID if needed.
2550                 */
2551                if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
2552                        get_new_mmu_context(gpa_mm);
2553                else
2554                        check_mmu_context(gpa_mm);
2555        }
2556}
2557
2558static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2559{
2560        struct mips_coproc *cop0 = vcpu->arch.cop0;
2561        bool migrated, all;
2562
2563        /*
2564         * Have we migrated to a different CPU?
2565         * If so, any old guest TLB state may be stale.
2566         */
2567        migrated = (vcpu->arch.last_sched_cpu != cpu);
2568
2569        /*
2570         * Was this the last VCPU to run on this CPU?
2571         * If not, any old guest state from this VCPU will have been clobbered.
2572         */
2573        all = migrated || (last_vcpu[cpu] != vcpu);
2574        last_vcpu[cpu] = vcpu;
2575
2576        /*
2577         * Restore CP0_Wired unconditionally as we clear it after use, and
2578         * restore wired guest TLB entries (while in guest context).
2579         */
2580        kvm_restore_gc0_wired(cop0);
2581        if (current->flags & PF_VCPU) {
2582                tlbw_use_hazard();
2583                kvm_vz_vcpu_load_tlb(vcpu, cpu);
2584                kvm_vz_vcpu_load_wired(vcpu);
2585        }
2586
2587        /*
2588         * Restore timer state regardless, as e.g. Cause.TI can change over time
2589         * if left unmaintained.
2590         */
2591        kvm_vz_restore_timer(vcpu);
2592
2593        /* Set MC bit if we want to trace guest mode changes */
2594        if (kvm_trace_guest_mode_change)
2595                set_c0_guestctl0(MIPS_GCTL0_MC);
2596        else
2597                clear_c0_guestctl0(MIPS_GCTL0_MC);
2598
2599        /* Don't bother restoring registers multiple times unless necessary */
2600        if (!all)
2601                return 0;
2602
2603        /*
2604         * Restore config registers first, as some implementations restrict
2605         * writes to other registers when the corresponding feature bits aren't
2606         * set. For example Status.CU1 cannot be set unless Config1.FP is set.
2607         */
2608        kvm_restore_gc0_config(cop0);
2609        if (cpu_guest_has_conf1)
2610                kvm_restore_gc0_config1(cop0);
2611        if (cpu_guest_has_conf2)
2612                kvm_restore_gc0_config2(cop0);
2613        if (cpu_guest_has_conf3)
2614                kvm_restore_gc0_config3(cop0);
2615        if (cpu_guest_has_conf4)
2616                kvm_restore_gc0_config4(cop0);
2617        if (cpu_guest_has_conf5)
2618                kvm_restore_gc0_config5(cop0);
2619        if (cpu_guest_has_conf6)
2620                kvm_restore_gc0_config6(cop0);
2621        if (cpu_guest_has_conf7)
2622                kvm_restore_gc0_config7(cop0);
2623
2624        kvm_restore_gc0_index(cop0);
2625        kvm_restore_gc0_entrylo0(cop0);
2626        kvm_restore_gc0_entrylo1(cop0);
2627        kvm_restore_gc0_context(cop0);
2628        if (cpu_guest_has_contextconfig)
2629                kvm_restore_gc0_contextconfig(cop0);
2630#ifdef CONFIG_64BIT
2631        kvm_restore_gc0_xcontext(cop0);
2632        if (cpu_guest_has_contextconfig)
2633                kvm_restore_gc0_xcontextconfig(cop0);
2634#endif
2635        kvm_restore_gc0_pagemask(cop0);
2636        kvm_restore_gc0_pagegrain(cop0);
2637        kvm_restore_gc0_hwrena(cop0);
2638        kvm_restore_gc0_badvaddr(cop0);
2639        kvm_restore_gc0_entryhi(cop0);
2640        kvm_restore_gc0_status(cop0);
2641        kvm_restore_gc0_intctl(cop0);
2642        kvm_restore_gc0_epc(cop0);
2643        kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
2644        if (cpu_guest_has_userlocal)
2645                kvm_restore_gc0_userlocal(cop0);
2646
2647        kvm_restore_gc0_errorepc(cop0);
2648
2649        /* restore KScratch registers if enabled in guest */
2650        if (cpu_guest_has_conf4) {
2651                if (cpu_guest_has_kscr(2))
2652                        kvm_restore_gc0_kscratch1(cop0);
2653                if (cpu_guest_has_kscr(3))
2654                        kvm_restore_gc0_kscratch2(cop0);
2655                if (cpu_guest_has_kscr(4))
2656                        kvm_restore_gc0_kscratch3(cop0);
2657                if (cpu_guest_has_kscr(5))
2658                        kvm_restore_gc0_kscratch4(cop0);
2659                if (cpu_guest_has_kscr(6))
2660                        kvm_restore_gc0_kscratch5(cop0);
2661                if (cpu_guest_has_kscr(7))
2662                        kvm_restore_gc0_kscratch6(cop0);
2663        }
2664
2665        if (cpu_guest_has_badinstr)
2666                kvm_restore_gc0_badinstr(cop0);
2667        if (cpu_guest_has_badinstrp)
2668                kvm_restore_gc0_badinstrp(cop0);
2669
2670        if (cpu_guest_has_segments) {
2671                kvm_restore_gc0_segctl0(cop0);
2672                kvm_restore_gc0_segctl1(cop0);
2673                kvm_restore_gc0_segctl2(cop0);
2674        }
2675
2676        /* restore HTW registers */
2677        if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
2678                kvm_restore_gc0_pwbase(cop0);
2679                kvm_restore_gc0_pwfield(cop0);
2680                kvm_restore_gc0_pwsize(cop0);
2681                kvm_restore_gc0_pwctl(cop0);
2682        }
2683
2684        /* restore Root.GuestCtl2 from unused Guest guestctl2 register */
2685        if (cpu_has_guestctl2)
2686                write_c0_guestctl2(
2687                        cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
2688
2689        /*
2690         * We should clear linked load bit to break interrupted atomics. This
2691         * prevents a SC on the next VCPU from succeeding by matching a LL on
2692         * the previous VCPU.
2693         */
2694        if (vcpu->kvm->created_vcpus > 1)
2695                write_gc0_lladdr(0);
2696
2697        return 0;
2698}
2699
2700static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
2701{
2702        struct mips_coproc *cop0 = vcpu->arch.cop0;
2703
2704        if (current->flags & PF_VCPU)
2705                kvm_vz_vcpu_save_wired(vcpu);
2706
2707        kvm_lose_fpu(vcpu);
2708
2709        kvm_save_gc0_index(cop0);
2710        kvm_save_gc0_entrylo0(cop0);
2711        kvm_save_gc0_entrylo1(cop0);
2712        kvm_save_gc0_context(cop0);
2713        if (cpu_guest_has_contextconfig)
2714                kvm_save_gc0_contextconfig(cop0);
2715#ifdef CONFIG_64BIT
2716        kvm_save_gc0_xcontext(cop0);
2717        if (cpu_guest_has_contextconfig)
2718                kvm_save_gc0_xcontextconfig(cop0);
2719#endif
2720        kvm_save_gc0_pagemask(cop0);
2721        kvm_save_gc0_pagegrain(cop0);
2722        kvm_save_gc0_wired(cop0);
2723        /* allow wired TLB entries to be overwritten */
2724        clear_gc0_wired(MIPSR6_WIRED_WIRED);
2725        kvm_save_gc0_hwrena(cop0);
2726        kvm_save_gc0_badvaddr(cop0);
2727        kvm_save_gc0_entryhi(cop0);
2728        kvm_save_gc0_status(cop0);
2729        kvm_save_gc0_intctl(cop0);
2730        kvm_save_gc0_epc(cop0);
2731        kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
2732        if (cpu_guest_has_userlocal)
2733                kvm_save_gc0_userlocal(cop0);
2734
2735        /* only save implemented config registers */
2736        kvm_save_gc0_config(cop0);
2737        if (cpu_guest_has_conf1)
2738                kvm_save_gc0_config1(cop0);
2739        if (cpu_guest_has_conf2)
2740                kvm_save_gc0_config2(cop0);
2741        if (cpu_guest_has_conf3)
2742                kvm_save_gc0_config3(cop0);
2743        if (cpu_guest_has_conf4)
2744                kvm_save_gc0_config4(cop0);
2745        if (cpu_guest_has_conf5)
2746                kvm_save_gc0_config5(cop0);
2747        if (cpu_guest_has_conf6)
2748                kvm_save_gc0_config6(cop0);
2749        if (cpu_guest_has_conf7)
2750                kvm_save_gc0_config7(cop0);
2751
2752        kvm_save_gc0_errorepc(cop0);
2753
2754        /* save KScratch registers if enabled in guest */
2755        if (cpu_guest_has_conf4) {
2756                if (cpu_guest_has_kscr(2))
2757                        kvm_save_gc0_kscratch1(cop0);
2758                if (cpu_guest_has_kscr(3))
2759                        kvm_save_gc0_kscratch2(cop0);
2760                if (cpu_guest_has_kscr(4))
2761                        kvm_save_gc0_kscratch3(cop0);
2762                if (cpu_guest_has_kscr(5))
2763                        kvm_save_gc0_kscratch4(cop0);
2764                if (cpu_guest_has_kscr(6))
2765                        kvm_save_gc0_kscratch5(cop0);
2766                if (cpu_guest_has_kscr(7))
2767                        kvm_save_gc0_kscratch6(cop0);
2768        }
2769
2770        if (cpu_guest_has_badinstr)
2771                kvm_save_gc0_badinstr(cop0);
2772        if (cpu_guest_has_badinstrp)
2773                kvm_save_gc0_badinstrp(cop0);
2774
2775        if (cpu_guest_has_segments) {
2776                kvm_save_gc0_segctl0(cop0);
2777                kvm_save_gc0_segctl1(cop0);
2778                kvm_save_gc0_segctl2(cop0);
2779        }
2780
2781        /* save HTW registers if enabled in guest */
2782        if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
2783            kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
2784                kvm_save_gc0_pwbase(cop0);
2785                kvm_save_gc0_pwfield(cop0);
2786                kvm_save_gc0_pwsize(cop0);
2787                kvm_save_gc0_pwctl(cop0);
2788        }
2789
2790        kvm_vz_save_timer(vcpu);
2791
2792        /* save Root.GuestCtl2 in unused Guest guestctl2 register */
2793        if (cpu_has_guestctl2)
2794                cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
2795                        read_c0_guestctl2();
2796
2797        return 0;
2798}
2799
2800/**
2801 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2802 * @size:       Number of guest VTLB entries (0 < @size <= root VTLB entries).
2803 *
2804 * Attempt to resize the guest VTLB by writing guest Config registers. This is
2805 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2806 * entries in the root VTLB.
2807 *
2808 * Returns:     The resulting guest VTLB size.
2809 */
2810static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
2811{
2812        unsigned int config4 = 0, ret = 0, limit;
2813
2814        /* Write MMUSize - 1 into guest Config registers */
2815        if (cpu_guest_has_conf1)
2816                change_gc0_config1(MIPS_CONF1_TLBS,
2817                                   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
2818        if (cpu_guest_has_conf4) {
2819                config4 = read_gc0_config4();
2820                if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2821                    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
2822                        config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
2823                        config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2824                                MIPS_CONF4_VTLBSIZEEXT_SHIFT;
2825                } else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2826                           MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
2827                        config4 &= ~MIPS_CONF4_MMUSIZEEXT;
2828                        config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2829                                MIPS_CONF4_MMUSIZEEXT_SHIFT;
2830                }
2831                write_gc0_config4(config4);
2832        }
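            /*
             * Illustrative example: for size = 128, size - 1 = 127 is split
             * as 127 & 63 = 63 into Config1.MMUSize and 127 >> 6 = 1 into
             * Config4.VTLBSizeExt (or Config4.MMUSizeExt, depending on
             * Config4.MMUExtDef), MIPS_CONF1_TLBS_SIZE being the 6-bit
             * width of the Config1 field.
             */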
2833
2834        /*
2835         * Set Guest.Wired.Limit = 0 (no limit, up to Guest.MMUSize - 1) unless
2836         * that would exceed Root.Wired.Limit, in which case clamp to it.
2837         * Guest.Wired.Wired is cleared as well so the write is not dropped.
2838         */
2839        if (cpu_has_mips_r6) {
2840                limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
2841                                                MIPSR6_WIRED_LIMIT_SHIFT;
2842                if (size - 1 <= limit)
2843                        limit = 0;
2844                write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
2845        }
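            /*
             * Illustrative numbers: with Root.Wired.Limit = 48, a 32-entry
             * guest VTLB gives 31 <= 48, so Guest.Wired.Limit is written as
             * 0 (no limit); a 64-entry guest VTLB gives 63 > 48, so the
             * guest limit is clamped to 48.
             */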
2846
2847        /* Read back MMUSize - 1 */
2848        back_to_back_c0_hazard();
2849        if (cpu_guest_has_conf1)
2850                ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
2851                                                MIPS_CONF1_TLBS_SHIFT;
2852        if (config4) {
2853                if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2854                    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
2855                        ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
2856                                MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
2857                                MIPS_CONF1_TLBS_SIZE;
2858                else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2859                         MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
2860                        ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
2861                                MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
2862                                MIPS_CONF1_TLBS_SIZE;
2863        }
2864        return ret + 1;
2865}
2866
2867static int kvm_vz_hardware_enable(void)
2868{
2869        unsigned int mmu_size, guest_mmu_size, ftlb_size;
2870        u64 guest_cvmctl, cvmvmconfig;
2871
2872        switch (current_cpu_type()) {
2873        case CPU_CAVIUM_OCTEON3:
2874                /* Set up guest timer/perfcount IRQ lines */
2875                guest_cvmctl = read_gc0_cvmctl();
2876                guest_cvmctl &= ~CVMCTL_IPTI;
2877                guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
2878                guest_cvmctl &= ~CVMCTL_IPPCI;
2879                guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
2880                write_gc0_cvmctl(guest_cvmctl);
2881
2882                cvmvmconfig = read_c0_cvmvmconfig();
2883                /* No I/O hole translation. */
2884                cvmvmconfig |= CVMVMCONF_DGHT;
2885                /* Halve the root MMU size */
2886                mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
2887                            >> CVMVMCONF_MMUSIZEM1_S) + 1;
2888                guest_mmu_size = mmu_size / 2;
2889                mmu_size -= guest_mmu_size;
2890                cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
2891                cvmvmconfig |= mmu_size - 1;
2892                write_c0_cvmvmconfig(cvmvmconfig);
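                    /*
                     * Worked example (illustrative): if CVMVMCONF reports a
                     * 256-entry MMU, the guest receives 128 entries, the root
                     * keeps 128, and 127 (mmu_size - 1) goes back into the
                     * root MMU size field.
                     */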
2893
2894                /* Update our records */
2895                current_cpu_data.tlbsize = mmu_size;
2896                current_cpu_data.tlbsizevtlb = mmu_size;
2897                current_cpu_data.guest.tlbsize = guest_mmu_size;
2898
2899                /* Flush moved entries in new (guest) context */
2900                kvm_vz_local_flush_guesttlb_all();
2901                break;
2902        default:
2903                /*
2904                 * ImgTec cores tend to use a shared root/guest TLB. To avoid
2905                 * overlap of root wired and guest entries, the guest TLB may
2906                 * need resizing.
2907                 */
2908                mmu_size = current_cpu_data.tlbsizevtlb;
2909                ftlb_size = current_cpu_data.tlbsize - mmu_size;
2910
2911                /* Try switching to maximum guest VTLB size for flush */
2912                guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
2913                current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2914                kvm_vz_local_flush_guesttlb_all();
2915
2916                /*
2917                 * Reduce to make space for root wired entries and at least 2
2918                 * root non-wired entries. This does assume that long-term wired
2919                 * entries won't be added later.
2920                 */
2921                guest_mmu_size = mmu_size - num_wired_entries() - 2;
2922                guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
2923                current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
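                    /*
                     * E.g. (illustrative) a 64-entry shared VTLB with 6 wired
                     * root entries leaves 64 - 6 - 2 = 56 entries for the
                     * guest, subject to what kvm_vz_resize_guest_vtlb() reads
                     * back from the core.
                     */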
2924
2925                /*
2926                 * Record the VTLB size, but if another CPU has already recorded
2927                 * one, check that it matches, as otherwise we cannot present a
2928                 * consistent MMU size to the guest. A mismatch here suggests an
2929                 * asymmetric number of wired entries across CPUs.
2930                 */
2931                if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
2932                    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
2933                         "Available guest VTLB size mismatch"))
2934                        return -EINVAL;
2935                break;
2936        }
2937
2938        /*
2939         * Enable virtualization features granting guest direct control of
2940         * certain features:
2941         * CP0=1:       Guest coprocessor 0 context.
2942         * AT=Guest:    Guest MMU.
2943         * CG=1:        Hit (virtual address) CACHE operations (optional).
2944         * CF=1:        Guest Config registers.
2945         * CGI=1:       Indexed flush CACHE operations (optional).
2946         */
2947        write_c0_guestctl0(MIPS_GCTL0_CP0 |
2948                           (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
2949                           MIPS_GCTL0_CG | MIPS_GCTL0_CF);
2950        if (cpu_has_guestctl0ext) {
2951                if (current_cpu_type() != CPU_LOONGSON64)
2952                        set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2953                else
2954                        clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2955        }
2956
2957        if (cpu_has_guestid) {
2958                write_c0_guestctl1(0);
2959                kvm_vz_local_flush_roottlb_all_guests();
2960
2961                GUESTID_MASK = current_cpu_data.guestid_mask;
2962                GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
2963                GUESTID_VERSION_MASK = ~GUESTID_MASK;
2964
2965                current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
2966        }
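            /*
             * Illustrative numbers: an 8-bit GuestID field (guestid_mask =
             * 0xff) yields GUESTID_FIRST_VERSION = 0x100 and
             * GUESTID_VERSION_MASK = ~0xff, so the bits above the hardware
             * GuestID act as an allocation version, in the same style as
             * MIPS ASID versioning.
             */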
2967
2968        /* clear any pending injected virtual guest interrupts (GuestCtl2.VIP) */
2969        if (cpu_has_guestctl2)
2970                clear_c0_guestctl2(0x3f << 10);
2971
2972#ifdef CONFIG_CPU_LOONGSON64
2973        /* Control guest CCA attribute */
2974        if (cpu_has_csr())
2975                csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
2976#endif
2977
2978        return 0;
2979}
2980
2981static void kvm_vz_hardware_disable(void)
2982{
2983        u64 cvmvmconfig;
2984        unsigned int mmu_size;
2985
2986        /* Flush any remaining guest TLB entries */
2987        kvm_vz_local_flush_guesttlb_all();
2988
2989        switch (current_cpu_type()) {
2990        case CPU_CAVIUM_OCTEON3:
2991                /*
2992                 * Return the whole TLB to the root. Existing guest TLB entries
2993                 * change ownership to the root TLB; this is safe because they
2994                 * were already flushed above while still in the guest context.
2995                 */
2996                cvmvmconfig = read_c0_cvmvmconfig();
2997                mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
2998                            >> CVMVMCONF_MMUSIZEM1_S) + 1;
2999                cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
3000                cvmvmconfig |= mmu_size - 1;
3001                write_c0_cvmvmconfig(cvmvmconfig);
3002
3003                /* Update our records */
3004                current_cpu_data.tlbsize = mmu_size;
3005                current_cpu_data.tlbsizevtlb = mmu_size;
3006                current_cpu_data.guest.tlbsize = 0;
3007
3008                /* Flush moved entries in new (root) context */
3009                local_flush_tlb_all();
3010                break;
3011        }
3012
3013        if (cpu_has_guestid) {
3014                write_c0_guestctl1(0);
3015                kvm_vz_local_flush_roottlb_all_guests();
3016        }
3017}
3018
3019static int kvm_vz_check_extension(struct kvm *kvm, long ext)
3020{
3021        int r;
3022
3023        switch (ext) {
3024        case KVM_CAP_MIPS_VZ:
3025                /* we wouldn't be here unless cpu_has_vz */
3026                r = 1;
3027                break;
3028#ifdef CONFIG_64BIT
3029        case KVM_CAP_MIPS_64BIT:
3030                /* We support 64-bit registers/operations and addresses */
3031                r = 2;
3032                break;
3033#endif
3034        case KVM_CAP_IOEVENTFD:
3035                r = 1;
3036                break;
3037        default:
3038                r = 0;
3039                break;
3040        }
3041
3042        return r;
3043}
3044
3045static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
3046{
3047        int i;
3048
3049        for_each_possible_cpu(i)
3050                vcpu->arch.vzguestid[i] = 0;
3051
3052        return 0;
3053}
3054
3055static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
3056{
3057        int cpu;
3058
3059        /*
3060         * If the VCPU is freed and reused as another VCPU, we don't want the
3061         * matching pointer wrongly hanging around in last_vcpu[] or
3062         * last_exec_vcpu[].
3063         */
3064        for_each_possible_cpu(cpu) {
3065                if (last_vcpu[cpu] == vcpu)
3066                        last_vcpu[cpu] = NULL;
3067                if (last_exec_vcpu[cpu] == vcpu)
3068                        last_exec_vcpu[cpu] = NULL;
3069        }
3070}
3071
3072static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
3073{
3074        struct mips_coproc *cop0 = vcpu->arch.cop0;
3075        unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
3076
3077        /*
3078         * Start off the timer at the same frequency as the host timer, but the
3079         * soft timer doesn't handle frequencies greater than 1GHz yet.
3080         */
3081        if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
3082                count_hz = mips_hpt_frequency;
3083        kvm_mips_init_count(vcpu, count_hz);
3084
3085        /*
3086         * Initialize guest register state to valid architectural reset state.
3087         */
3088
3089        /* PageGrain */
3090        if (cpu_has_mips_r5 || cpu_has_mips_r6)
3091                kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
3092        /* Wired */
3093        if (cpu_has_mips_r6)
3094                kvm_write_sw_gc0_wired(cop0,
3095                                       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
3096        /* Status */
3097        kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
3098        if (cpu_has_mips_r5 || cpu_has_mips_r6)
3099                kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
3100        /* IntCtl */
3101        kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
3102                                (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
3103        /* PRId */
3104        kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
3105        /* EBase */
3106        kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
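            /*
             * The reset exception base is the architectural 0x80000000
             * (kseg0); the VCPU id lands in the low bits, which the guest
             * reads back as EBase.CPUNum.
             */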
3107        /* Config */
3108        kvm_save_gc0_config(cop0);
3109        /* architecturally writable (e.g. from guest) */
3110        kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
3111                                 _page_cachable_default >> _CACHE_SHIFT);
3112        /* architecturally read only, but maybe writable from root */
3113        kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
3114        if (cpu_guest_has_conf1) {
3115                kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
3116                /* Config1 */
3117                kvm_save_gc0_config1(cop0);
3118                /* architecturally read only, but maybe writable from root */
3119                kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2    |
3120                                               MIPS_CONF1_MD    |
3121                                               MIPS_CONF1_PC    |
3122                                               MIPS_CONF1_WR    |
3123                                               MIPS_CONF1_CA    |
3124                                               MIPS_CONF1_FP);
3125        }
3126        if (cpu_guest_has_conf2) {
3127                kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
3128                /* Config2 */
3129                kvm_save_gc0_config2(cop0);
3130        }
3131        if (cpu_guest_has_conf3) {
3132                kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
3133                /* Config3 */
3134                kvm_save_gc0_config3(cop0);
3135                /* architecturally writable (e.g. from guest) */
3136                kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
3137                /* architecturally read only, but maybe writable from root */
3138                kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA   |
3139                                               MIPS_CONF3_BPG   |
3140                                               MIPS_CONF3_ULRI  |
3141                                               MIPS_CONF3_DSP   |
3142                                               MIPS_CONF3_CTXTC |
3143                                               MIPS_CONF3_ITL   |
3144                                               MIPS_CONF3_LPA   |
3145                                               MIPS_CONF3_VEIC  |
3146                                               MIPS_CONF3_VINT  |
3147                                               MIPS_CONF3_SP    |
3148                                               MIPS_CONF3_CDMM  |
3149                                               MIPS_CONF3_MT    |
3150                                               MIPS_CONF3_SM    |
3151                                               MIPS_CONF3_TL);
3152        }
3153        if (cpu_guest_has_conf4) {
3154                kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
3155                /* Config4 */
3156                kvm_save_gc0_config4(cop0);
3157        }
3158        if (cpu_guest_has_conf5) {
3159                kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
3160                /* Config5 */
3161                kvm_save_gc0_config5(cop0);
3162                /* architecturally writable (e.g. from guest) */
3163                kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K     |
3164                                               MIPS_CONF5_CV    |
3165                                               MIPS_CONF5_MSAEN |
3166                                               MIPS_CONF5_UFE   |
3167                                               MIPS_CONF5_FRE   |
3168                                               MIPS_CONF5_SBRI  |
3169                                               MIPS_CONF5_UFR);
3170                /* architecturally read only, but maybe writable from root */
3171                kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
3172        }
3173
3174        if (cpu_guest_has_contextconfig) {
3175                /* ContextConfig */
3176                kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
3177#ifdef CONFIG_64BIT
3178                /* XContextConfig */
3179                /* bits SEGBITS-13+3:4 set */
3180                kvm_write_sw_gc0_xcontextconfig(cop0,
3181                                        ((1ull << (cpu_vmbits - 13)) - 1) << 4);
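                    /*
                     * E.g. (illustrative) with cpu_vmbits = 48 this is
                     * ((1ull << 35) - 1) << 4 = 0x0000007ffffffff0, i.e. bits
                     * 38:4 set, matching the SEGBITS-13+3:4 note above. The
                     * 0x007ffff0 ContextConfig value likewise selects the
                     * classic layout with BadVPN2 in Context bits 22:4.
                     */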
3182#endif
3183        }
3184
3185        /* Implementation dependent, use the legacy layout */
3186        if (cpu_guest_has_segments) {
3187                /* SegCtl0, SegCtl1, SegCtl2 */
3188                kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
3189                kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
3190                                (_page_cachable_default >> _CACHE_SHIFT) <<
3191                                                (16 + MIPS_SEGCFG_C_SHIFT));
3192                kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
3193        }
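            /*
             * Note (assuming the standard legacy segment order): SegCtl1's
             * lower CFG half leaves 0xa0000000 (kseg1) uncached (C = 2),
             * while the kernel's default CCA is ORed into the upper half's C
             * field at bit 16 + MIPS_SEGCFG_C_SHIFT, giving guest kseg0 the
             * host's cacheability.
             */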
3194
3195        /* reset HTW registers */
3196        if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
3197                /* PWField */
3198                kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
3199                /* PWSize */
3200                kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
3201        }
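            /*
             * Decoding the PWField value (illustrative): 0x0c30c302 packs
             * the 6-bit fields GDI = UDI = MDI = PTI = 12 and PTEI = 2,
             * which this code presents to the guest as the walker's reset
             * state.
             */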
3202
3203        /* start with no pending virtual guest interrupts */
3204        if (cpu_has_guestctl2)
3205                cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
3206
3207        /* Put PC at the reset vector, 0xbfc00000 (CKSEG1 alias of 0x1fc00000) */
3208        vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
3209
3210        return 0;
3211}
3212
3213static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
3214{
3215        if (!cpu_has_guestid) {
3216                /*
3217                 * For each CPU there is a single GPA ASID used by all VCPUs in
3218                 * the VM, so it doesn't make sense for the VCPUs to handle
3219                 * invalidation of these ASIDs individually.
3220                 *
3221                 * Instead mark all CPUs as needing ASID invalidation in
3222                 * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
3223                 * kick any running VCPUs so they check asid_flush_mask.
3224                 */
3225                cpumask_setall(&kvm->arch.asid_flush_mask);
3226        }
3227}
3228
3229static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
3230{
3231        int cpu = smp_processor_id();
3232        int preserve_guest_tlb;
3233
3234        preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
3235
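            /*
             * kvm_vz_check_requests() reports whether the guest TLB contents
             * must survive the reload below: if so, save the wired entries
             * before kvm_vz_vcpu_load_tlb() and restore them afterwards.
             */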
3236        if (preserve_guest_tlb)
3237                kvm_vz_vcpu_save_wired(vcpu);
3238
3239        kvm_vz_vcpu_load_tlb(vcpu, cpu);
3240
3241        if (preserve_guest_tlb)
3242                kvm_vz_vcpu_load_wired(vcpu);
3243}
3244
3245static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
3246{
3247        int cpu = smp_processor_id();
3248        int r;
3249
3250        kvm_vz_acquire_htimer(vcpu);
3251        /* Check if we have any exceptions/interrupts pending */
3252        kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
3253
3254        kvm_vz_check_requests(vcpu, cpu);
3255        kvm_vz_vcpu_load_tlb(vcpu, cpu);
3256        kvm_vz_vcpu_load_wired(vcpu);
3257
3258        r = vcpu->arch.vcpu_run(vcpu);
3259
3260        kvm_vz_vcpu_save_wired(vcpu);
3261
3262        return r;
3263}
3264
3265static struct kvm_mips_callbacks kvm_vz_callbacks = {
3266        .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
3267        .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
3268        .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
3269        .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
3270        .handle_addr_err_st = kvm_trap_vz_no_handler,
3271        .handle_addr_err_ld = kvm_trap_vz_no_handler,
3272        .handle_syscall = kvm_trap_vz_no_handler,
3273        .handle_res_inst = kvm_trap_vz_no_handler,
3274        .handle_break = kvm_trap_vz_no_handler,
3275        .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
3276        .handle_guest_exit = kvm_trap_vz_handle_guest_exit,
3277
3278        .hardware_enable = kvm_vz_hardware_enable,
3279        .hardware_disable = kvm_vz_hardware_disable,
3280        .check_extension = kvm_vz_check_extension,
3281        .vcpu_init = kvm_vz_vcpu_init,
3282        .vcpu_uninit = kvm_vz_vcpu_uninit,
3283        .vcpu_setup = kvm_vz_vcpu_setup,
3284        .prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
3285        .gva_to_gpa = kvm_vz_gva_to_gpa_cb,
3286        .queue_timer_int = kvm_vz_queue_timer_int_cb,
3287        .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
3288        .queue_io_int = kvm_vz_queue_io_int_cb,
3289        .dequeue_io_int = kvm_vz_dequeue_io_int_cb,
3290        .irq_deliver = kvm_vz_irq_deliver_cb,
3291        .irq_clear = kvm_vz_irq_clear_cb,
3292        .num_regs = kvm_vz_num_regs,
3293        .copy_reg_indices = kvm_vz_copy_reg_indices,
3294        .get_one_reg = kvm_vz_get_one_reg,
3295        .set_one_reg = kvm_vz_set_one_reg,
3296        .vcpu_load = kvm_vz_vcpu_load,
3297        .vcpu_put = kvm_vz_vcpu_put,
3298        .vcpu_run = kvm_vz_vcpu_run,
3299        .vcpu_reenter = kvm_vz_vcpu_reenter,
3300};
3301
3302int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
3303{
3304        if (!cpu_has_vz)
3305                return -ENODEV;
3306
3307        /*
3308         * VZ requires at least 2 KScratch registers, so it should have been
3309         * possible to allocate pgd_reg.
3310         */
3311        if (WARN(pgd_reg == -1,
3312                 "pgd_reg not allocated even though cpu_has_vz\n"))
3313                return -ENODEV;
3314
3315        pr_info("Starting KVM with MIPS VZ extensions\n");
3316
3317        *install_callbacks = &kvm_vz_callbacks;
3318        return 0;
3319}
3320