linux/arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>

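/* No gfn aliasing on PowerPC: the guest frame number is used as-is. */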
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.msr & MSR_WE);
}

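/*
 * Run the instruction emulator on the faulting instruction and translate the
 * result into a resume code for the exit handler.  EMULATE_DO_MMIO becomes a
 * KVM_EXIT_MMIO exit so userspace can complete the access; the _NV resume
 * codes ask the caller to reload non-volatile registers, since emulation of
 * "update" forms of load/store instructions may have changed register state.
 */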
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       vcpu->arch.last_inst);
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

void kvm_arch_hardware_enable(void *garbage)
{
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

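/*
 * Only the 440 core is supported by this port so far; anything else is
 * reported as unsupported through the result pointer.
 */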
void kvm_arch_check_processor_compat(void *rtn)
{
        int r;

        if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
                r = 0;
        else
                r = -ENOTSUPP;

        *(int *)rtn = r;
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;

        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                return ERR_PTR(-ENOMEM);

        return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvmppc_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        kfree(kvm);
}

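/*
 * Report which generic KVM capabilities this architecture supports.  Only
 * userspace-managed memory slots and coalesced MMIO are advertised here.
 */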
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_USER_MEMORY:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int err;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        return vcpu;

free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

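/*
 * A pending decrementer exception is the PowerPC equivalent of a pending
 * timer interrupt.
 */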
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];

        return test_bit(priority, &vcpu->arch.pending_exceptions);
}

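/*
 * Timer callback for the emulated decrementer: queue the decrementer
 * exception and wake the vcpu if it is sleeping in a halted state.
 */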
static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
                    (unsigned long)vcpu);

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_core_destroy_mmu(vcpu);
}

/* Note: clearing MSR[DE] just means that the debug interrupt will not be
 * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
 * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
 * will be delivered as an "imprecise debug event" (which is indicated by
 * DBSR[IDE]).
 */
static void kvmppc_disable_debug_interrupts(void)
{
        mtmsr(mfmsr() & ~MSR_DE);
}

static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
{
        kvmppc_disable_debug_interrupts();

        mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
        mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
        mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
        mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
        mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
        mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
        mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
        mtmsr(vcpu->arch.host_msr);
}

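/*
 * Swap in the guest's hardware breakpoint configuration.  MSR[DE] is cleared
 * first so no debug interrupt can fire while the IAC and DBCR registers are
 * in flux; the host values are stashed in the vcpu so that
 * kvmppc_restore_host_debug_state() can put them back later.
 */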
static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
{
        struct kvm_guest_debug *dbg = &vcpu->guest_debug;
        u32 dbcr0 = 0;

        vcpu->arch.host_msr = mfmsr();
        kvmppc_disable_debug_interrupts();

        /* Save host debug register state. */
        vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
        vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
        vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
        vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
        vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
        vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
        vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);

        /* Set registers up for guest. */

        if (dbg->bp[0]) {
                mtspr(SPRN_IAC1, dbg->bp[0]);
                dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
        }
        if (dbg->bp[1]) {
                mtspr(SPRN_IAC2, dbg->bp[1]);
                dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
        }
        if (dbg->bp[2]) {
                mtspr(SPRN_IAC3, dbg->bp[2]);
                dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
        }
        if (dbg->bp[3]) {
                mtspr(SPRN_IAC4, dbg->bp[3]);
                dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
        }

        mtspr(SPRN_DBCR0, dbcr0);
        mtspr(SPRN_DBCR1, 0);
        mtspr(SPRN_DBCR2, 0);
}

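/* Called whenever this vcpu is loaded onto a host CPU to run. */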
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        int i;

        if (vcpu->guest_debug.enabled)
                kvmppc_load_guest_debug_registers(vcpu);

        /* Mark every guest entry in the shadow TLB as modified, so that they
         * will all be reloaded on the next vcpu run (instead of being
         * demand-faulted). */
        for (i = 0; i <= tlb_44x_hwater; i++)
                kvmppc_tlbe_set_modified(vcpu, i);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug.enabled)
                kvmppc_restore_host_debug_state(vcpu);

        /* Don't leave guest TLB entries resident when being de-scheduled. */
        /* XXX It would be nice to differentiate between heavyweight exit and
         * sched_out here, since we could avoid the TLB flush for heavyweight
         * exits. */
        _tlbia();
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
        int i;

        vcpu->guest_debug.enabled = dbg->enabled;
        if (vcpu->guest_debug.enabled) {
                for (i = 0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
                        if (dbg->breakpoints[i].enabled)
                                vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
                        else
                                vcpu->guest_debug.bp[i] = 0;
                }
        }

        return 0;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
        *gpr = run->dcr.data;
}

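/*
 * Copy the data that userspace returned for an emulated MMIO load into the
 * target GPR, honouring the access size and endianness that were recorded
 * when the load was handed to userspace.
 */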
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

        if (run->mmio.len > sizeof(*gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 4: *gpr = *(u32 *)run->mmio.data; break;
                case 2: *gpr = *(u16 *)run->mmio.data; break;
                case 1: *gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: *gpr = *(u8 *)run->mmio.data; break;
                }
        }
}

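/*
 * Start an emulated MMIO load: record where the result should land (GPR
 * number, size, endianness) and describe the access in the kvm_run MMIO
 * block.  The caller exits to userspace, and the data is picked up by
 * kvmppc_complete_mmio_load() on the next KVM_RUN.
 */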
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;

        return EMULATE_DO_MMIO;
}

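/*
 * Start an emulated MMIO store: place the value to be written in
 * run->mmio.data in the endianness of the guest access and describe the
 * access for userspace to carry out.
 */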
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u32 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       bytes);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

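/*
 * Main entry point for KVM_RUN: finish any MMIO or DCR load that userspace
 * just completed, deliver pending exceptions to the guest, then enter guest
 * context with host interrupts disabled.
 */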
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        }

        kvmppc_check_and_deliver_interrupts(vcpu);

        local_irq_disable();
        kvm_guest_enter();
        r = __kvmppc_vcpu_run(run, vcpu);
        kvm_guest_exit();
        local_irq_enable();

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        return r;
}

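/*
 * Deliver an external interrupt requested by userspace via KVM_INTERRUPT:
 * queue the exception and wake the vcpu if it is sleeping.
 */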
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);

        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        long r;

        switch (ioctl) {
        default:
                r = -EINVAL;
        }

        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}