linux/arch/s390/kvm/interrupt.c
/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"

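/*
 * Helpers that test the guest PSW in the SIE control block: external
 * interrupts are deliverable only while the PSW external mask bit is on,
 * and a wait PSW with PER, I/O and external interrupts all masked off is
 * a "disabled wait" that cannot be resolved by injecting an interrupt.
 */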
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

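/*
 * Check whether an interrupt can be delivered to the guest right now.
 * External interrupts are gated twice: by the PSW external mask and by
 * the per-subclass mask bits in control register 0 (0x4000ul is the
 * emergency-signal subclass mask, CR0 bit 49; 0x200ul is the
 * service-signal subclass mask, CR0 bit 54). Program checks, SIGP
 * stop/set-prefix and restart are always deliverable.
 */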
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        default:
                BUG();
        }
        return 0;
}

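/*
 * Track the vcpu's idle state both in the SIE control block (the
 * CPUSTAT_WAIT flag) and in the floating-interrupt idle_mask, which
 * kvm_s390_inject_vm() uses to pick an idle cpu for delivery.
 */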
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_ECALL_PEND |
                CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

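/*
 * Arm an intercept for an interrupt that is pending but not yet
 * deliverable: either request an external-interrupt intercept directly,
 * or ask the hardware to intercept when the guest changes CR0, so we
 * get another chance to deliver once the subclass mask opens up.
 */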
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_SIGP_STOP:
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
                break;
        default:
                BUG();
        }
}

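/*
 * Perform the architected interrupt delivery: store the interrupt code
 * (and parameters, where defined) into the guest lowcore, save the
 * current PSW as the old PSW and load the new PSW for that interrupt
 * class. table[] maps the two leftmost bits of the intercepted
 * instruction (ipa >> 14) to its length in bytes, which is reported as
 * the instruction-length code of a program check.
 */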
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
{
        const unsigned short table[] = { 2, 4, 4, 6 };
        int rc, exception = 0;

        switch (inti->type) {
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
                vcpu->stat.deliver_emergency_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_SERVICE:
                VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                           inti->ext.ext_params);
                vcpu->stat.deliver_service_signal++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_INT_VIRTIO:
                VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                           inti->ext.ext_params, inti->ext.ext_params2);
                vcpu->stat.deliver_virtio_interrupt++;
                rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_EXT_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u64(vcpu, __LC_EXT_PARAMS2,
                                   inti->ext.ext_params2);
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_SIGP_STOP:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
                vcpu->stat.deliver_stop_signal++;
                __set_intercept_indicator(vcpu, inti);
                break;

        case KVM_S390_SIGP_SET_PREFIX:
                VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
                           inti->prefix.address);
                vcpu->stat.deliver_prefix_signal++;
                vcpu->arch.sie_block->prefix = inti->prefix.address;
                vcpu->arch.sie_block->ihcpu = 0xffff;
                break;

        case KVM_S390_RESTART:
                VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
                vcpu->stat.deliver_restart_signal++;
                rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
                  restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                           inti->pgm.code,
                           table[vcpu->arch.sie_block->ipa >> 14]);
                vcpu->stat.deliver_program_int++;
                rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
                if (rc == -EFAULT)
                        exception = 1;

                rc = put_guest_u16(vcpu, __LC_PGM_ILC,
                        table[vcpu->arch.sie_block->ipa >> 14]);
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
                         &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;

                rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                        __LC_PGM_NEW_PSW, sizeof(psw_t));
                if (rc == -EFAULT)
                        exception = 1;
                break;

        default:
                BUG();
        }
        if (exception) {
                printk(KERN_WARNING "kvm: The guest lowcore is not mapped "
                        "during interrupt delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
}

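/*
 * Deliver a clock-comparator external interrupt (code 0x1004) if the
 * guest currently accepts it: the PSW external mask must be on and the
 * clock-comparator subclass mask (CR0 bit 52, 0x800ul) must be set.
 * Returns 1 if the interrupt was delivered, 0 otherwise.
 */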
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
        int rc, exception = 0;

        if (psw_extint_disabled(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
                 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
                __LC_EXT_NEW_PSW, sizeof(psw_t));
        if (rc == -EFAULT)
                exception = 1;
        if (exception) {
                printk(KERN_WARNING "kvm: The guest lowcore is not mapped "
                        "during interrupt delivery, killing userspace\n");
                do_exit(SIGKILL);
        }
        return 1;
}

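/*
 * Returns 1 if any pending local or floating interrupt is deliverable
 * with the current PSW and CR0, or if the clock comparator has already
 * expired and the guest accepts clock-comparator interrupts.
 */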
static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *inti;
        int rc = 0;

        if (atomic_read(&li->active)) {
                spin_lock_bh(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock_bh(&li->lock);
        }

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if ((!rc) && (vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                        (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
        }

        return rc;
}

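/* the clock comparator is handled via the wait path, nothing pending here */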
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return 0;
}

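/*
 * Handle a guest wait state: bail out for a disabled wait, return
 * immediately if the clock comparator already expired, otherwise arm
 * an hrtimer for the remaining time and sleep until an interrupt
 * arrives or the timer fires. The TOD delta is converted to
 * nanoseconds by multiplying with 125/512, since one TOD clock unit
 * is 2**-12 microseconds.
 */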
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;
        DECLARE_WAITQUEUE(wait, current);

        vcpu->stat.exit_wait_state++;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;

        __set_cpu_idle(vcpu);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 0;
        spin_unlock_bh(&vcpu->arch.local_int.lock);

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                __unset_cpu_idle(vcpu);
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (psw_extint_disabled(vcpu) ||
            (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_clock() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
        }

        sltime = ((vcpu->arch.sie_block->ckc - now) * 125) >> 9;

        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
                      HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        spin_lock(&vcpu->arch.local_int.float_int->lock);
        spin_lock_bh(&vcpu->arch.local_int.lock);
        add_wait_queue(&vcpu->arch.local_int.wq, &wait);
        while (list_empty(&vcpu->arch.local_int.list) &&
                list_empty(&vcpu->arch.local_int.float_int->list) &&
                (!vcpu->arch.local_int.timer_due) &&
                !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                spin_unlock(&vcpu->arch.local_int.float_int->lock);
                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
                spin_lock(&vcpu->arch.local_int.float_int->lock);
                spin_lock_bh(&vcpu->arch.local_int.lock);
        }
        __unset_cpu_idle(vcpu);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
        spin_unlock_bh(&vcpu->arch.local_int.lock);
        spin_unlock(&vcpu->arch.local_int.float_int->lock);
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

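/*
 * Tasklet scheduled from the hrtimer callback below: mark the timer as
 * due and wake up the vcpu thread sleeping in kvm_s390_handle_wait().
 */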
void kvm_s390_tasklet(unsigned long parm)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;

        spin_lock(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.timer_due = 1;
        if (waitqueue_active(&vcpu->arch.local_int.wq))
                wake_up_interruptible(&vcpu->arch.local_int.wq);
        spin_unlock(&vcpu->arch.local_int.lock);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

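/*
 * Walk the local and then the floating interrupt list and deliver every
 * interrupt that is deliverable right now; interrupts that are pending
 * but masked only set an intercept indicator. The list lock is dropped
 * for each actual delivery, hence the retry loop. A pending clock
 * comparator is delivered in between.
 */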
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;
        int deliver;

        __reset_intercept_indicators(vcpu);
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
                        spin_lock_bh(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
                        spin_unlock_bh(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }

        if ((vcpu->arch.sie_block->ckc <
                get_clock() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);

        if (atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (deliver);
        }
}

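/*
 * Inject a program check from kernel context. The interrupt is added to
 * the head of the local list so it is delivered before anything already
 * queued; the vcpu must not be sleeping in handle_wait at this point,
 * hence the BUG_ON on an active wait queue.
 */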
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_PROGRAM_INT;
        inti->pgm.code = code;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        spin_lock_bh(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(&li->wq));
        spin_unlock_bh(&li->lock);
        return 0;
}

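/*
 * Inject a floating interrupt on behalf of userspace. The interrupt is
 * queued on the vm-wide list and one vcpu is kicked to pick it up: the
 * first idle cpu found in idle_mask, or, if none is idle, the next
 * existing cpu in round-robin order.
 */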
int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti;
        int sigcpu;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->type = s390int->type;
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_INT_EMERGENCY:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        list_add_tail(&inti->list, &fi->list);
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (fi->local_int[sigcpu] == NULL);
        }
        li = fi->local_int[sigcpu];
        spin_lock_bh(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return 0;
}

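/*
 * Inject a cpu-local interrupt on behalf of userspace. Program checks
 * go to the head of the local list, everything else to the tail; a
 * SIGP stop additionally records ACTION_STOP_ON_STOP so the stop is
 * performed when the interrupt is handled.
 */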
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                         struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        switch (s390int->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000) {
                        kfree(inti);
                        return -EINVAL;
                }
                inti->type = s390int->type;
                inti->pgm.code = s390int->parm;
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                inti->prefix.address = s390int->parm;
                inti->type = s390int->type;
                VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                           s390int->parm);
                break;
        case KVM_S390_SIGP_STOP:
        case KVM_S390_RESTART:
        case KVM_S390_INT_EMERGENCY:
                VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
                inti->type = s390int->type;
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        default:
                kfree(inti);
                return -EINVAL;
        }

        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
        spin_lock_bh(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
                list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(&li->wq))
                wake_up_interruptible(&li->wq);
        spin_unlock_bh(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
        return 0;
}