linux/arch/x86/kvm/i8254.c
/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <sheng.yang@intel.com>
 *   Based on QEMU and Xen.
 */

#include <linux/kvm_host.h>

#include "irq.h"
#include "i8254.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

/* Compute with 96 bit intermediate result: (a*b)/c */
static u64 muldiv64(u64 a, u32 b, u32 c)
{
        union {
                u64 ll;
                struct {
                        u32 low, high;
                } l;
        } u, res;
        u64 rl, rh;

        u.ll = a;
        rl = (u64)u.l.low * (u64)b;
        rh = (u64)u.l.high * (u64)b;
        rh += (rl >> 32);
        res.l.high = div64_u64(rh, c);
        res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
        return res.ll;
}

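/*
 * Drive the GATE input of a channel.  In modes 1, 2, 3 and 5 a rising edge
 * on GATE restarts counting, modeled here by resetting count_load_time;
 * in modes 0 and 4 GATE would only pause/resume counting (not modeled yet).
 */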
static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        switch (c->mode) {
        default:
        case 0:
        case 4:
                /* XXX: just disable/enable counting */
                break;
        case 1:
        case 2:
        case 3:
        case 5:
                /* Restart counting on rising edge. */
                if (c->gate < val)
                        c->count_load_time = ktime_get();
                break;
        }

        c->gate = val;
}

static int pit_get_gate(struct kvm *kvm, int channel)
{
        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        return kvm->arch.vpit->pit_state.channels[channel].gate;
}

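/*
 * Reconstruct the current counter value of a channel: convert the time
 * elapsed since the count was loaded into PIT ticks and derive the counter
 * from the programmed count according to the channel's mode.
 */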
static int pit_get_count(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];
        s64 d, t;
        int counter;

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

        switch (c->mode) {
        case 0:
        case 1:
        case 4:
        case 5:
                counter = (c->count - d) & 0xffff;
                break;
        case 3:
                /* XXX: may be incorrect for odd counts */
                counter = c->count - (mod_64((2 * d), c->count));
                break;
        default:
                counter = c->count - mod_64(d, c->count);
                break;
        }
        return counter;
}

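/*
 * Compute the level of the channel's OUT pin from the elapsed tick count:
 * for example, low until terminal count in mode 0, a pulse every 'count'
 * ticks in mode 2, and a square wave (high for roughly half the period)
 * in mode 3.
 */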
static int pit_get_out(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];
        s64 d, t;
        int out;

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
        d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);

        switch (c->mode) {
        default:
        case 0:
                out = (d >= c->count);
                break;
        case 1:
                out = (d < c->count);
                break;
        case 2:
                out = ((mod_64(d, c->count) == 0) && (d != 0));
                break;
        case 3:
                out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
                break;
        case 4:
        case 5:
                out = (d == c->count);
                break;
        }

        return out;
}

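/*
 * Latch the current count so that subsequent reads return a stable
 * snapshot; the latch is released once the guest has read it back
 * (one or two bytes, depending on rw_mode).
 */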
static void pit_latch_count(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        if (!c->count_latched) {
                c->latched_count = pit_get_count(kvm, channel);
                c->count_latched = c->rw_mode;
        }
}

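/*
 * Latch the status byte for the read-back command: bit 7 is the OUT pin,
 * bits 5-4 the read/write mode, bits 3-1 the counting mode and bit 0 BCD.
 */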
static void pit_latch_status(struct kvm *kvm, int channel)
{
        struct kvm_kpit_channel_state *c =
                &kvm->arch.vpit->pit_state.channels[channel];

        WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));

        if (!c->status_latched) {
                /* TODO: Return NULL COUNT (bit 6). */
                c->status = ((pit_get_out(kvm, channel) << 7) |
                                (c->rw_mode << 4) |
                                (c->mode << 1) |
                                c->bcd);
                c->status_latched = 1;
        }
}

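/*
 * Body of the hrtimer callback: record another pending tick, request that
 * VCPU 0 notice the pending timer, wake it if it is waiting, and advance
 * the expiry by one period.  Returns nonzero if the timer should restart.
 */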
static int __pit_timer_fn(struct kvm_kpit_state *ps)
{
        struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
        struct kvm_kpit_timer *pt = &ps->pit_timer;

        if (!atomic_inc_and_test(&pt->pending))
                set_bit(KVM_REQ_PENDING_TIMER, &vcpu0->requests);
        if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
                vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                wake_up_interruptible(&vcpu0->wq);
        }

        pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
        pt->scheduled = ktime_to_ns(pt->timer.expires);

        return (pt->period == 0 ? 0 : 1);
}

int pit_has_pending_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;

        if (pit && vcpu->vcpu_id == 0 && pit->pit_state.inject_pending)
                return atomic_read(&pit->pit_state.pit_timer.pending);

        return 0;
}

static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
        struct kvm_kpit_state *ps;
        int restart_timer = 0;

        ps = container_of(data, struct kvm_kpit_state, pit_timer.timer);

        restart_timer = __pit_timer_fn(ps);

        if (restart_timer)
                return HRTIMER_RESTART;
        else
                return HRTIMER_NORESTART;
}

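/*
 * Re-arm the PIT hrtimer when VCPU 0 has moved to another physical CPU:
 * if the timer was queued, cancel it and restart it with the same expiry
 * so that it is requeued on the CPU now running the VCPU.
 */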
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
        struct hrtimer *timer;

        if (vcpu->vcpu_id != 0 || !pit)
                return;

        timer = &pit->pit_state.pit_timer.timer;
        if (hrtimer_cancel(timer))
                hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
}

static void destroy_pit_timer(struct kvm_kpit_timer *pt)
{
        pr_debug("pit: execute del timer!\n");
        hrtimer_cancel(&pt->timer);
}

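/*
 * (Re)arm the channel 0 hrtimer.  The counter value is converted to
 * nanoseconds at the PIT input clock (KVM_PIT_FREQ, roughly 1.19 MHz); the
 * maximum count of 0x10000, for example, gives an interval of about
 * 54.9 ms, i.e. the classic 18.2 Hz tick.  A zero period means one-shot.
 */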
static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period)
{
        s64 interval;

        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);

        pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);

        /* TODO: On real hardware the new value only takes effect once the
         * counter is retriggered. */
        hrtimer_cancel(&pt->timer);
        pt->period = (is_period == 0) ? 0 : interval;
        pt->timer.function = pit_timer_fn;
        atomic_set(&pt->pending, 0);

        hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
                      HRTIMER_MODE_ABS);
}

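/*
 * Install a newly written count for a channel.  For channel 0 this also
 * creates (or deletes) the backing hrtimer that drives IRQ0 injection.
 */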
static void pit_load_count(struct kvm *kvm, int channel, u32 val)
{
        struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;

        WARN_ON(!mutex_is_locked(&ps->lock));

        pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);

        /*
         * Although the spec says the state of the 8254 is undefined after
         * power-up, some picky guests such as Windows XP seem to depend on
         * IRQ0 firing while they boot.  So give the channel a default rate
         * here (a count of 0 is treated as the maximum, 0x10000) instead of
         * leaving it unprogrammed.
         */
        if (val == 0)
                val = 0x10000;

        ps->channels[channel].count_load_time = ktime_get();
        ps->channels[channel].count = val;

        if (channel != 0)
                return;

        /*
         * Two kinds of timer: modes 1 and 4 are one-shot, modes 2 and 3 are
         * periodic; any other mode deletes the timer.
         */
        switch (ps->channels[0].mode) {
        case 1:
        /* FIXME: enhance mode 4 precision */
        case 4:
                create_pit_timer(&ps->pit_timer, val, 0);
                break;
        case 2:
        case 3:
                create_pit_timer(&ps->pit_timer, val, 1);
                break;
        default:
                destroy_pit_timer(&ps->pit_timer);
        }
}

void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
{
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
        pit_load_count(kvm, channel, val);
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
}

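/*
 * Handle guest writes to the PIT I/O ports.  Port 0x43 (addr 3 after
 * masking) takes a control word: bits 7-6 select the counter (11b is the
 * read-back command), bits 5-4 the access mode (00b latches the count),
 * bits 3-1 the counting mode and bit 0 BCD.  Ports 0x40-0x42 write the
 * count of the corresponding channel, one or two bytes at a time.
 */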
static void pit_ioport_write(struct kvm_io_device *this,
                             gpa_t addr, int len, const void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        int channel, access;
        struct kvm_kpit_channel_state *s;
        u32 val = *(u32 *) data;

        val  &= 0xff;
        addr &= KVM_PIT_CHANNEL_MASK;

        mutex_lock(&pit_state->lock);

        if (val != 0)
                pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
                          (unsigned int)addr, len, val);

        if (addr == 3) {
                channel = val >> 6;
                if (channel == 3) {
                        /* Read-Back Command. */
                        for (channel = 0; channel < 3; channel++) {
                                s = &pit_state->channels[channel];
                                if (val & (2 << channel)) {
                                        if (!(val & 0x20))
                                                pit_latch_count(kvm, channel);
                                        if (!(val & 0x10))
                                                pit_latch_status(kvm, channel);
                                }
                        }
                } else {
                        /* Select Counter <channel>. */
                        s = &pit_state->channels[channel];
                        access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
                        if (access == 0) {
                                pit_latch_count(kvm, channel);
                        } else {
                                s->rw_mode = access;
                                s->read_state = access;
                                s->write_state = access;
                                s->mode = (val >> 1) & 7;
                                if (s->mode > 5)
                                        s->mode -= 4;
                                s->bcd = val & 1;
                        }
                }
        } else {
                /* Write Count. */
                s = &pit_state->channels[addr];
                switch (s->write_state) {
                default:
                case RW_STATE_LSB:
                        pit_load_count(kvm, addr, val);
                        break;
                case RW_STATE_MSB:
                        pit_load_count(kvm, addr, val << 8);
                        break;
                case RW_STATE_WORD0:
                        s->write_latch = val;
                        s->write_state = RW_STATE_WORD1;
                        break;
                case RW_STATE_WORD1:
                        pit_load_count(kvm, addr, s->write_latch | (val << 8));
                        s->write_state = RW_STATE_WORD0;
                        break;
                }
        }

        mutex_unlock(&pit_state->lock);
}

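/*
 * Handle guest reads from the counter ports: return a latched status or
 * count byte if one is pending, otherwise compute the current count and
 * return its low and/or high byte according to the channel's read state.
 */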
static void pit_ioport_read(struct kvm_io_device *this,
                            gpa_t addr, int len, void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        int ret, count;
        struct kvm_kpit_channel_state *s;

        addr &= KVM_PIT_CHANNEL_MASK;
        s = &pit_state->channels[addr];

        mutex_lock(&pit_state->lock);

        if (s->status_latched) {
                s->status_latched = 0;
                ret = s->status;
        } else if (s->count_latched) {
                switch (s->count_latched) {
                default:
                case RW_STATE_LSB:
                        ret = s->latched_count & 0xff;
                        s->count_latched = 0;
                        break;
                case RW_STATE_MSB:
                        ret = s->latched_count >> 8;
                        s->count_latched = 0;
                        break;
                case RW_STATE_WORD0:
                        ret = s->latched_count & 0xff;
                        s->count_latched = RW_STATE_MSB;
                        break;
                }
        } else {
                switch (s->read_state) {
                default:
                case RW_STATE_LSB:
                        count = pit_get_count(kvm, addr);
                        ret = count & 0xff;
                        break;
                case RW_STATE_MSB:
                        count = pit_get_count(kvm, addr);
                        ret = (count >> 8) & 0xff;
                        break;
                case RW_STATE_WORD0:
                        count = pit_get_count(kvm, addr);
                        ret = count & 0xff;
                        s->read_state = RW_STATE_WORD1;
                        break;
                case RW_STATE_WORD1:
                        count = pit_get_count(kvm, addr);
                        ret = (count >> 8) & 0xff;
                        s->read_state = RW_STATE_WORD0;
                        break;
                }
        }

        if (len > sizeof(ret))
                len = sizeof(ret);
        memcpy(data, (char *)&ret, len);

        mutex_unlock(&pit_state->lock);
}

static int pit_in_range(struct kvm_io_device *this, gpa_t addr,
                        int len, int is_write)
{
        return ((addr >= KVM_PIT_BASE_ADDRESS) &&
                (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}

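/*
 * Port 0x61 (NMI status/control): bit 0 gates PIT channel 2 and bit 1
 * enables the speaker data; the remaining bits are ignored on write.
 */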
static void speaker_ioport_write(struct kvm_io_device *this,
                                 gpa_t addr, int len, const void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        u32 val = *(u32 *) data;

        mutex_lock(&pit_state->lock);
        pit_state->speaker_data_on = (val >> 1) & 1;
        pit_set_gate(kvm, 2, val & 1);
        mutex_unlock(&pit_state->lock);
}

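/*
 * Reads of port 0x61 report the channel 2 gate (bit 0), the speaker data
 * enable (bit 1), an approximated memory refresh toggle (bit 4) and the
 * channel 2 output (bit 5).
 */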
static void speaker_ioport_read(struct kvm_io_device *this,
                                gpa_t addr, int len, void *data)
{
        struct kvm_pit *pit = (struct kvm_pit *)this->private;
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        unsigned int refresh_clock;
        int ret;

        /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
        refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

        mutex_lock(&pit_state->lock);
        ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
                (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
        if (len > sizeof(ret))
                len = sizeof(ret);
        memcpy(data, (char *)&ret, len);
        mutex_unlock(&pit_state->lock);
}

static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
                            int len, int is_write)
{
        return (addr == KVM_SPEAKER_BASE_ADDRESS);
}

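/*
 * Reset all three channels to an unprogrammed mode (0xff), gate channels
 * 0 and 1 on, load the default maximum count, and clear any pending ticks.
 */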
void kvm_pit_reset(struct kvm_pit *pit)
{
        int i;
        struct kvm_kpit_channel_state *c;

        mutex_lock(&pit->pit_state.lock);
        for (i = 0; i < 3; i++) {
                c = &pit->pit_state.channels[i];
                c->mode = 0xff;
                c->gate = (i != 2);
                pit_load_count(pit->kvm, i, 0);
        }
        mutex_unlock(&pit->pit_state.lock);

        atomic_set(&pit->pit_state.pit_timer.pending, 0);
        pit->pit_state.inject_pending = 1;
}

struct kvm_pit *kvm_create_pit(struct kvm *kvm)
{
        struct kvm_pit *pit;
        struct kvm_kpit_state *pit_state;

        pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
        if (!pit)
                return NULL;

        mutex_init(&pit->pit_state.lock);
        mutex_lock(&pit->pit_state.lock);

        /* Initialize PIO device */
        pit->dev.read = pit_ioport_read;
        pit->dev.write = pit_ioport_write;
        pit->dev.in_range = pit_in_range;
        pit->dev.private = pit;
        kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);

        pit->speaker_dev.read = speaker_ioport_read;
        pit->speaker_dev.write = speaker_ioport_write;
        pit->speaker_dev.in_range = speaker_in_range;
        pit->speaker_dev.private = pit;
        kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);

        kvm->arch.vpit = pit;
        pit->kvm = kvm;

        pit_state = &pit->pit_state;
        pit_state->pit = pit;
        hrtimer_init(&pit_state->pit_timer.timer,
                     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        mutex_unlock(&pit->pit_state.lock);

        kvm_pit_reset(pit);

        return pit;
}

void kvm_free_pit(struct kvm *kvm)
{
        struct hrtimer *timer;

        if (kvm->arch.vpit) {
                mutex_lock(&kvm->arch.vpit->pit_state.lock);
                timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
                hrtimer_cancel(timer);
                mutex_unlock(&kvm->arch.vpit->pit_state.lock);
                kfree(kvm->arch.vpit);
        }
}

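/*
 * Deliver one PIT tick: pulse IRQ 0 (raise then lower) on both the
 * emulated IOAPIC and the emulated PIC, so the edge is seen whichever
 * interrupt controller the guest happens to be using.
 */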
static void __inject_pit_timer_intr(struct kvm *kvm)
{
        mutex_lock(&kvm->lock);
        kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
        kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0);
        kvm_pic_set_irq(pic_irqchip(kvm), 0, 1);
        kvm_pic_set_irq(pic_irqchip(kvm), 0, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_kpit_state *ps;

        if (vcpu && pit) {
                ps = &pit->pit_state;

                /*
                 * Try to inject a pending tick when:
                 * 1. a tick is pending, and
                 * 2. the last interrupt was accepted, or we have already
                 *    waited too long for it.
                 */
                if (atomic_read(&ps->pit_timer.pending) &&
                    (ps->inject_pending ||
                    (jiffies - ps->last_injected_time
                                >= KVM_MAX_PIT_INTR_INTERVAL))) {
                        ps->inject_pending = 0;
                        __inject_pit_timer_intr(kvm);
                        ps->last_injected_time = jiffies;
                }
        }
}

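/*
 * Called after an interrupt with vector 'vec' has been injected.  If that
 * vector is routed from IRQ 0 by the PIC or the IOAPIC and PIT ticks are
 * still pending, note that the tick was accepted, consume one pending
 * tick and restart channel 0's count from now.
 */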
void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
        struct kvm_arch *arch = &vcpu->kvm->arch;
        struct kvm_kpit_state *ps;

        if (vcpu && arch->vpit) {
                ps = &arch->vpit->pit_state;
                if (atomic_read(&ps->pit_timer.pending) &&
                (((arch->vpic->pics[0].imr & 1) == 0 &&
                  arch->vpic->pics[0].irq_base == vec) ||
                  (arch->vioapic->redirtbl[0].fields.vector == vec &&
                  arch->vioapic->redirtbl[0].fields.mask != 1))) {
                        ps->inject_pending = 1;
                        atomic_dec(&ps->pit_timer.pending);
                        ps->channels[0].count_load_time = ktime_get();
                }
        }
}