linux/arch/ia64/kvm/vmm_ivt.S
/*
 * arch/ia64/kvm/vmm_ivt.S
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      Stephane Eranian <eranian@hpl.hp.com>
 *      David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *      Asit Mallick <asit.k.mallick@intel.com>
 *      Suresh Siddha <suresh.b.siddha@intel.com>
 *      Kenneth Chen <kenneth.w.chen@intel.com>
 *      Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now
 *          uses virtual PT.
 *
 * 07/6/20 Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *          Supporting Intel virtualization architecture
 */

/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for
 * critical interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *              // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *  entry offset ----/     /         /                  /          /
 *  entry number ---------/         /                  /          /
 *  size of the entry -------------/                  /          /
 *  vector name -------------------------------------/          /
 *  interruptions triggering this vector ----------------------/
 *
 * The table is 32KB in size and must be aligned on a 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address.)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
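
/*
 * Editor's note (not part of the original header): the layout above
 * implies a simple offset rule, handy for checking the .org directives
 * below.  A bundle is 16 bytes, so:
 *
 *   entry n, n < 20:   offset = n * 0x400                 (64 bundles each)
 *   entry n, n >= 20:  offset = 0x5000 + (n - 20) * 0x100 (16 bundles each)
 *
 * e.g. entry 7 -> 0x1c00 (Data Key Miss) and entry 21 -> 0x5100 (Key
 * Permission), matching the entry comments throughout this file.
 */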


#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/pgtable.h>

#include "asm-offsets.h"
#include "vcpu.h"
#include "kvm_minstate.h"
#include "vti.h"

#if 1
# define PSR_DEFAULT_BITS   psr.ac
#else
# define PSR_DEFAULT_BITS   0
#endif

#define KVM_FAULT(n)    \
    kvm_fault_##n:;          \
    mov r19=n;;          \
    br.sptk.many kvm_fault_##n;         \
    ;;


#define KVM_REFLECT(n)    \
    mov r31=pr;           \
    mov r19=n;       /* prepare to save predicates */ \
    mov r29=cr.ipsr;      \
    ;;      \
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
(p7) br.sptk.many kvm_dispatch_reflection;        \
    br.sptk.many kvm_panic;

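/*
 * Editor's note: in rough C-like pseudocode, KVM_REFLECT(n) behaves as
 *
 *   if (ipsr.vm)                       // fault taken while a guest ran
 *           kvm_dispatch_reflection(); // reflect the fault to the guest
 *   else
 *           kvm_panic();               // fault inside the VMM: fatal
 *
 * while KVM_FAULT(n) simply parks the CPU in a branch-to-self loop with
 * the vector number in r19, for vectors the VMM never expects to take.
 */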

GLOBAL_ENTRY(kvm_panic)
    br.sptk.many kvm_panic
    ;;
END(kvm_panic)


    .section .text.ivt,"ax"

    .align 32768    // align on 32KB boundary
    .global kvm_ia64_ivt
kvm_ia64_ivt:
///////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(kvm_vhpt_miss)
    KVM_FAULT(0)
END(kvm_vhpt_miss)


    .org kvm_ia64_ivt+0x400
////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(kvm_itlb_miss)
    mov r31 = pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    (p6) br.sptk kvm_alt_itlb_miss
    mov r19 = 1
    br.sptk kvm_itlb_miss_dispatch
    KVM_FAULT(1);
END(kvm_itlb_miss)

    .org kvm_ia64_ivt+0x0800
//////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(kvm_dtlb_miss)
    mov r31 = pr
    mov r29=cr.ipsr;
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
    (p6) br.sptk kvm_alt_dtlb_miss
    br.sptk kvm_dtlb_miss_dispatch
END(kvm_dtlb_miss)

    .org kvm_ia64_ivt+0x0c00
////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(kvm_alt_itlb_miss)
    mov r16=cr.ifa    // get address that caused the TLB miss
    ;;
    movl r17=PAGE_KERNEL
    mov r24=cr.ipsr
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    ;;
    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
    ;;
    or r19=r17,r19      // insert PTE control bits into r19
    ;;
    movl r20=IA64_GRANULE_SHIFT<<2
    ;;
    mov cr.itir=r20
    ;;
    itc.i r19           // insert the TLB entry
    mov pr=r31,-1
    rfi
END(kvm_alt_itlb_miss)

    .org kvm_ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(kvm_alt_dtlb_miss)
    mov r16=cr.ifa              // get address that caused the TLB miss
    ;;
    movl r17=PAGE_KERNEL
    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
    mov r24=cr.ipsr
    ;;
    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
    ;;
    or r19=r19,r17      // insert PTE control bits into r19
    ;;
    movl r20=IA64_GRANULE_SHIFT<<2
    ;;
    mov cr.itir=r20
    ;;
    itc.d r19           // insert the TLB entry
    mov pr=r31,-1
    rfi
END(kvm_alt_dtlb_miss)
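
/*
 * Editor's note: both alternate-miss handlers above install an identity
 * mapping for the faulting address; in C-like pseudocode:
 *
 *   pte  = (ifa & ((1UL << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
 *          | PAGE_KERNEL;            // keep the pfn, add kernel attrs
 *   itir = IA64_GRANULE_SHIFT << 2;  // page-size field of cr.itir
 *   // itc.i (entry 3) or itc.d (entry 4) then inserts the entry
 *
 * i.e. physical address == virtual address with the non-physical bits
 * masked off, mapped at granule size with kernel attributes.
 */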

    .org kvm_ia64_ivt+0x1400
//////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(kvm_nested_dtlb_miss)
    KVM_FAULT(5)
END(kvm_nested_dtlb_miss)

    .org kvm_ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(kvm_ikey_miss)
    KVM_REFLECT(6)
END(kvm_ikey_miss)

    .org kvm_ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(kvm_dkey_miss)
    KVM_REFLECT(7)
END(kvm_dkey_miss)

    .org kvm_ia64_ivt+0x2000
////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(kvm_dirty_bit)
    KVM_REFLECT(8)
END(kvm_dirty_bit)

    .org kvm_ia64_ivt+0x2400
////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(kvm_iaccess_bit)
    KVM_REFLECT(9)
END(kvm_iaccess_bit)

    .org kvm_ia64_ivt+0x2800
///////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(kvm_daccess_bit)
    KVM_REFLECT(10)
END(kvm_daccess_bit)

    .org kvm_ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(kvm_break_fault)
    mov r31=pr
    mov r19=11
    mov r29=cr.ipsr
    ;;
    KVM_SAVE_MIN_WITH_COVER_R19
    ;;
    alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
    mov out0=cr.ifa
    mov out2=cr.isr     // FIXME: pity to make this slow access twice
    mov out3=cr.iim     // FIXME: pity to make this slow access twice
    adds r3=8,r2                // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                  // guarantee that interruption collection is on
    ;;
    //(p15) ssm psr.i               // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    adds out1=16,sp
    br.call.sptk.many b6=kvm_ia64_handle_break
    ;;
END(kvm_break_fault)
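
/*
 * Editor's note: judging from the out registers set up above, the C
 * handler is invoked roughly as
 *
 *   kvm_ia64_handle_break(ifa, regs, isr, iim);   // out1 = sp + 16
 *
 * where "regs" points at the pt_regs frame that
 * KVM_SAVE_MIN_WITH_COVER_R19/KVM_SAVE_REST just built on the stack.
 */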

    .org kvm_ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(kvm_interrupt)
    mov r31=pr          // prepare to save predicates
    mov r19=12
    mov r29=cr.ipsr
    ;;
    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
    tbit.z p0,p15=r29,IA64_PSR_I_BIT
    ;;
(p7) br.sptk kvm_dispatch_interrupt
    ;;
    mov r27=ar.rsc              /* M */
    mov r20=r1                  /* A */
    mov r25=ar.unat             /* M */
    mov r26=ar.pfs              /* I */
    mov r28=cr.iip              /* M */
    cover                       /* B (or nothing) */
    ;;
    mov r1=sp
    ;;
    invala                      /* M */
    mov r30=cr.ifs
    ;;
    addl r1=-VMM_PT_REGS_SIZE,r1
    ;;
    adds r17=2*L1_CACHE_BYTES,r1        /* really: biggest cache-line size */
    adds r16=PT(CR_IPSR),r1
    ;;
    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
    st8 [r16]=r29                       /* save cr.ipsr */
    ;;
    lfetch.fault.excl.nt1 [r17]
    mov r29=b0
    ;;
    adds r16=PT(R8),r1          /* initialize first base pointer */
    adds r17=PT(R9),r1          /* initialize second base pointer */
    mov r18=r0                  /* make sure r18 isn't NaT */
    ;;
.mem.offset 0,0; st8.spill [r16]=r8,16
.mem.offset 8,0; st8.spill [r17]=r9,16
    ;;
.mem.offset 0,0; st8.spill [r16]=r10,24
.mem.offset 8,0; st8.spill [r17]=r11,24
    ;;
    st8 [r16]=r28,16            /* save cr.iip */
    st8 [r17]=r30,16            /* save cr.ifs */
    mov r8=ar.fpsr              /* M */
    mov r9=ar.csd
    mov r10=ar.ssd
    movl r11=FPSR_DEFAULT       /* L-unit */
    ;;
    st8 [r16]=r25,16            /* save ar.unat */
    st8 [r17]=r26,16            /* save ar.pfs */
    shl r18=r18,16              /* compute ar.rsc to be used for "loadrs" */
    ;;
    st8 [r16]=r27,16            /* save ar.rsc */
    adds r17=16,r17             /* skip over ar_rnat field */
    ;;
    st8 [r17]=r31,16            /* save predicates */
    adds r16=16,r16             /* skip over ar_bspstore field */
    ;;
    st8 [r16]=r29,16            /* save b0 */
    st8 [r17]=r18,16            /* save ar.rsc value for "loadrs" */
    ;;
.mem.offset 0,0; st8.spill [r16]=r20,16    /* save original r1 */
.mem.offset 8,0; st8.spill [r17]=r12,16
    adds r12=-16,r1
    /* switch to kernel memory stack (with 16 bytes of scratch) */
    ;;
.mem.offset 0,0; st8.spill [r16]=r13,16
.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
    ;;
.mem.offset 0,0; st8.spill [r16]=r15,16
.mem.offset 8,0; st8.spill [r17]=r14,16
    dep r14=-1,r0,60,4
    ;;
.mem.offset 0,0; st8.spill [r16]=r2,16
.mem.offset 8,0; st8.spill [r17]=r3,16
    adds r2=VMM_PT_REGS_R16_OFFSET,r1
    adds r14 = VMM_VCPU_GP_OFFSET,r13
    ;;
    mov r8=ar.ccv
    ld8 r14 = [r14]
    ;;
    mov r1=r14       /* establish kernel global pointer */
    ;;
    bsw.1
    ;;
    alloc r14=ar.pfs,0,0,1,0    // must be first in an insn group
    mov out0=r13
    ;;
    ssm psr.ic
    ;;
    srlz.i
    ;;
    //(p15) ssm psr.i
    adds r3=8,r2                // set up second base pointer for SAVE_REST
    srlz.i                      // ensure everybody knows psr.ic is back on
    ;;
.mem.offset 0,0; st8.spill [r2]=r16,16
.mem.offset 8,0; st8.spill [r3]=r17,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r18,16
.mem.offset 8,0; st8.spill [r3]=r19,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r20,16
.mem.offset 8,0; st8.spill [r3]=r21,16
    mov r18=b6
    ;;
.mem.offset 0,0; st8.spill [r2]=r22,16
.mem.offset 8,0; st8.spill [r3]=r23,16
    mov r19=b7
    ;;
.mem.offset 0,0; st8.spill [r2]=r24,16
.mem.offset 8,0; st8.spill [r3]=r25,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r26,16
.mem.offset 8,0; st8.spill [r3]=r27,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r28,16
.mem.offset 8,0; st8.spill [r3]=r29,16
    ;;
.mem.offset 0,0; st8.spill [r2]=r30,16
.mem.offset 8,0; st8.spill [r3]=r31,32
    ;;
    mov ar.fpsr=r11       /* M-unit */
    st8 [r2]=r8,8         /* ar.ccv */
    adds r24=PT(B6)-PT(F7),r3
    ;;
    stf.spill [r2]=f6,32
    stf.spill [r3]=f7,32
    ;;
    stf.spill [r2]=f8,32
    stf.spill [r3]=f9,32
    ;;
    stf.spill [r2]=f10
    stf.spill [r3]=f11
    adds r25=PT(B7)-PT(F11),r3
    ;;
    st8 [r24]=r18,16       /* b6 */
    st8 [r25]=r19,16       /* b7 */
    ;;
    st8 [r24]=r9           /* ar.csd */
    st8 [r25]=r10          /* ar.ssd */
    ;;
    srlz.d              // make sure we see the effect of cr.ivr
    addl r14=@gprel(ia64_leave_nested),gp
    ;;
    mov rp=r14
    br.call.sptk.many b6=kvm_ia64_handle_irq
    ;;
END(kvm_interrupt)
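
/*
 * Editor's note: unlike the dispatchers later in this file,
 * kvm_interrupt open-codes its state save: the cover/invala/st8.spill
 * sequence above is essentially a hand-inlined KVM_SAVE_MIN plus
 * SAVE_REST, after which kvm_ia64_handle_irq is called with
 * ia64_leave_nested as the return point.
 */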

    .global kvm_dispatch_vexirq
    .org kvm_ia64_ivt+0x3400
//////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
ENTRY(kvm_virtual_exirq)
    mov r31=pr
    mov r19=13
    mov r30=r0
    ;;
kvm_dispatch_vexirq:
    cmp.eq p6,p0=1,r30
    ;;
(p6) add r29=VMM_VCPU_SAVED_GP_OFFSET,r21
    ;;
(p6) ld8 r1=[r29]
    ;;
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,1,0
    mov out0=r13

    ssm psr.ic
    ;;
    srlz.i                  // guarantee that interruption collection is on
    ;;
    //(p15) ssm psr.i               // restore psr.i
    adds r3=8,r2                // set up second base pointer
    ;;
    KVM_SAVE_REST
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    mov rp=r14
    br.call.sptk.many b6=kvm_vexirq
END(kvm_virtual_exirq)

    .org kvm_ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
    KVM_FAULT(14)
    // this code segment is from 2.6.16.13


    .org kvm_ia64_ivt+0x3c00
///////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
    KVM_FAULT(15)


    .org kvm_ia64_ivt+0x4000
///////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
    KVM_FAULT(16)

    .org kvm_ia64_ivt+0x4400
//////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
    KVM_FAULT(17)

    .org kvm_ia64_ivt+0x4800
//////////////////////////////////////////////////////////////////////
// 0x4800 Entry 18 (size 64 bundles) Reserved
    KVM_FAULT(18)

    .org kvm_ia64_ivt+0x4c00
//////////////////////////////////////////////////////////////////////
// 0x4c00 Entry 19 (size 64 bundles) Reserved
    KVM_FAULT(19)

    .org kvm_ia64_ivt+0x5000
//////////////////////////////////////////////////////////////////////
// 0x5000 Entry 20 (size 16 bundles) Page Not Present
ENTRY(kvm_page_not_present)
    KVM_REFLECT(20)
END(kvm_page_not_present)

    .org kvm_ia64_ivt+0x5100
///////////////////////////////////////////////////////////////////////
// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
ENTRY(kvm_key_permission)
    KVM_REFLECT(21)
END(kvm_key_permission)

    .org kvm_ia64_ivt+0x5200
//////////////////////////////////////////////////////////////////////
// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
ENTRY(kvm_iaccess_rights)
    KVM_REFLECT(22)
END(kvm_iaccess_rights)

    .org kvm_ia64_ivt+0x5300
//////////////////////////////////////////////////////////////////////
// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
ENTRY(kvm_daccess_rights)
    KVM_REFLECT(23)
END(kvm_daccess_rights)

    .org kvm_ia64_ivt+0x5400
/////////////////////////////////////////////////////////////////////
// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
ENTRY(kvm_general_exception)
    KVM_REFLECT(24)
    KVM_FAULT(24)
END(kvm_general_exception)

    .org kvm_ia64_ivt+0x5500
//////////////////////////////////////////////////////////////////////
// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
ENTRY(kvm_disabled_fp_reg)
    KVM_REFLECT(25)
END(kvm_disabled_fp_reg)

    .org kvm_ia64_ivt+0x5600
////////////////////////////////////////////////////////////////////
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(kvm_nat_consumption)
    KVM_REFLECT(26)
END(kvm_nat_consumption)

    .org kvm_ia64_ivt+0x5700
/////////////////////////////////////////////////////////////////////
// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
ENTRY(kvm_speculation_vector)
    KVM_REFLECT(27)
END(kvm_speculation_vector)

    .org kvm_ia64_ivt+0x5800
/////////////////////////////////////////////////////////////////////
// 0x5800 Entry 28 (size 16 bundles) Reserved
    KVM_FAULT(28)

    .org kvm_ia64_ivt+0x5900
///////////////////////////////////////////////////////////////////
// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
ENTRY(kvm_debug_vector)
    KVM_FAULT(29)
END(kvm_debug_vector)

    .org kvm_ia64_ivt+0x5a00
///////////////////////////////////////////////////////////////
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(kvm_unaligned_access)
    KVM_REFLECT(30)
END(kvm_unaligned_access)

    .org kvm_ia64_ivt+0x5b00
//////////////////////////////////////////////////////////////////////
// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
ENTRY(kvm_unsupported_data_reference)
    KVM_REFLECT(31)
END(kvm_unsupported_data_reference)

    .org kvm_ia64_ivt+0x5c00
////////////////////////////////////////////////////////////////////
// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
ENTRY(kvm_floating_point_fault)
    KVM_REFLECT(32)
END(kvm_floating_point_fault)

    .org kvm_ia64_ivt+0x5d00
/////////////////////////////////////////////////////////////////////
// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
ENTRY(kvm_floating_point_trap)
    KVM_REFLECT(33)
END(kvm_floating_point_trap)

    .org kvm_ia64_ivt+0x5e00
//////////////////////////////////////////////////////////////////////
// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
ENTRY(kvm_lower_privilege_trap)
    KVM_REFLECT(34)
END(kvm_lower_privilege_trap)

    .org kvm_ia64_ivt+0x5f00
//////////////////////////////////////////////////////////////////////
// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
ENTRY(kvm_taken_branch_trap)
    KVM_REFLECT(35)
END(kvm_taken_branch_trap)

    .org kvm_ia64_ivt+0x6000
////////////////////////////////////////////////////////////////////
// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
ENTRY(kvm_single_step_trap)
    KVM_REFLECT(36)
END(kvm_single_step_trap)
    .global kvm_virtualization_fault_back
    .org kvm_ia64_ivt+0x6100
/////////////////////////////////////////////////////////////////////
// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
ENTRY(kvm_virtualization_fault)
    mov r31=pr
    adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    ;;
    st8 [r16] = r1
    adds r17 = VMM_VCPU_GP_OFFSET, r21
    ;;
    ld8 r1 = [r17]
    cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
    cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
    cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
    cmp.eq p9,p0=EVENT_RSM,r24
    cmp.eq p10,p0=EVENT_SSM,r24
    cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
    cmp.eq p12,p0=EVENT_THASH,r24
    (p6) br.dptk.many kvm_asm_mov_from_ar
    (p7) br.dptk.many kvm_asm_mov_from_rr
    (p8) br.dptk.many kvm_asm_mov_to_rr
    (p9) br.dptk.many kvm_asm_rsm
    (p10) br.dptk.many kvm_asm_ssm
    (p11) br.dptk.many kvm_asm_mov_to_psr
    (p12) br.dptk.many kvm_asm_thash
    ;;
kvm_virtualization_fault_back:
    adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
    ;;
    ld8 r1 = [r16]
    ;;
    mov r19=37
    adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    ;;
    st8 [r16] = r24
    st8 [r17] = r25
    ;;
    cmp.ne p6,p0=EVENT_RFI, r24
    (p6) br.sptk kvm_dispatch_virtualization_fault
    ;;
    adds r18=VMM_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]
    ;;
    adds r18=VMM_VPD_VIFS_OFFSET,r18
    ;;
    ld8 r18=[r18]
    ;;
    tbit.z p6,p0=r18,63
    (p6) br.sptk kvm_dispatch_virtualization_fault
    ;;
    // if vifs.v == 1, discard the current register frame
    alloc r18=ar.pfs,0,0,0,0
    br.sptk kvm_dispatch_virtualization_fault
END(kvm_virtualization_fault)
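
/*
 * Editor's note: the predicate compares above amount to a switch on
 * the virtualization-fault cause held in r24, in C-like pseudocode:
 *
 *   switch (cause) {                                  // cause == r24
 *   case EVENT_MOV_FROM_AR: goto kvm_asm_mov_from_ar; // fast path
 *   case EVENT_THASH:       goto kvm_asm_thash;       // ... and so on
 *   default: break;         // fall to kvm_virtualization_fault_back
 *   }
 *
 * Only the hot emulation cases are fast-pathed in assembly; everything
 * else is saved and handed to kvm_emulate() through
 * kvm_dispatch_virtualization_fault below.
 */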

    .org kvm_ia64_ivt+0x6200
//////////////////////////////////////////////////////////////
// 0x6200 Entry 38 (size 16 bundles) Reserved
    KVM_FAULT(38)

    .org kvm_ia64_ivt+0x6300
/////////////////////////////////////////////////////////////////
// 0x6300 Entry 39 (size 16 bundles) Reserved
    KVM_FAULT(39)

    .org kvm_ia64_ivt+0x6400
/////////////////////////////////////////////////////////////////
// 0x6400 Entry 40 (size 16 bundles) Reserved
    KVM_FAULT(40)

    .org kvm_ia64_ivt+0x6500
//////////////////////////////////////////////////////////////////
// 0x6500 Entry 41 (size 16 bundles) Reserved
    KVM_FAULT(41)

    .org kvm_ia64_ivt+0x6600
//////////////////////////////////////////////////////////////////
// 0x6600 Entry 42 (size 16 bundles) Reserved
    KVM_FAULT(42)

    .org kvm_ia64_ivt+0x6700
//////////////////////////////////////////////////////////////////
// 0x6700 Entry 43 (size 16 bundles) Reserved
    KVM_FAULT(43)

    .org kvm_ia64_ivt+0x6800
//////////////////////////////////////////////////////////////////
// 0x6800 Entry 44 (size 16 bundles) Reserved
    KVM_FAULT(44)
    .org kvm_ia64_ivt+0x6900
///////////////////////////////////////////////////////////////////
// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
ENTRY(kvm_ia32_exception)
    KVM_FAULT(45)
END(kvm_ia32_exception)

    .org kvm_ia64_ivt+0x6a00
////////////////////////////////////////////////////////////////////
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
ENTRY(kvm_ia32_intercept)
    KVM_FAULT(46)
END(kvm_ia32_intercept)

    .org kvm_ia64_ivt+0x6c00
/////////////////////////////////////////////////////////////////////
// 0x6c00 Entry 48 (size 16 bundles) Reserved
    KVM_FAULT(48)

    .org kvm_ia64_ivt+0x6d00
//////////////////////////////////////////////////////////////////////
// 0x6d00 Entry 49 (size 16 bundles) Reserved
    KVM_FAULT(49)

    .org kvm_ia64_ivt+0x6e00
//////////////////////////////////////////////////////////////////////
// 0x6e00 Entry 50 (size 16 bundles) Reserved
    KVM_FAULT(50)

    .org kvm_ia64_ivt+0x6f00
/////////////////////////////////////////////////////////////////////
// 0x6f00 Entry 51 (size 16 bundles) Reserved
    KVM_FAULT(51)

    .org kvm_ia64_ivt+0x7100
////////////////////////////////////////////////////////////////////
// 0x7100 Entry 53 (size 16 bundles) Reserved
    KVM_FAULT(53)

    .org kvm_ia64_ivt+0x7200
/////////////////////////////////////////////////////////////////////
// 0x7200 Entry 54 (size 16 bundles) Reserved
    KVM_FAULT(54)

    .org kvm_ia64_ivt+0x7300
////////////////////////////////////////////////////////////////////
// 0x7300 Entry 55 (size 16 bundles) Reserved
    KVM_FAULT(55)

    .org kvm_ia64_ivt+0x7400
////////////////////////////////////////////////////////////////////
// 0x7400 Entry 56 (size 16 bundles) Reserved
    KVM_FAULT(56)

    .org kvm_ia64_ivt+0x7500
/////////////////////////////////////////////////////////////////////
// 0x7500 Entry 57 (size 16 bundles) Reserved
    KVM_FAULT(57)

    .org kvm_ia64_ivt+0x7600
/////////////////////////////////////////////////////////////////////
// 0x7600 Entry 58 (size 16 bundles) Reserved
    KVM_FAULT(58)

    .org kvm_ia64_ivt+0x7700
////////////////////////////////////////////////////////////////////
// 0x7700 Entry 59 (size 16 bundles) Reserved
    KVM_FAULT(59)

    .org kvm_ia64_ivt+0x7800
////////////////////////////////////////////////////////////////////
// 0x7800 Entry 60 (size 16 bundles) Reserved
    KVM_FAULT(60)

    .org kvm_ia64_ivt+0x7900
/////////////////////////////////////////////////////////////////////
// 0x7900 Entry 61 (size 16 bundles) Reserved
    KVM_FAULT(61)

    .org kvm_ia64_ivt+0x7a00
/////////////////////////////////////////////////////////////////////
// 0x7a00 Entry 62 (size 16 bundles) Reserved
    KVM_FAULT(62)

    .org kvm_ia64_ivt+0x7b00
/////////////////////////////////////////////////////////////////////
// 0x7b00 Entry 63 (size 16 bundles) Reserved
    KVM_FAULT(63)

    .org kvm_ia64_ivt+0x7c00
////////////////////////////////////////////////////////////////////
// 0x7c00 Entry 64 (size 16 bundles) Reserved
    KVM_FAULT(64)

    .org kvm_ia64_ivt+0x7d00
/////////////////////////////////////////////////////////////////////
// 0x7d00 Entry 65 (size 16 bundles) Reserved
    KVM_FAULT(65)

    .org kvm_ia64_ivt+0x7e00
/////////////////////////////////////////////////////////////////////
// 0x7e00 Entry 66 (size 16 bundles) Reserved
    KVM_FAULT(66)

    .org kvm_ia64_ivt+0x7f00
////////////////////////////////////////////////////////////////////
// 0x7f00 Entry 67 (size 16 bundles) Reserved
    KVM_FAULT(67)

    .org kvm_ia64_ivt+0x8000
// There is no particular reason for this code to be here, other than that
// there happens to be space here that would go unused otherwise.  If this
// fault ever gets "unreserved", simply move the following code to a more
// suitable spot...

ENTRY(kvm_dtlb_miss_dispatch)
    mov r19 = 2
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,3,0
    mov out0=cr.ifa
    mov out1=r15
    adds r3=8,r2                // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                  // guarantee that interruption collection is on
    ;;
    //(p15) ssm psr.i               // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
    ;;
    KVM_SAVE_REST
    KVM_SAVE_EXTRA
    mov rp=r14
    ;;
    adds out2=16,r12
    br.call.sptk.many b6=kvm_page_fault
END(kvm_dtlb_miss_dispatch)

ENTRY(kvm_itlb_miss_dispatch)
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,3,0
    mov out0=cr.ifa
    mov out1=r15
    adds r3=8,r2                // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                  // guarantee that interruption collection is on
    ;;
    //(p15) ssm psr.i               // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    adds out2=16,r12
    br.call.sptk.many b6=kvm_page_fault
END(kvm_itlb_miss_dispatch)

ENTRY(kvm_dispatch_reflection)
    /*
     * Input:
     *  psr.ic: off
     *  r19:    intr type (offset into ivt, see ia64_int.h)
     *  r31:    contains saved predicates (pr)
     */
    KVM_SAVE_MIN_WITH_COVER_R19
    alloc r14=ar.pfs,0,0,5,0
    mov out0=cr.ifa
    mov out1=cr.isr
    mov out2=cr.iim
    mov out3=r15
    adds r3=8,r2                // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                  // guarantee that interruption collection is on
    ;;
    //(p15) ssm psr.i               // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    adds out4=16,r12
    br.call.sptk.many b6=reflect_interruption
END(kvm_dispatch_reflection)

ENTRY(kvm_dispatch_virtualization_fault)
    adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
    adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
    ;;
    st8 [r16] = r24
    st8 [r17] = r25
    ;;
    KVM_SAVE_MIN_WITH_COVER_R19
    ;;
    alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
    mov out0=r13        //vcpu
    adds r3=8,r2                // set up second base pointer
    ;;
    ssm psr.ic
    ;;
    srlz.i                  // guarantee that interruption collection is on
    ;;
    //(p15) ssm psr.i               // restore psr.i
    addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
    ;;
    KVM_SAVE_REST
    KVM_SAVE_EXTRA
    mov rp=r14
    ;;
    adds out1=16,sp         //regs
    br.call.sptk.many b6=kvm_emulate
END(kvm_dispatch_virtualization_fault)


ENTRY(kvm_dispatch_interrupt)
    KVM_SAVE_MIN_WITH_COVER_R19 // uses r31; defines r2 and r3
    ;;
    alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
    //mov out0=cr.ivr           // pass cr.ivr as first arg
    adds r3=8,r2                // set up second base pointer for SAVE_REST
    ;;
    ssm psr.ic
    ;;
    srlz.i
    ;;
    //(p15) ssm psr.i
    addl r14=@gprel(ia64_leave_hypervisor),gp
    ;;
    KVM_SAVE_REST
    mov rp=r14
    ;;
    mov out0=r13                // pass the vcpu (current) as the only arg
    br.call.sptk.many b6=kvm_ia64_handle_irq
END(kvm_dispatch_interrupt)


GLOBAL_ENTRY(ia64_leave_nested)
        rsm psr.i
        ;;
        adds r21=PT(PR)+16,r12
        ;;
        lfetch [r21],PT(CR_IPSR)-PT(PR)
        adds r2=PT(B6)+16,r12
        adds r3=PT(R16)+16,r12
        ;;
        lfetch [r21]
        ld8 r28=[r2],8          // load b6
        adds r29=PT(R24)+16,r12

        ld8.fill r16=[r3]
        adds r3=PT(AR_CSD)-PT(R16),r3
        adds r30=PT(AR_CCV)+16,r12
        ;;
        ld8.fill r24=[r29]
        ld8 r15=[r30]           // load ar.ccv
        ;;
        ld8 r29=[r2],16         // load b7
        ld8 r30=[r3],16         // load ar.csd
        ;;
        ld8 r31=[r2],16         // load ar.ssd
        ld8.fill r8=[r3],16
        ;;
        ld8.fill r9=[r2],16
        ld8.fill r10=[r3],PT(R17)-PT(R10)
        ;;
        ld8.fill r11=[r2],PT(R18)-PT(R11)
        ld8.fill r17=[r3],16
        ;;
        ld8.fill r18=[r2],16
        ld8.fill r19=[r3],16
        ;;
        ld8.fill r20=[r2],16
        ld8.fill r21=[r3],16
        mov ar.csd=r30
        mov ar.ssd=r31
        ;;
        rsm psr.i | psr.ic
        // initiate turning off of interrupt and interruption collection
        invala                  // invalidate ALAT
        ;;
        srlz.i
        ;;
        ld8.fill r22=[r2],24
        ld8.fill r23=[r3],24
        mov b6=r28
        ;;
        ld8.fill r25=[r2],16
        ld8.fill r26=[r3],16
        mov b7=r29
        ;;
        ld8.fill r27=[r2],16
        ld8.fill r28=[r3],16
        ;;
        ld8.fill r29=[r2],16
        ld8.fill r30=[r3],24
        ;;
        ld8.fill r31=[r2],PT(F9)-PT(R31)
        adds r3=PT(F10)-PT(F6),r3
        ;;
        ldf.fill f9=[r2],PT(F6)-PT(F9)
        ldf.fill f10=[r3],PT(F8)-PT(F10)
        ;;
        ldf.fill f6=[r2],PT(F7)-PT(F6)
        ;;
        ldf.fill f7=[r2],PT(F11)-PT(F7)
        ldf.fill f8=[r3],32
        ;;
        srlz.i                  // ensure interruption collection is off
        mov ar.ccv=r15
        ;;
        bsw.0   // switch back to bank 0 (no stop bit required beforehand...)
        ;;
        ldf.fill f11=[r2]
//      mov r18=r13
//      mov r21=r13
        adds r16=PT(CR_IPSR)+16,r12
        adds r17=PT(CR_IIP)+16,r12
        ;;
        ld8 r29=[r16],16        // load cr.ipsr
        ld8 r28=[r17],16        // load cr.iip
        ;;
        ld8 r30=[r16],16        // load cr.ifs
        ld8 r25=[r17],16        // load ar.unat
        ;;
        ld8 r26=[r16],16        // load ar.pfs
        ld8 r27=[r17],16        // load ar.rsc
        cmp.eq p9,p0=r0,r0
        // set p9 to indicate that we should restore cr.ifs
        ;;
        ld8 r24=[r16],16        // load ar.rnat (may be garbage)
        ld8 r23=[r17],16        // load ar.bspstore (may be garbage)
        ;;
        ld8 r31=[r16],16        // load predicates
        ld8 r22=[r17],16        // load b0
        ;;
        ld8 r19=[r16],16        // load ar.rsc value for "loadrs"
        ld8.fill r1=[r17],16    // load r1
        ;;
        ld8.fill r12=[r16],16
        ld8.fill r13=[r17],16
        ;;
        ld8 r20=[r16],16        // ar.fpsr
        ld8.fill r15=[r17],16
        ;;
        ld8.fill r14=[r16],16
        ld8.fill r2=[r17]
        ;;
        ld8.fill r3=[r16]
        ;;
        mov r16=ar.bsp          // get existing backing store pointer
        ;;
        mov b0=r22
        mov ar.pfs=r26
        mov cr.ifs=r30
        mov cr.ipsr=r29
        mov ar.fpsr=r20
        mov cr.iip=r28
        ;;
        mov ar.rsc=r27
        mov ar.unat=r25
        mov pr=r31,-1
        rfi
END(ia64_leave_nested)


GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
    /*
     * work.need_resched etc. mustn't get changed
     * by this CPU before it returns to
     * user- or fsys-mode, hence we disable interrupts early on:
     */
    adds r2 = PT(R4)+16,r12
    adds r3 = PT(R5)+16,r12
    adds r8 = PT(EML_UNAT)+16,r12
    ;;
    ld8 r8 = [r8]
    ;;
    mov ar.unat=r8
    ;;
    ld8.fill r4=[r2],16    //load r4
    ld8.fill r5=[r3],16    //load r5
    ;;
    ld8.fill r6=[r2]    //load r6
    ld8.fill r7=[r3]    //load r7
    ;;
END(ia64_leave_hypervisor_prepare)
//fall through
GLOBAL_ENTRY(ia64_leave_hypervisor)
    rsm psr.i
    ;;
    br.call.sptk.many b0=leave_hypervisor_tail
    ;;
    adds r20=PT(PR)+16,r12
    adds r8=PT(EML_UNAT)+16,r12
    ;;
    ld8 r8=[r8]
    ;;
    mov ar.unat=r8
    ;;
    lfetch [r20],PT(CR_IPSR)-PT(PR)
    adds r2 = PT(B6)+16,r12
    adds r3 = PT(B7)+16,r12
    ;;
    lfetch [r20]
    ;;
    ld8 r24=[r2],16        /* B6 */
    ld8 r25=[r3],16        /* B7 */
    ;;
    ld8 r26=[r2],16        /* ar_csd */
    ld8 r27=[r3],16        /* ar_ssd */
    mov b6 = r24
    ;;
    ld8.fill r8=[r2],16
    ld8.fill r9=[r3],16
    mov b7 = r25
    ;;
    mov ar.csd = r26
    mov ar.ssd = r27
    ;;
    ld8.fill r10=[r2],PT(R15)-PT(R10)
    ld8.fill r11=[r3],PT(R14)-PT(R11)
    ;;
    ld8.fill r15=[r2],PT(R16)-PT(R15)
    ld8.fill r14=[r3],PT(R17)-PT(R14)
    ;;
    ld8.fill r16=[r2],16
    ld8.fill r17=[r3],16
    ;;
    ld8.fill r18=[r2],16
    ld8.fill r19=[r3],16
    ;;
    ld8.fill r20=[r2],16
    ld8.fill r21=[r3],16
    ;;
    ld8.fill r22=[r2],16
    ld8.fill r23=[r3],16
    ;;
    ld8.fill r24=[r2],16
    ld8.fill r25=[r3],16
    ;;
    ld8.fill r26=[r2],16
    ld8.fill r27=[r3],16
    ;;
    ld8.fill r28=[r2],16
    ld8.fill r29=[r3],16
    ;;
    ld8.fill r30=[r2],PT(F6)-PT(R30)
    ld8.fill r31=[r3],PT(F7)-PT(R31)
    ;;
    rsm psr.i | psr.ic
    // initiate turning off of interrupt and interruption collection
    invala          // invalidate ALAT
    ;;
    srlz.i          // ensure interruption collection is off
    ;;
    bsw.0
    ;;
    adds r16 = PT(CR_IPSR)+16,r12
    adds r17 = PT(CR_IIP)+16,r12
    mov r21=r13         // get current
    ;;
    ld8 r31=[r16],16    // load cr.ipsr
    ld8 r30=[r17],16    // load cr.iip
    ;;
    ld8 r29=[r16],16    // load cr.ifs
    ld8 r28=[r17],16    // load ar.unat
    ;;
    ld8 r27=[r16],16    // load ar.pfs
    ld8 r26=[r17],16    // load ar.rsc
    ;;
    ld8 r25=[r16],16    // load ar.rnat
    ld8 r24=[r17],16    // load ar.bspstore
    ;;
    ld8 r23=[r16],16    // load predicates
    ld8 r22=[r17],16    // load b0
    ;;
    ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
    ld8.fill r1=[r17],16    //load r1
    ;;
    ld8.fill r12=[r16],16    //load r12
    ld8.fill r13=[r17],PT(R2)-PT(R13)    //load r13
    ;;
    ld8 r19=[r16],PT(R3)-PT(AR_FPSR)    //load ar_fpsr
    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)    //load r2
    ;;
    ld8.fill r3=[r16]   //load r3
    ld8 r18=[r17]       //load ar_ccv
    ;;
    mov ar.fpsr=r19
    mov ar.ccv=r18
    shr.u r18=r20,16
    ;;
kvm_rbs_switch:
    mov r19=96

kvm_dont_preserve_current_frame:
/*
 * To prevent leaking bits between the hypervisor and guest domain,
 * we must clear the stacked registers in the "invalid" partition here.
 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
 * 5 registers/cycle on McKinley).
 */
#   define pRecurse     p6
#   define pReturn      p7
#   define Nregs        14

    alloc loc0=ar.pfs,2,Nregs-2,2,0
    shr.u loc1=r18,9            // RNaTslots <= floor(dirtySize / (64*8))
    sub r19=r19,r18             // r19 = (physStackedSize + 8) - dirtySize
    ;;
    mov ar.rsc=r20              // load ar.rsc to be used for "loadrs"
    shladd in0=loc1,3,r19
    mov in1=0
    ;;
    TEXT_ALIGN(32)
kvm_rse_clear_invalid:
    alloc loc0=ar.pfs,2,Nregs-2,2,0
    cmp.lt pRecurse,p0=Nregs*8,in0
    // if more than Nregs regs left to clear, (re)curse
    add out0=-Nregs*8,in0
    add out1=1,in1              // increment recursion count
    mov loc1=0
    mov loc2=0
    ;;
    mov loc3=0
    mov loc4=0
    mov loc5=0
    mov loc6=0
    mov loc7=0
(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
    ;;
    mov loc8=0
    mov loc9=0
    cmp.ne pReturn,p0=r0,in1
    // if recursion count != 0, we need to do a br.ret
    mov loc10=0
    mov loc11=0
(pReturn) br.ret.dptk.many b0

#       undef pRecurse
#       undef pReturn
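
/*
 * Editor's note: the register-frame recursion above is, in C-like
 * pseudocode,
 *
 *   void clear(long nbytes, long depth)
 *   {
 *           // each activation allocates Nregs-2 stacked locals and
 *           // zeroes them (the mov locN=0 instructions)
 *           if (nbytes > Nregs * 8)
 *                   clear(nbytes - Nregs * 8, depth + 1);  // pRecurse
 *           if (depth != 0)
 *                   return;                                // pReturn
 *   }
 *
 * Every br.call pushes a fresh register frame, so zeroing each frame's
 * locals wipes the entire invalid partition of the RSE backing store,
 * several registers per cycle.
 */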

// loadrs has already been shifted
    alloc r16=ar.pfs,0,0,0,0    // drop current register frame
    ;;
    loadrs
    ;;
    mov ar.bspstore=r24
    ;;
    mov ar.unat=r28
    mov ar.rnat=r25
    mov ar.rsc=r26
    ;;
    mov cr.ipsr=r31
    mov cr.iip=r30
    mov cr.ifs=r29
    mov ar.pfs=r27
    adds r18=VMM_VPD_BASE_OFFSET,r21
    ;;
    ld8 r18=[r18]   //vpd
    adds r17=VMM_VCPU_ISR_OFFSET,r21
    ;;
    ld8 r17=[r17]
    adds r19=VMM_VPD_VPSR_OFFSET,r18
    ;;
    ld8 r19=[r19]        //vpsr
    mov r25=r18
    adds r16= VMM_VCPU_GP_OFFSET,r21
    ;;
    ld8 r16= [r16]      // load the guest gp; added into r24 below
    movl r24=@gprel(ia64_vmm_entry)  // calculate return address
    ;;
    add  r24=r24,r16
    ;;
    br.sptk.many  kvm_vps_sync_write       // call the service
    ;;
END(ia64_leave_hypervisor)
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
/*
 *  must be at bank 0
 *  parameter:
 *  r17: cr.isr
 *  r18: vpd
 *  r19: vpsr
 *  r22: b0
 *  r23: predicates
 */
    mov r24=r22
    mov r25=r18
    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
    (p1) br.cond.sptk.few kvm_vps_resume_normal
    (p2) br.cond.sptk.many kvm_vps_resume_handler
    ;;
END(ia64_vmm_entry)


/*
 * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
 *                  u64 arg3, u64 arg4, u64 arg5,
 *                  u64 arg6, u64 arg7);
 *
 * XXX: The currently defined services use only 4 args at the max. The
 *  rest are not consumed.
 */
GLOBAL_ENTRY(ia64_call_vsa)
    .regstk 4,4,0,0

rpsave  =   loc0
pfssave =   loc1
psrsave =   loc2
entry   =   loc3
hostret =   r24

    alloc   pfssave=ar.pfs,4,4,0,0
    mov rpsave=rp
    adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
    ;;
    ld8 entry=[entry]
1:  mov hostret=ip
    mov r25=in1         // copy arguments
    mov r26=in2
    mov r27=in3
    mov psrsave=psr
    ;;
    tbit.nz p6,p0=psrsave,14    // IA64_PSR_I
    tbit.nz p7,p0=psrsave,13    // IA64_PSR_IC
    ;;
    add hostret=2f-1b,hostret   // calculate return address
    add entry=entry,in0
    ;;
    rsm psr.i | psr.ic
    ;;
    srlz.i
    mov b6=entry
    br.cond.sptk b6         // call the service
2:
    // Architectural sequence for enabling interrupts if necessary
(p7)    ssm psr.ic
    ;;
(p7)    srlz.i
    ;;
//(p6)    ssm psr.i
    ;;
    mov rp=rpsave
    mov ar.pfs=pfssave
    mov r8=r31
    ;;
    srlz.d
    br.ret.sptk rp

END(ia64_call_vsa)
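
/*
 * Editor's note: a hypothetical call site (the service index name is
 * assumed, per the PAL_VPS_* offsets in vti.h):
 *
 *   ret = ia64_call_vsa(PAL_VPS_SYNC_READ, vpd, 0, 0, 0, 0, 0, 0);
 *
 * Only proc and arg1..arg3 (in0..in3) are actually forwarded; in0 is
 * added to the VSA entry point and in1..in3 are copied into r25..r27,
 * as the XXX note above says.
 */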

#define  INIT_BSPSTORE  ((4<<30)-(12<<20)-0x100)

GLOBAL_ENTRY(vmm_reset_entry)
    // set up ipsr, iip, vpd.vpsr, dcr
    // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
    // For DCR: all bits 0
    bsw.0
    ;;
    mov r21=r13
    adds r14=-VMM_PT_REGS_SIZE, r12
    ;;
    movl r6=0x501008826000      // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
    movl r10=0x8000000000000000
    adds r16=PT(CR_IIP), r14
    adds r20=PT(R1), r14
    ;;
    rsm psr.ic | psr.i
    ;;
    srlz.i
    ;;
    mov ar.rsc = 0
    ;;
    flushrs
    ;;
    mov ar.bspstore = 0
    // clear BSPSTORE
    ;;
    mov cr.ipsr=r6
    mov cr.ifs=r10
    ld8 r4 = [r16] // Set init iip for first run.
    ld8 r1 = [r20]
    ;;
    mov cr.iip=r4
    adds r16=VMM_VPD_BASE_OFFSET,r13
    ;;
    ld8 r18=[r16]
    ;;
    adds r19=VMM_VPD_VPSR_OFFSET,r18
    ;;
    ld8 r19=[r19]
    mov r17=r0
    mov r22=r0
    mov r23=r0
    br.cond.sptk ia64_vmm_entry
    br.ret.sptk  b0
END(vmm_reset_entry)
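
/*
 * Editor's note: decoding the IPSR constant used above, 0x501008826000
 * sets exactly psr.ic(13), psr.i(14), psr.dt(17), psr.si(23),
 * psr.rt(27), psr.it(36), psr.bn(44) and psr.vm(46), i.e. the
 * "dt/rt/it:1; i/ic:1, si:1, vm/bn:1" that the movl's comment claims.
 */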