#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

#include "x86.h"
#include "tss.h"
#include "mmu.h"
#include "pmu.h"
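
/*
 * Operand types
 */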
#define OpNone             0ull
#define OpImplicit         1ull
#define OpReg              2ull
#define OpMem              3ull
#define OpAcc              4ull
#define OpDI               5ull
#define OpMem64            6ull
#define OpImmUByte         7ull
#define OpDX               8ull
#define OpCL               9ull
#define OpImmByte         10ull
#define OpOne             11ull
#define OpImm             12ull
#define OpMem16           13ull
#define OpMem32           14ull
#define OpImmU            15ull
#define OpSI              16ull
#define OpImmFAddr        17ull
#define OpMemFAddr        18ull
#define OpImmU16          19ull
#define OpES              20ull
#define OpCS              21ull
#define OpSS              22ull
#define OpDS              23ull
#define OpFS              24ull
#define OpGS              25ull
#define OpMem8            26ull
#define OpImm64           27ull
#define OpXLat            28ull
#define OpAccLo           29ull
#define OpAccHi           30ull

#define OpBits             5
#define OpMask             ((1ull << OpBits) - 1)
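
/*
 * Instruction decode flags: destination, source and second-source operand
 * types plus assorted decode hints, packed into the 56-bit opcode::flags.
 */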
#define ByteOp      (1<<0)

#define DstShift    1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg      (OpReg << DstShift)
#define DstMem      (OpMem << DstShift)
#define DstAcc      (OpAcc << DstShift)
#define DstDI       (OpDI << DstShift)
#define DstMem64    (OpMem64 << DstShift)
#define DstMem16    (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX       (OpDX << DstShift)
#define DstAccLo    (OpAccLo << DstShift)
#define DstMask     (OpMask << DstShift)

#define SrcShift    6
#define SrcNone     (OpNone << SrcShift)
#define SrcReg      (OpReg << SrcShift)
#define SrcMem      (OpMem << SrcShift)
#define SrcMem16    (OpMem16 << SrcShift)
#define SrcMem32    (OpMem32 << SrcShift)
#define SrcImm      (OpImm << SrcShift)
#define SrcImmByte  (OpImmByte << SrcShift)
#define SrcOne      (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU     (OpImmU << SrcShift)
#define SrcSI       (OpSI << SrcShift)
#define SrcXLat     (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc      (OpAcc << SrcShift)
#define SrcImmU16   (OpImmU16 << SrcShift)
#define SrcImm64    (OpImm64 << SrcShift)
#define SrcDX       (OpDX << SrcShift)
#define SrcMem8     (OpMem8 << SrcShift)
#define SrcAccHi    (OpAccHi << SrcShift)
#define SrcMask     (OpMask << SrcShift)
#define BitOp       (1<<11)
#define MemAbs      (1<<12)
#define String      (1<<13)
#define Stack       (1<<14)
#define GroupMask   (7<<15)
#define Group       (1<<15)
#define GroupDual   (2<<15)
#define Prefix      (3<<15)
#define RMExt       (4<<15)
#define Escape      (5<<15)
#define InstrDual   (6<<15)
#define ModeDual    (7<<15)
#define Sse         (1<<18)

#define ModRM       (1<<19)

#define Mov         (1<<20)

#define Prot        (1<<21)
#define EmulateOnUD (1<<22)
#define NoAccess    (1<<23)
#define Op3264      (1<<24)
#define Undefined   (1<<25)
#define Lock        (1<<26)
#define Priv        (1<<27)
#define No64        (1<<28)
#define PageTable   (1 << 29)
#define NotImpl     (1 << 30)

#define Src2Shift   (31)
#define Src2None    (OpNone << Src2Shift)
#define Src2Mem     (OpMem << Src2Shift)
#define Src2CL      (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One     (OpOne << Src2Shift)
#define Src2Imm     (OpImm << Src2Shift)
#define Src2ES      (OpES << Src2Shift)
#define Src2CS      (OpCS << Src2Shift)
#define Src2SS      (OpSS << Src2Shift)
#define Src2DS      (OpDS << Src2Shift)
#define Src2FS      (OpFS << Src2Shift)
#define Src2GS      (OpGS << Src2Shift)
#define Src2Mask    (OpMask << Src2Shift)
#define Mmx         ((u64)1 << 40)
#define AlignMask   ((u64)7 << 41)
#define Aligned     ((u64)1 << 41)
#define Unaligned   ((u64)2 << 41)
#define Avx         ((u64)3 << 41)
#define Aligned16   ((u64)4 << 41)
#define Fastop      ((u64)1 << 44)
#define NoWrite     ((u64)1 << 45)
#define SrcWrite    ((u64)1 << 46)
#define NoMod       ((u64)1 << 47)
#define Intercept   ((u64)1 << 48)
#define CheckPerm   ((u64)1 << 49)
#define PrivUD      ((u64)1 << 51)
#define NearBranch  ((u64)1 << 52)
#define No16        ((u64)1 << 53)
#define IncSP       ((u64)1 << 54)
#define TwoMemOp    ((u64)1 << 55)

#define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)

#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

struct opcode {
        u64 flags : 56;
        u64 intercept : 8;
        union {
                int (*execute)(struct x86_emulate_ctxt *ctxt);
                const struct opcode *group;
                const struct group_dual *gdual;
                const struct gprefix *gprefix;
                const struct escape *esc;
                const struct instr_dual *idual;
                const struct mode_dual *mdual;
                void (*fastop)(struct fastop *fake);
        } u;
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
        struct opcode mod012[8];
        struct opcode mod3[8];
};

struct gprefix {
        struct opcode pfx_no;
        struct opcode pfx_66;
        struct opcode pfx_f2;
        struct opcode pfx_f3;
};

struct escape {
        struct opcode op[8];
        struct opcode high[64];
};

struct instr_dual {
        struct opcode mod012;
        struct opcode mod3;
};

struct mode_dual {
        struct opcode mode32;
        struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
        X86_TRANSFER_NONE,
        X86_TRANSFER_CALL_JMP,
        X86_TRANSFER_RET,
        X86_TRANSFER_TASK_SWITCH,
};
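
/*
 * GPRs are read from the vCPU lazily and cached in ctxt->_regs; writes
 * are tracked in regs_dirty and flushed back by writeback_registers().
 */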
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        if (!(ctxt->regs_valid & (1 << nr))) {
                ctxt->regs_valid |= 1 << nr;
                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
        }
        return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        ctxt->regs_valid |= 1 << nr;
        ctxt->regs_dirty |= 1 << nr;
        return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
        reg_read(ctxt, nr);
        return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
        unsigned reg;

        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
        ctxt->regs_dirty = 0;
        ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
                     X86_EFLAGS_PF|X86_EFLAGS_CF)

#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif
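
/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */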
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);

#define __FOP_FUNC(name) \
        ".align " __stringify(FASTOP_SIZE) " \n\t" \
        ".type " name ", @function \n\t" \
        name ":\n\t"

#define FOP_FUNC(name) \
        __FOP_FUNC(#name)

#define __FOP_RET(name) \
        "ret \n\t" \
        ".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
        __FOP_RET(#name)

#define FOP_START(op) \
        extern void em_##op(struct fastop *fake); \
        asm(".pushsection .text, \"ax\" \n\t" \
            ".global em_" #op " \n\t" \
            ".align " __stringify(FASTOP_SIZE) " \n\t" \
            "em_" #op ":\n\t"

#define FOP_END \
            ".popsection")

#define __FOPNOP(name) \
        __FOP_FUNC(name) \
        __FOP_RET(name)

#define FOPNOP() \
        __FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op, dst) \
        __FOP_FUNC(#op "_" #dst) \
        "10: " #op " %" #dst " \n\t" \
        __FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
        FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)

#define FASTOP1(op) \
        FOP_START(op) \
        FOP1E(op##b, al) \
        FOP1E(op##w, ax) \
        FOP1E(op##l, eax) \
        ON64(FOP1E(op##q, rax)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
        FOP_START(name) \
        FOP1E(op, cl) \
        FOP1E(op, cx) \
        FOP1E(op, ecx) \
        ON64(FOP1E(op, rcx)) \
        FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
        FOP_START(name) \
        FOP1EEX(op, cl) \
        FOP1EEX(op, cx) \
        FOP1EEX(op, ecx) \
        ON64(FOP1EEX(op, rcx)) \
        FOP_END

#define FOP2E(op, dst, src) \
        __FOP_FUNC(#op "_" #dst "_" #src) \
        #op " %" #src ", %" #dst " \n\t" \
        __FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
        FOP_START(op) \
        FOP2E(op##b, al, dl) \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP2E(op##w, ax, dx) \
        FOP2E(op##l, eax, edx) \
        ON64(FOP2E(op##q, rax, rdx)) \
        FOP_END

/* 2 operand, src is CL (ROL/ROR/SHL/SHR/SAR) */
#define FASTOP2CL(op) \
        FOP_START(op) \
        FOP2E(op##b, al, cl) \
        FOP2E(op##w, ax, cl) \
        FOP2E(op##l, eax, cl) \
        ON64(FOP2E(op##q, rax, cl)) \
        FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
        FOP_START(name) \
        FOP2E(op##b, dl, al) \
        FOP2E(op##w, dx, ax) \
        FOP2E(op##l, edx, eax) \
        ON64(FOP2E(op##q, rdx, rax)) \
        FOP_END

#define FOP3E(op, dst, src, src2) \
        __FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
        #op " %" #src2 ", %" #src ", %" #dst " \n\t"\
        __FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
        FOP_START(op) \
        FOPNOP() \
        FOP3E(op##w, ax, dx, cl) \
        FOP3E(op##l, eax, edx, cl) \
        ON64(FOP3E(op##q, rax, rdx, cl)) \
        FOP_END

/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) \
        ".align 4 \n\t" \
        ".type " #op ", @function \n\t" \
        #op ": \n\t" \
        #op " %al \n\t" \
        __FOP_RET(#op)

asm(".pushsection .fixup, \"ax\"\n"
    ".global kvm_fastop_exception \n"
    "kvm_fastop_exception: xor %esi, %esi; ret\n"
    ".popsection");

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;
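
/*
 * Run a given asm snippet with exception fixup: evaluates to
 * X86EMUL_UNHANDLEABLE if the instruction faults and to
 * X86EMUL_CONTINUE otherwise.
 */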
#define asm_safe(insn, inoutclob...) \
({ \
        int _fault = 0; \
 \
        asm volatile("1:" insn "\n" \
                     "2:\n" \
                     ".pushsection .fixup, \"ax\"\n" \
                     "3: movl $1, %[_fault]\n" \
                     "   jmp 2b\n" \
                     ".popsection\n" \
                     _ASM_EXTABLE(1b, 3b) \
                     : [_fault] "+qm"(_fault) inoutclob ); \
 \
        _fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                                    enum x86_intercept intercept,
                                    enum x86_intercept_stage stage)
{
        struct x86_instruction_info info = {
                .intercept  = intercept,
                .rep_prefix = ctxt->rep_prefix,
                .modrm_mod  = ctxt->modrm_mod,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
                .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
                .next_rip   = ctxt->eip,
        };

        return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
        *dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
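        /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */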
        switch (bytes) {
        case 1:
                *(u8 *)reg = (u8)val;
                break;
        case 2:
                *(u16 *)reg = (u16)val;
                break;
        case 4:
                *reg = (u32)val;
                break;
        case 8:
                *reg = val;
                break;
        }
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
        return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
        u16 sel;
        struct desc_struct ss;

        if (ctxt->mode == X86EMUL_MODE_PROT64)
                return ~0UL;
        ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
        return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
        return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
        if (ctxt->ad_bytes == sizeof(unsigned long))
                return reg;
        else
                return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
        return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
        assign_masked(reg, *reg + inc, mask);
}

static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
        ulong *preg = reg_rmw(ctxt, reg);

        assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
        u32 limit = get_desc_limit(desc);

        return desc->g ? (limit << 12) | 0xfff : limit;
}

static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;

        return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
{
        WARN_ON(vec > 0x1f);
        ctxt->exception.vector = vec;
        ctxt->exception.error_code = error;
        ctxt->exception.error_code_valid = valid;
        return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
        return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
        return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
        u16 selector;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
        return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
                                 unsigned seg)
{
        u16 dummy;
        u32 base3;
        struct desc_struct desc;

        ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
        ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
        return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
                                                struct x86_emulate_ctxt *ctxt)
{
        return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
}
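
/*
 * Alignment required for memory operands of vector instructions:
 * Unaligned and Avx impose no alignment, Aligned16 always requires 16
 * bytes, and Aligned (the default) requires operand-size alignment.
 */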
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
        u64 alignment = ctxt->d & AlignMask;

        if (likely(size < 16))
                return 1;

        switch (alignment) {
        case Unaligned:
        case Avx:
                return 1;
        case Aligned16:
                return 16;
        case Aligned:
        default:
                return size;
        }
}

static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
                                       struct segmented_address addr,
                                       unsigned *max_size, unsigned size,
                                       bool write, bool fetch,
                                       enum x86emul_mode mode, ulong *linear)
{
        struct desc_struct desc;
        bool usable;
        ulong la;
        u32 lim;
        u16 sel;
        u8 va_bits;

        la = seg_base(ctxt, addr.seg) + addr.ea;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
                *linear = la;
                va_bits = ctxt_virt_addr_bits(ctxt);
                if (get_canonical(la, va_bits) != la)
                        goto bad;

                *max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
                if (size > *max_size)
                        goto bad;
                break;
        default:
                *linear = la = (u32)la;
                usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
                                                addr.seg);
                if (!usable)
                        goto bad;

                if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
                     || !(desc.type & 2)) && write)
                        goto bad;

                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
                if (!(desc.type & 8) && (desc.type & 4)) {
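                        /* expand-down segment */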
                        if (addr.ea <= lim)
                                goto bad;
                        lim = desc.d ? 0xffffffff : 0xffff;
                }
                if (addr.ea > lim)
                        goto bad;
                if (lim == 0xffffffff)
                        *max_size = ~0u;
                else {
                        *max_size = (u64)lim + 1 - addr.ea;
                        if (size > *max_size)
                                goto bad;
                }
                break;
        }
        if (la & (insn_alignment(ctxt, size) - 1))
                return emulate_gp(ctxt, 0);
        return X86EMUL_CONTINUE;
bad:
        if (addr.seg == VCPU_SREG_SS)
                return emulate_ss(ctxt, 0);
        else
                return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
                     struct segmented_address addr,
                     unsigned size, bool write,
                     ulong *linear)
{
        unsigned max_size;
        return __linearize(ctxt, addr, &max_size, size, write, false,
                           ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
                             enum x86emul_mode mode)
{
        ulong linear;
        int rc;
        unsigned max_size;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = dst };

        if (ctxt->op_bytes != sizeof(unsigned long))
                addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
        rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
        if (rc == X86EMUL_CONTINUE)
                ctxt->_eip = addr.ea;
        return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
        return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
                          const struct desc_struct *cs_desc)
{
        enum x86emul_mode mode = ctxt->mode;
        int rc;

#ifdef CONFIG_X86_64
        if (ctxt->mode >= X86EMUL_MODE_PROT16) {
                if (cs_desc->l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                mode = X86EMUL_MODE_PROT64;
                } else
                        mode = X86EMUL_MODE_PROT32; /* temporary value */
        }
#endif
        if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
                mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
        rc = assign_eip(ctxt, dst, mode);
        if (rc == X86EMUL_CONTINUE)
                ctxt->mode = mode;
        return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
        return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
                              void *data, unsigned size)
{
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
                               ulong linear, void *data,
                               unsigned int size)
{
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
                              struct segmented_address addr,
                              void *data,
                              unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
                               struct segmented_address addr,
                               void *data,
                               unsigned int size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}
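
/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */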
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
        int rc;
        unsigned size, max_size;
        unsigned long linear;
        int cur_size = ctxt->fetch.end - ctxt->fetch.data;
        struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                          .ea = ctxt->eip + cur_size };
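
        /*
         * We do not know exactly how many bytes will be needed, and
         * __linearize is expensive, so fetch as much as possible.  We
         * just have to avoid going beyond the 15 byte limit, the end
         * of the segment, or the end of the page.
         *
         * __linearize is called with size 0 so that it does not do any
         * boundary check itself.  Instead, we use max_size to check
         * against op_size.
         */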
        rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
                         &linear);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;

        size = min_t(unsigned, 15UL ^ cur_size, max_size);
        size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
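
        /*
         * One instruction can only straddle two pages, and one has been
         * loaded at the beginning of x86_decode_insn.  So, if not enough
         * bytes still, we must have hit the 15-byte boundary.
         */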
        if (unlikely(size < op_size))
                return emulate_gp(ctxt, 0);

        rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                              size, &ctxt->exception);
        if (unlikely(rc != X86EMUL_CONTINUE))
                return rc;
        ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
                                               unsigned size)
{
        unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

        if (unlikely(done_size < size))
                return __do_insn_fetch_bytes(ctxt, size - done_size);
        else
                return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({      _type _x; \
 \
        rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += sizeof(_type); \
        memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
        ctxt->fetch.ptr += sizeof(_type); \
        _x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
        rc = do_insn_fetch_bytes(_ctxt, _size); \
        if (rc != X86EMUL_CONTINUE) \
                goto done; \
        ctxt->_eip += (_size); \
        memcpy(_arr, ctxt->fetch.ptr, _size); \
        ctxt->fetch.ptr += (_size); \
})
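
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @byteop specifies whether the high byte registers AH,CH,DH,BH may be
 * decoded (legacy encoding without a REX prefix).
 */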
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                             int byteop)
{
        void *p;
        int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

        if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
        else
                p = reg_rmw(ctxt, modrm_reg);
        return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           u16 *size, unsigned long *address, int op_bytes)
{
        int rc;

        if (op_bytes == 2)
                op_bytes = 3;
        *address = 0;
        rc = segmented_read_std(ctxt, addr, size, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = segmented_read_std(ctxt, addr, address, op_bytes);
        return rc;
}

FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP3WCL(shld);
FASTOP3WCL(shrd);

FASTOP2W(imul);

FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);

FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);

FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);

FASTOP2(xadd);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
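        /* If src is zero, do not writeback, but update flags */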
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
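        /* If src is zero, do not writeback, but update flags */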
        if (ctxt->src.val == 0)
                ctxt->dst.type = OP_NONE;
        return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
        u8 rc;
        void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);

        flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
        asm("push %[flags]; popf; " CALL_NOSPEC
            : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
        return rc;
}

static void fetch_register_operand(struct operand *op)
{
        switch (op->bytes) {
        case 1:
                op->val = *(u8 *)op->addr.reg;
                break;
        case 2:
                op->val = *(u16 *)op->addr.reg;
                break;
        case 4:
                op->val = *(u32 *)op->addr.reg;
                break;
        case 8:
                op->val = *(u64 *)op->addr.reg;
                break;
        }
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fninit");
        kvm_fpu_put();
        return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
        u16 fcw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fnstcw %0": "+m"(fcw));
        kvm_fpu_put();

        ctxt->dst.val = fcw;

        return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
        u16 fsw;

        if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
                return emulate_nm(ctxt);

        kvm_fpu_get();
        asm volatile("fnstsw %0": "+m"(fsw));
        kvm_fpu_put();

        ctxt->dst.val = fsw;

        return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
                                    struct operand *op)
{
        unsigned reg = ctxt->modrm_reg;

        if (!(ctxt->d & ModRM))
                reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

        if (ctxt->d & Sse) {
                op->type = OP_XMM;
                op->bytes = 16;
                op->addr.xmm = reg;
                kvm_read_sse_reg(reg, &op->vec_val);
                return;
        }
        if (ctxt->d & Mmx) {
                reg &= 7;
                op->type = OP_MM;
                op->bytes = 8;
                op->addr.mm = reg;
                return;
        }

        op->type = OP_REG;
        op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
        op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

        fetch_register_operand(op);
        op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
        if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
                ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
{
        u8 sib;
        int index_reg, base_reg, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;

        ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
        index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
        base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

        ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
        ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;

        if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
                op->type = OP_REG;
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
                                               ctxt->d & ByteOp);
                if (ctxt->d & Sse) {
                        op->type = OP_XMM;
                        op->bytes = 16;
                        op->addr.xmm = ctxt->modrm_rm;
                        kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
                        return rc;
                }
                if (ctxt->d & Mmx) {
                        op->type = OP_MM;
                        op->bytes = 8;
                        op->addr.mm = ctxt->modrm_rm & 7;
                        return rc;
                }
                fetch_register_operand(op);
                return rc;
        }

        op->type = OP_MEM;

        if (ctxt->ad_bytes == 2) {
                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
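
                /* 16-bit ModR/M decode. */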
                switch (ctxt->modrm_mod) {
                case 0:
                        if (ctxt->modrm_rm == 6)
                                modrm_ea += insn_fetch(u16, ctxt);
                        break;
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(u16, ctxt);
                        break;
                }
                switch (ctxt->modrm_rm) {
                case 0:
                        modrm_ea += bx + si;
                        break;
                case 1:
                        modrm_ea += bx + di;
                        break;
                case 2:
                        modrm_ea += bp + si;
                        break;
                case 3:
                        modrm_ea += bp + di;
                        break;
                case 4:
                        modrm_ea += si;
                        break;
                case 5:
                        modrm_ea += di;
                        break;
                case 6:
                        if (ctxt->modrm_mod != 0)
                                modrm_ea += bp;
                        break;
                case 7:
                        modrm_ea += bx;
                        break;
                }
                if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
                    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
                        ctxt->modrm_seg = VCPU_SREG_SS;
                modrm_ea = (u16)modrm_ea;
        } else {
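                /* 32/64-bit ModR/M decode. */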
                if ((ctxt->modrm_rm & 7) == 4) {
                        sib = insn_fetch(u8, ctxt);
                        index_reg |= (sib >> 3) & 7;
                        base_reg |= sib & 7;
                        scale = sib >> 6;

                        if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                                modrm_ea += insn_fetch(s32, ctxt);
                        else {
                                modrm_ea += reg_read(ctxt, base_reg);
                                adjust_modrm_seg(ctxt, base_reg);
                                /* Increment ESP on POP [ESP] */
                                if ((ctxt->d & IncSP) &&
                                    base_reg == VCPU_REGS_RSP)
                                        modrm_ea += ctxt->op_bytes;
                        }
                        if (index_reg != 4)
                                modrm_ea += reg_read(ctxt, index_reg) << scale;
                } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                        modrm_ea += insn_fetch(s32, ctxt);
                        if (ctxt->mode == X86EMUL_MODE_PROT64)
                                ctxt->rip_relative = 1;
                } else {
                        base_reg = ctxt->modrm_rm;
                        modrm_ea += reg_read(ctxt, base_reg);
                        adjust_modrm_seg(ctxt, base_reg);
                }
                switch (ctxt->modrm_mod) {
                case 1:
                        modrm_ea += insn_fetch(s8, ctxt);
                        break;
                case 2:
                        modrm_ea += insn_fetch(s32, ctxt);
                        break;
                }
        }
        op->addr.mem.ea = modrm_ea;
        if (ctxt->ad_bytes != 8)
                ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
        return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
                      struct operand *op)
{
        int rc = X86EMUL_CONTINUE;

        op->type = OP_MEM;
        switch (ctxt->ad_bytes) {
        case 2:
                op->addr.mem.ea = insn_fetch(u16, ctxt);
                break;
        case 4:
                op->addr.mem.ea = insn_fetch(u32, ctxt);
                break;
        case 8:
                op->addr.mem.ea = insn_fetch(u64, ctxt);
                break;
        }
done:
        return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
        long sv = 0, mask;

        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
                mask = ~((long)ctxt->dst.bytes * 8 - 1);

                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;
                else
                        sv = (s64)ctxt->src.val & (s64)mask;

                ctxt->dst.addr.mem.ea = address_mask(ctxt,
                                        ctxt->dst.addr.mem.ea + (sv >> 3));
        }
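
        /* only subword offset */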
        ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
                         unsigned long addr, void *dest, unsigned size)
{
        int rc;
        struct read_cache *mc = &ctxt->mem_read;

        if (mc->pos < mc->end)
                goto read_cached;

        WARN_ON((mc->end + size) >= sizeof(mc->data));

        rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
                                      &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        mc->end += size;

read_cached:
        memcpy(dest, mc->data + mc->pos, size);
        mc->pos += size;
        return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
                          struct segmented_address addr,
                          void *data,
                          unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, false, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
                           struct segmented_address addr,
                           const void *data,
                           unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->write_emulated(ctxt, linear, data, size,
                                         &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
                             struct segmented_address addr,
                             const void *orig_data, const void *data,
                             unsigned size)
{
        int rc;
        ulong linear;

        rc = linearize(ctxt, addr, size, true, &linear);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
                                           size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           unsigned int size, unsigned short port,
                           void *dest)
{
        struct read_cache *rc = &ctxt->io_read;

        if (rc->pos == rc->end) {
                unsigned int in_page, n;
                unsigned int count = ctxt->rep_prefix ?
                        address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
                in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
                        PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
                n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
                if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
                        return 0;
                rc->end = n * size;
        }

        if (ctxt->rep_prefix && (ctxt->d & String) &&
            !(ctxt->eflags & X86_EFLAGS_DF)) {
                ctxt->dst.data = rc->data + rc->pos;
                ctxt->dst.type = OP_MEM_STR;
                ctxt->dst.count = (rc->end - rc->pos) / size;
                rc->pos = rc->end;
        } else {
                memcpy(dest, rc->data + rc->pos, size);
                rc->pos += size;
        }
        return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 index, struct desc_struct *desc)
{
        struct desc_ptr dt;
        ulong addr;

        ctxt->ops->get_idt(ctxt, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, index << 3 | 0x2);

        addr = dt.address + index * 8;
        return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, struct desc_ptr *dt)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        u32 base3 = 0;

        if (selector & 1 << 2) {
                struct desc_struct desc;
                u16 sel;

                memset(dt, 0, sizeof(*dt));
                if (!ops->get_segment(ctxt, &sel, &desc, &base3,
                                      VCPU_SREG_LDTR))
                        return;

                dt->size = desc_limit_scaled(&desc);
                dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
        } else
                ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
                              u16 selector, ulong *desc_addr_p)
{
        struct desc_ptr dt;
        u16 index = selector >> 3;
        ulong addr;

        get_descriptor_table_ptr(ctxt, selector, &dt);

        if (dt.size < index * 8 + 7)
                return emulate_gp(ctxt, selector & 0xfffc);

        addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
        if (addr >> 32 != 0) {
                u64 efer = 0;

                ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                if (!(efer & EFER_LMA))
                        addr &= (u32)-1;
        }
#endif

        *desc_addr_p = addr;
        return X86EMUL_CONTINUE;
}

/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, struct desc_struct *desc,
                                   ulong *desc_addr_p)
{
        int rc;

        rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8 bytes segments */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    u16 selector, struct desc_struct *desc)
{
        int rc;
        ulong addr;

        rc = get_descriptor_ptr(ctxt, selector, &addr);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, int seg, u8 cpl,
                                     enum x86_transfer_type transfer,
                                     struct desc_struct *desc)
{
        struct desc_struct seg_desc, old_desc;
        u8 dpl, rpl;
        unsigned err_vec = GP_VECTOR;
        u32 err_code = 0;
        bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
        ulong desc_addr;
        int ret;
        u16 dummy;
        u32 base3 = 0;

        memset(&seg_desc, 0, sizeof(seg_desc));

        if (ctxt->mode == X86EMUL_MODE_REAL) {
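                /* set real mode segment descriptor (keep limit etc. for
                 * unreal mode) */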
                ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
                set_desc_base(&seg_desc, selector << 4);
                goto load;
        } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
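                /* VM86 needs a clean new segment descriptor */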
                set_desc_base(&seg_desc, selector << 4);
                set_desc_limit(&seg_desc, 0xffff);
                seg_desc.type = 3;
                seg_desc.p = 1;
                seg_desc.s = 1;
                seg_desc.dpl = 3;
                goto load;
        }

        rpl = selector & 3;
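
        /* TR should be in GDT only */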
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;
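
        /* NULL selector is not valid for TR, CS and (except for long mode) SS */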
        if (null_selector) {
                if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
                        goto exception;

                if (seg == VCPU_SREG_SS) {
                        if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
                                goto exception;
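
                        /*
                         * ctxt->ops->set_segment expects the CPL to be in
                         * SS.DPL, so fake an expand-up 32-bit data segment.
                         */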
                        seg_desc.type = 3;
                        seg_desc.p = 1;
                        seg_desc.s = 1;
                        seg_desc.dpl = cpl;
                        seg_desc.d = 1;
                        seg_desc.g = 1;
                }

                /* Skip all following checks */
                goto load;
        }

        ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
        if (ret != X86EMUL_CONTINUE)
                return ret;

        err_code = selector & 0xfffc;
        err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
                                                           GP_VECTOR;

        /* can't load system descriptor into segment selector */
        if (seg <= VCPU_SREG_GS && !seg_desc.s) {
                if (transfer == X86_TRANSFER_CALL_JMP)
                        return X86EMUL_UNHANDLEABLE;
                goto exception;
        }

        if (!seg_desc.p) {
                err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
                goto exception;
        }

        dpl = seg_desc.dpl;

        switch (seg) {
        case VCPU_SREG_SS:
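                /*
                 * segment is not a writable data segment or segment
                 * selector's RPL != CPL or DPL != CPL
                 */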
                if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
                        goto exception;
                break;
        case VCPU_SREG_CS:
                if (!(seg_desc.type & 8))
                        goto exception;

                if (seg_desc.type & 4) {
                        /* conforming */
                        if (dpl > cpl)
                                goto exception;
                } else {
                        /* nonconforming */
                        if (rpl > cpl || dpl != cpl)
                                goto exception;
                }
                /* in long-mode d/b must be clear if l is set */
                if (seg_desc.d && seg_desc.l) {
                        u64 efer = 0;

                        ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
                        if (efer & EFER_LMA)
                                goto exception;
                }

                /* CS(RPL) <- CPL */
                selector = (selector & 0xfffc) | cpl;
                break;
        case VCPU_SREG_TR:
                if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
                        goto exception;
                old_desc = seg_desc;
                seg_desc.type |= 2; /* busy */
                ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
                                                  sizeof(seg_desc), &ctxt->exception);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                break;
        case VCPU_SREG_LDTR:
                if (seg_desc.s || seg_desc.type != 2)
                        goto exception;
                break;
        default: /* DS, ES, FS, or GS */
                /*
                 * segment is not a data or readable code segment or
                 * ((segment is a data or nonconforming code segment)
                 * and (both RPL and CPL > DPL))
                 */
                if ((seg_desc.type & 0xa) == 0x8 ||
                    (((seg_desc.type & 0xc) != 0xc) &&
                     (rpl > dpl && cpl > dpl)))
                        goto exception;
                break;
        }

        if (seg_desc.s) {
                /* mark segment as accessed */
                if (!(seg_desc.type & 1)) {
                        seg_desc.type |= 1;
                        ret = write_segment_descriptor(ctxt, selector,
                                                       &seg_desc);
                        if (ret != X86EMUL_CONTINUE)
                                return ret;
                }
        } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
                ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
                if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
                                                 ((u64)base3 << 32), ctxt))
                        return emulate_gp(ctxt, 0);
        }
load:
        ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
        if (desc)
                *desc = seg_desc;
        return X86EMUL_CONTINUE;
exception:
        return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
{
        u8 cpl = ctxt->ops->cpl(ctxt);
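
        /*
         * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
         * they can load it at CPL<3 (Intel's manual says only LSS can,
         * but it's wrong).
         *
         * However, the Intel manual says that putting IST=1/DPL=3 in
         * an interrupt gate will result in SS=3 (the AMD manual instead
         * says it doesn't), so allow SS=3 in __load_segment_descriptor()
         * and only forbid it here.
         */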
        if (seg == VCPU_SREG_SS && selector == 3 &&
            ctxt->mode == X86EMUL_MODE_PROT64)
                return emulate_exception(ctxt, GP_VECTOR, 0, true);

        return __load_segment_descriptor(ctxt, selector, seg, cpl,
                                         X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
        return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
        switch (op->type) {
        case OP_REG:
                write_register_operand(op);
                break;
        case OP_MEM:
                if (ctxt->lock_prefix)
                        return segmented_cmpxchg(ctxt,
                                                 op->addr.mem,
                                                 &op->orig_val,
                                                 &op->val,
                                                 op->bytes);
                else
                        return segmented_write(ctxt,
                                               op->addr.mem,
                                               &op->val,
                                               op->bytes);
                break;
        case OP_MEM_STR:
                return segmented_write(ctxt,
                                       op->addr.mem,
                                       op->data,
                                       op->bytes * op->count);
                break;
        case OP_XMM:
                kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
                break;
        case OP_MM:
                kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
                break;
        case OP_NONE:
                /* no writeback */
                break;
        default:
                break;
        }
        return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
        struct segmented_address addr;

        rsp_increment(ctxt, -bytes);
        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;

        return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
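        /* Disable writeback. */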
        ctxt->dst.type = OP_NONE;
        return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
                       void *dest, int len)
{
        int rc;
        struct segmented_address addr;

        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
        addr.seg = VCPU_SREG_SS;
        rc = segmented_read(ctxt, addr, dest, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rsp_increment(ctxt, len);
        return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
        return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                        void *dest, int len)
{
        int rc;
        unsigned long val, change_mask;
        int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
        int cpl = ctxt->ops->cpl(ctxt);

        rc = emulate_pop(ctxt, &val, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
                      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
                      X86_EFLAGS_AC | X86_EFLAGS_ID;

        switch(ctxt->mode) {
        case X86EMUL_MODE_PROT64:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT16:
                if (cpl == 0)
                        change_mask |= X86_EFLAGS_IOPL;
                if (cpl <= iopl)
                        change_mask |= X86_EFLAGS_IF;
                break;
        case X86EMUL_MODE_VM86:
                if (iopl < 3)
                        return emulate_gp(ctxt, 0);
                change_mask |= X86_EFLAGS_IF;
                break;
        default: /* real mode */
                change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
                break;
        }

        *(unsigned long *)dest =
                (ctxt->eflags & ~change_mask) | (val & change_mask);

        return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->dst.type = OP_REG;
        ctxt->dst.addr.reg = &ctxt->eflags;
        ctxt->dst.bytes = ctxt->op_bytes;
        return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned frame_size = ctxt->src.val;
        unsigned nesting_level = ctxt->src2.val & 31;
        ulong rbp;

        if (nesting_level)
                return X86EMUL_UNHANDLEABLE;

        rbp = reg_read(ctxt, VCPU_REGS_RBP);
        rc = push(ctxt, &rbp, stack_size(ctxt));
        if (rc != X86EMUL_CONTINUE)
                return rc;
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
                      stack_mask(ctxt));
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
                      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
                      stack_mask(ctxt));
        return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
                      stack_mask(ctxt));
        return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;

        ctxt->src.val = get_segment_selector(ctxt, seg);
        if (ctxt->op_bytes == 4) {
                rsp_increment(ctxt, -2);
                ctxt->op_bytes = 2;
        }

        return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;
        unsigned long selector;
        int rc;

        rc = emulate_pop(ctxt, &selector, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (ctxt->modrm_reg == VCPU_SREG_SS)
                ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
        if (ctxt->op_bytes > 2)
                rsp_increment(ctxt, ctxt->op_bytes - 2);

        rc = load_segment_descriptor(ctxt, (u16)selector, seg);
        return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
        unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RAX;

        while (reg <= VCPU_REGS_RDI) {
                (reg == VCPU_REGS_RSP) ?
                (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

                rc = em_push(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        return rc;

                ++reg;
        }

        return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
        ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
        return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
        int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RDI;
        u32 val;

        while (reg >= VCPU_REGS_RAX) {
                if (reg == VCPU_REGS_RSP) {
                        rsp_increment(ctxt, ctxt->op_bytes);
                        --reg;
                }

                rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
                if (rc != X86EMUL_CONTINUE)
                        break;
                assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
                --reg;
        }
        return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        const struct x86_emulate_ops *ops = ctxt->ops;
        int rc;
        struct desc_ptr dt;
        gva_t cs_addr;
        gva_t eip_addr;
        u16 cs, eip;
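
        /* TODO: Add limit checks */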
        ctxt->src.val = ctxt->eflags;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

        ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->src.val = ctxt->_eip;
        rc = em_push(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ops->get_idt(ctxt, &dt);

        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;

        rc = linear_read_system(ctxt, cs_addr, &cs, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = linear_read_system(ctxt, eip_addr, &eip, 2);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->_eip = eip;

        return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
        int rc;

        invalidate_registers(ctxt);
        rc = __emulate_int_real(ctxt, irq);
        if (rc == X86EMUL_CONTINUE)
                writeback_registers(ctxt);
        return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
        switch(ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return __emulate_int_real(ctxt, irq);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
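                /* Protected mode interrupts unimplemented yet */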
                return X86EMUL_UNHANDLEABLE;
        }
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
        int rc = X86EMUL_CONTINUE;
        unsigned long temp_eip = 0;
        unsigned long temp_eflags = 0;
        unsigned long cs = 0;
        unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
                             X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
                             X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
                             X86_EFLAGS_AC | X86_EFLAGS_ID |
                             X86_EFLAGS_FIXED;
        unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
                                  X86_EFLAGS_VIP;

        /* TODO: Add stack limit check */

        rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        if (temp_eip & ~0xffff)
                return emulate_gp(ctxt, 0);

        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->_eip = temp_eip;

        if (ctxt->op_bytes == 4)
                ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
        else if (ctxt->op_bytes == 2) {
                ctxt->eflags &= ~0xffff;
                ctxt->eflags |= temp_eflags;
        }

        ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
        ctxt->eflags |= X86_EFLAGS_FIXED;
        ctxt->ops->set_nmi_mask(ctxt, false);

        return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
        switch(ctxt->mode) {
        case X86EMUL_MODE_REAL:
                return emulate_iret_real(ctxt);
        case X86EMUL_MODE_VM86:
        case X86EMUL_MODE_PROT16:
        case X86EMUL_MODE_PROT32:
        case X86EMUL_MODE_PROT64:
        default:
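                /* iret from protected mode unimplemented yet */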
                return X86EMUL_UNHANDLEABLE;
        }
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned short sel;
        struct desc_struct new_desc;
        u8 cpl = ctxt->ops->cpl(ctxt);

        memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

        rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
                                       X86_TRANSFER_CALL_JMP,
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
        /* Error handling is not implemented. */
        if (rc != X86EMUL_CONTINUE)
                return X86EMUL_UNHANDLEABLE;

        return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
        return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        long int old_eip;

        old_eip = ctxt->_eip;
        rc = assign_eip_near(ctxt, ctxt->src.val);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        ctxt->src.val = old_eip;
        rc = em_push(ctxt);
        return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
        u64 old = ctxt->dst.orig_val64;

        if (ctxt->dst.bytes == 16)
                return X86EMUL_UNHANDLEABLE;

        if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
            ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
                *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
                *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
                ctxt->eflags &= ~X86_EFLAGS_ZF;
        } else {
                ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
                                  (u32) reg_read(ctxt, VCPU_REGS_RBX);

                ctxt->eflags |= X86_EFLAGS_ZF;
        }
        return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned long eip;

        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
        int rc;
        unsigned long eip, cs;
        int cpl = ctxt->ops->cpl(ctxt);
        struct desc_struct new_desc;

        rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        /* Outer-privilege level return is not implemented */
        if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
                return X86EMUL_UNHANDLEABLE;
        rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
                                       X86_TRANSFER_RET,
                                       &new_desc);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rc = assign_eip_far(ctxt, eip, &new_desc);
        /* Error handling is not implemented. */
        if (rc != X86EMUL_CONTINUE)
                return X86EMUL_UNHANDLEABLE;

        return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
        int rc;

        rc = em_ret_far(ctxt);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        rsp_increment(ctxt, ctxt->src.val);
        return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
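        /* Save real source value, then compare EAX against destination. */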
        ctxt->dst.orig_val = ctxt->dst.val;
        ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
        ctxt->src.orig_val = ctxt->src.val;
        ctxt->src.val = ctxt->dst.orig_val;
        fastop(ctxt, em_cmp);

        if (ctxt->eflags & X86_EFLAGS_ZF) {
                /* Success: write back to memory; no dst writeback. */
                ctxt->src.type = OP_NONE;
                ctxt->dst.val = ctxt->src.orig_val;
        } else {
                /* Failure: write the value we saw to EAX. */
                ctxt->src.type = OP_REG;
                ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                ctxt->src.val = ctxt->dst.orig_val;
                /* Create write-cycle to dest by writing the same value */
                ctxt->dst.val = ctxt->dst.orig_val;
        }
        return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
        int seg = ctxt->src2.val;
        unsigned short sel;
        int rc;

        memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

        rc = load_segment_descriptor(ctxt, sel, seg);
        if (rc != X86EMUL_CONTINUE)
                return rc;

        ctxt->dst.val = ctxt->src.val;
        return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
        return ctxt->ops->guest_has_long_mode(ctxt);
#else
        return false;
#endif
}

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
        desc->g    = (flags >> 23) & 1;
        desc->d    = (flags >> 22) & 1;
        desc->l    = (flags >> 21) & 1;
        desc->avl  = (flags >> 20) & 1;
        desc->p    = (flags >> 15) & 1;
        desc->dpl  = (flags >> 13) & 3;
        desc->s    = (flags >> 12) & 1;
        desc->type = (flags >>  8) & 15;
}

static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
                           int n)
{
        struct desc_struct desc;
        int offset;
        u16 selector;

        selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);

        if (n < 3)
                offset = 0x7f84 + n * 12;
        else
                offset = 0x7f2c + (n - 3) * 12;

        set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
        set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
        rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
        ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
        return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
                           int n)
{
        struct desc_struct desc;
        int offset;
        u16 selector;
        u32 base3;

        offset = 0x7e00 + n * 16;

        selector =                GET_SMSTATE(u16, smstate, offset);
        rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
        set_desc_limit(&desc,     GET_SMSTATE(u32, smstate, offset + 4));
        set_desc_base(&desc,      GET_SMSTATE(u32, smstate, offset + 8));
        base3 =                   GET_SMSTATE(u32, smstate, offset + 12);

        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
        return X86EMUL_CONTINUE;
}
#endif

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                                    u64 cr0, u64 cr3, u64 cr4)
{
        int bad;
        u64 pcid;
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.  However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}

	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
	cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smstate, 0x7fcc);

	if (ctxt->ops->set_dr(ctxt, 6, val))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u32, smstate, 0x7fc8);

	if (ctxt->ops->set_dr(ctxt, 7, val))
		return X86EMUL_UNHANDLEABLE;

	selector = GET_SMSTATE(u32, smstate, 0x7fc4);
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector = GET_SMSTATE(u32, smstate, 0x7fc0);
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
	dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
	dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}

#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u64 val, cr0, cr3, cr4;
	u32 base3;
	u16 selector;
	int i, r;

	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);

	ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;

	val = GET_SMSTATE(u64, smstate, 0x7f68);

	if (ctxt->ops->set_dr(ctxt, 6, val))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u64, smstate, 0x7f60);

	if (ctxt->ops->set_dr(ctxt, 7, val))
		return X86EMUL_UNHANDLEABLE;

	cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
	cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
	cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
	val = GET_SMSTATE(u64, smstate, 0x7ed0);

	if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
		return X86EMUL_UNHANDLEABLE;

	selector = GET_SMSTATE(u32, smstate, 0x7e90);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
	base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);

	dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
	dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
	ctxt->ops->set_idt(ctxt, &dt);

	selector = GET_SMSTATE(u32, smstate, 0x7e70);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
	base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);

	dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
	dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
	ctxt->ops->set_gdt(ctxt, &dt);

	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
	if (r != X86EMUL_CONTINUE)
		return r;

	for (i = 0; i < 6; i++) {
		r = rsm_load_seg_64(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}
#endif

static int em_rsm(struct x86_emulate_ctxt *ctxt)
{
	unsigned long cr0, cr4, efer;
	char buf[512];
	u64 smbase;
	int ret;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
		return emulate_ud(ctxt);

	smbase = ctxt->ops->get_smbase(ctxt);

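	/* The SMM state-save area occupies the last 512 bytes of SMRAM. */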
	ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
	if (ret != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
		ctxt->ops->set_nmi_mask(ctxt, false);

	ctxt->ops->exiting_smm(ctxt);

	/*
	 * Get back to real mode, to prepare a safe state in which to load
	 * CR0/CR3/CR4/EFER.  It's all a bit more complicated if the vCPU
	 * supports long mode.
	 */
	if (emulator_has_longmode(ctxt)) {
		struct desc_struct cs_desc;

		/* Zero CR4.PCIDE before CR0.PG.  */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PCIDE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);

		/* A 32-bit code segment is required to clear EFER.LMA.  */
		memset(&cs_desc, 0, sizeof(cs_desc));
		cs_desc.type = 0xb;
		cs_desc.s = cs_desc.g = cs_desc.p = 1;
		ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
	}

	/* For the 64-bit case, this will clear EFER.LMA.  */
	cr0 = ctxt->ops->get_cr(ctxt, 0);
	if (cr0 & X86_CR0_PE)
		ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));

	if (emulator_has_longmode(ctxt)) {
		/* Clear CR4.PAE before clearing EFER.LME. */
		cr4 = ctxt->ops->get_cr(ctxt, 4);
		if (cr4 & X86_CR4_PAE)
			ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);

		/* And finally go back to 32-bit mode.  */
		efer = 0;
		ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
	}

	/*
	 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
	 * state (e.g. enter guest mode) before loading state from the SMM
	 * state-save area.
	 */
	if (ctxt->ops->leave_smm(ctxt, buf))
		goto emulate_shutdown;

#ifdef CONFIG_X86_64
	if (emulator_has_longmode(ctxt))
		ret = rsm_load_state_64(ctxt, buf);
	else
#endif
		ret = rsm_load_state_32(ctxt, buf);

	if (ret != X86EMUL_CONTINUE)
		goto emulate_shutdown;

	/*
	 * Note, the ctxt->ops callbacks are responsible for handling side
	 * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
	 * runtime updates, etc.  If that changes, e.g. this flow is moved
	 * out of the emulator to make it look more like enter_smm(), then
	 * those side effects need to be explicitly handled for both success
	 * and shutdown.
	 */
	return X86EMUL_CONTINUE;

emulate_shutdown:
	ctxt->ops->triple_fault(ctxt);
	return X86EMUL_CONTINUE;
}

static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
			struct desc_struct *cs, struct desc_struct *ss)
{
	cs->l = 0;		/* will be adjusted later */
	set_desc_base(cs, 0);	/* flat segment */
	cs->g = 1;		/* 4kb granularity */
	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
	cs->type = 0x0b;	/* Read, Execute, Accessed */
	cs->s = 1;
	cs->dpl = 0;		/* will be adjusted later */
	cs->p = 1;
	cs->d = 1;		/* 32bit code segment */
	cs->avl = 0;

	set_desc_base(ss, 0);	/* flat segment */
	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
	ss->g = 1;		/* 4kb granularity */
	ss->s = 1;
	ss->type = 0x03;	/* Read/Write, Accessed */
	ss->d = 1;		/* 32bit stack segment */
	ss->dpl = 0;
	ss->p = 1;
	ss->l = 0;
	ss->avl = 0;
}

static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;

	eax = ecx = 0;
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	return is_guest_vendor_intel(ebx, ecx, edx);
}

static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 eax, ebx, ecx, edx;

	/*
	 * syscall should always be enabled in longmode - so only become
	 * vendor specific (cpuid) if other modes are active...
	 */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return true;

	eax = 0x00000000;
	ecx = 0x00000000;
	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
	/*
	 * remark: Intel CPUs only support "syscall" in 64bit longmode. Also a
	 * 64bit guest with a 32bit compat-app running will #UD !! While this
	 * behaviour can be fixed (by emulating) into AMD response - CPUs of
	 * AMD can't behave like Intel.
	 */
	if (is_guest_vendor_intel(ebx, ecx, edx))
		return false;

	if (is_guest_vendor_amd(ebx, ecx, edx) ||
	    is_guest_vendor_hygon(ebx, ecx, edx))
		return true;

	/*
	 * default: (not Intel, not AMD, not Hygon), apply Intel's
	 * stricter rules...
	 */
	return false;
}

static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	/* syscall is not available in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_ud(ctxt);

	if (!em_syscall_is_enabled(ctxt))
		return emulate_ud(ctxt);

	ops->get_msr(ctxt, MSR_EFER, &efer);
	if (!(efer & EFER_SCE))
		return emulate_ud(ctxt);

	setup_syscalls_segments(ctxt, &cs, &ss);
	ops->get_msr(ctxt, MSR_STAR, &msr_data);
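	/* SYSCALL loads CS from STAR[47:32]; SS is the next selector (CS + 8). */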
	msr_data >>= 32;
	cs_sel = (u16)(msr_data & 0xfffc);
	ss_sel = (u16)(msr_data + 8);

	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}
	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
	if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;

		ops->get_msr(ctxt,
			     ctxt->mode == X86EMUL_MODE_PROT64 ?
			     MSR_LSTAR : MSR_CSTAR, &msr_data);
		ctxt->_eip = msr_data;

		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
		ctxt->eflags &= ~msr_data;
		ctxt->eflags |= X86_EFLAGS_FIXED;
#endif
	} else {
		/* legacy mode */
		ops->get_msr(ctxt, MSR_STAR, &msr_data);
		ctxt->_eip = (u32)msr_data;

		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	}

	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
	return X86EMUL_CONTINUE;
}

static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data;
	u16 cs_sel, ss_sel;
	u64 efer = 0;

	ops->get_msr(ctxt, MSR_EFER, &efer);
	/* inject #GP if in real mode */
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return emulate_gp(ctxt, 0);

	/*
	 * Not recognized on AMD in compat mode (but is recognized in legacy
	 * mode).
	 */
	if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
	    && !vendor_intel(ctxt))
		return emulate_ud(ctxt);

	/* sysenter/sysexit have not been tested in 64bit mode. */
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
	if ((msr_data & 0xfffc) == 0x0)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);
	ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
	cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
	ss_sel = cs_sel + 8;
	if (efer & EFER_LMA) {
		cs.d = 0;
		cs.l = 1;
	}

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
	ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;

	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
	*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
							      (u32)msr_data;
	if (efer & EFER_LMA)
		ctxt->mode = X86EMUL_MODE_PROT64;

	return X86EMUL_CONTINUE;
}

static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct cs, ss;
	u64 msr_data, rcx, rdx;
	int usermode;
	u16 cs_sel = 0, ss_sel = 0;

	/* inject #GP if in real mode or Virtual 8086 mode */
	if (ctxt->mode == X86EMUL_MODE_REAL ||
	    ctxt->mode == X86EMUL_MODE_VM86)
		return emulate_gp(ctxt, 0);

	setup_syscalls_segments(ctxt, &cs, &ss);

	if ((ctxt->rex_prefix & 0x8) != 0x0)
		usermode = X86EMUL_MODE_PROT64;
	else
		usermode = X86EMUL_MODE_PROT32;

	rcx = reg_read(ctxt, VCPU_REGS_RCX);
	rdx = reg_read(ctxt, VCPU_REGS_RDX);

	cs.dpl = 3;
	ss.dpl = 3;
	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
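	/* The target CS is SYSENTER_CS + 16 (+32 in 64-bit mode); SS is CS + 8. */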
	switch (usermode) {
	case X86EMUL_MODE_PROT32:
		cs_sel = (u16)(msr_data + 16);
		if ((msr_data & 0xfffc) == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = (u16)(msr_data + 24);
		rcx = (u32)rcx;
		rdx = (u32)rdx;
		break;
	case X86EMUL_MODE_PROT64:
		cs_sel = (u16)(msr_data + 32);
		if (msr_data == 0x0)
			return emulate_gp(ctxt, 0);
		ss_sel = cs_sel + 8;
		cs.d = 0;
		cs.l = 1;
		if (emul_is_noncanonical_address(rcx, ctxt) ||
		    emul_is_noncanonical_address(rdx, ctxt))
			return emulate_gp(ctxt, 0);
		break;
	}
	cs_sel |= SEGMENT_RPL_MASK;
	ss_sel |= SEGMENT_RPL_MASK;

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
}

static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
	int iopl;
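	/*
	 * I/O is never IOPL-restricted in real mode, always restricted in
	 * VM86 mode, and otherwise restricted when CPL > IOPL.
	 */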
	if (ctxt->mode == X86EMUL_MODE_REAL)
		return false;
	if (ctxt->mode == X86EMUL_MODE_VM86)
		return true;
	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	return ctxt->ops->cpl(ctxt) > iopl;
}

#define VMWARE_PORT_VMPORT	(0x5658)
#define VMWARE_PORT_VMRPC	(0x5659)

static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
					    u16 port, u16 len)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct tr_seg;
	u32 base3;
	int r;
	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
	unsigned mask = (1 << len) - 1;
	unsigned long base;

	/*
	 * VMware allows access to these ports even if denied
	 * by TSS I/O permission bitmap. Mimic behavior.
	 */
	if (enable_vmware_backdoor &&
	    ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
		return true;

	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
	if (!tr_seg.p)
		return false;
	if (desc_limit_scaled(&tr_seg) < 103)
		return false;
	base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
	base |= ((u64)base3) << 32;
#endif
	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
		return false;
	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
	if (r != X86EMUL_CONTINUE)
		return false;
	if ((perm >> bit_idx) & mask)
		return false;
	return true;
}

static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
				 u16 port, u16 len)
{
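	/*
	 * Cache a successful permission check; REP string instructions then
	 * consult the TSS I/O bitmap only once per emulated instruction.
	 */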
	if (ctxt->perm_ok)
		return true;

	if (emulator_bad_iopl(ctxt))
		if (!emulator_io_port_access_allowed(ctxt, port, len))
			return false;

	ctxt->perm_ok = true;

	return true;
}

static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * Intel CPUs mask the counter and pointers in quite strange
	 * manner when ECX is zero due to REP-string optimizations.
	 */
#ifdef CONFIG_X86_64
	if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
		return;

	*reg_write(ctxt, VCPU_REGS_RCX) = 0;

	switch (ctxt->b) {
	case 0xa4:	/* movsb */
	case 0xa5:	/* movsd/w */
		*reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
		fallthrough;
	case 0xaa:	/* stosb */
	case 0xab:	/* stosd/w */
		*reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
	}
#endif
}

static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_16 *tss)
{
	tss->ip = ctxt->_eip;
	tss->flag = ctxt->eflags;
	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
	tss->di = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}

static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_16 *tss)
{
	int ret;
	u8 cpl;

	ctxt->_eip = tss->ip;
	ctxt->eflags = tss->flag | 2;
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors
	 */
	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);

	cpl = tss->cs & 3;

	/*
	 * Now load segment descriptors. If fault happens at this stage
	 * it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	return X86EMUL_CONTINUE;
}

static int task_switch_16(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_16 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss16(ctxt, &tss_seg);

	ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss16(ctxt, &tss_seg);
}

static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
				struct tss_segment_32 *tss)
{
	/* CR3 and ldt selector are not saved intentionally */
	tss->eip = ctxt->_eip;
	tss->eflags = ctxt->eflags;
	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);

	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}

static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
				 struct tss_segment_32 *tss)
{
	int ret;
	u8 cpl;

	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
		return emulate_gp(ctxt, 0);
	ctxt->_eip = tss->eip;
	ctxt->eflags = tss->eflags | 2;

	/* General purpose registers */
	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;

	/*
	 * SDM says that segment selectors are loaded before segment
	 * descriptors.  This is important because CPL checks will
	 * use CS.RPL.
	 */
	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);

	/*
	 * If we're switching between Protected Mode and VM86, we need to make
	 * sure to update the mode before loading the segment descriptors so
	 * that the selectors are interpreted correctly.
	 */
	if (ctxt->eflags & X86_EFLAGS_VM) {
		ctxt->mode = X86EMUL_MODE_VM86;
		cpl = 3;
	} else {
		ctxt->mode = X86EMUL_MODE_PROT32;
		cpl = tss->cs & 3;
	}

	/*
	 * Now load segment descriptors. If fault happens at this
	 * stage it is handled in a context of new task
	 */
	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
					X86_TRANSFER_TASK_SWITCH, NULL);

	return ret;
}

static int task_switch_32(struct x86_emulate_ctxt *ctxt,
			  u16 tss_selector, u16 old_tss_sel,
			  ulong old_tss_base, struct desc_struct *new_desc)
{
	struct tss_segment_32 tss_seg;
	int ret;
	u32 new_tss_base = get_desc_base(new_desc);
	u32 eip_offset = offsetof(struct tss_segment_32, eip);
	u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);

	ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	save_state_to_tss32(ctxt, &tss_seg);

	/* Only GP registers and segment selectors are saved */
	ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
				  ldt_sel_offset - eip_offset);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (old_tss_sel != 0xffff) {
		tss_seg.prev_task_link = old_tss_sel;

		ret = linear_write_system(ctxt, new_tss_base,
					  &tss_seg.prev_task_link,
					  sizeof(tss_seg.prev_task_link));
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}

	return load_state_from_tss32(ctxt, &tss_seg);
}

static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
				   u16 tss_selector, int idt_index, int reason,
				   bool has_error_code, u32 error_code)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	struct desc_struct curr_tss_desc, next_tss_desc;
	int ret;
	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
	ulong old_tss_base =
		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
	u32 desc_limit;
	ulong desc_addr, dr7;

	/* FIXME: old_tss_base == ~0 ? */

	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;
	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	/* FIXME: check that next_tss_desc is tss */

	/*
	 * Check privileges. The three cases are task switch caused by...
	 *
	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
	 * 3. jmp/call to TSS/task-gate: No check is performed since the
	 *    hardware checks it before exiting.
	 */
	if (reason == TASK_SWITCH_GATE) {
		if (idt_index != -1) {
			/* Software interrupts */
			struct desc_struct task_gate_desc;
			int dpl;

			ret = read_interrupt_descriptor(ctxt, idt_index,
							&task_gate_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;

			dpl = task_gate_desc.dpl;
			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
		}
	}

	desc_limit = desc_limit_scaled(&next_tss_desc);
	if (!next_tss_desc.p ||
	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
	     desc_limit < 0x2b)) {
		return emulate_ts(ctxt, tss_selector & 0xfffc);
	}

	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
	}

	if (reason == TASK_SWITCH_IRET)
		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;

	/* set back link to prev task only if NT bit is set in eflags
	   note that old_tss_sel is not used after this point */
	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
		old_tss_sel = 0xffff;

	if (next_tss_desc.type & 8)
		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	else
		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
				     old_tss_base, &next_tss_desc);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;

	if (reason != TASK_SWITCH_IRET) {
		next_tss_desc.type |= (1 << 1); /* set busy flag */
		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
	}

	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);

	if (has_error_code) {
		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
		ctxt->lock_prefix = 0;
		ctxt->src.val = (unsigned long) error_code;
		ret = em_push(ctxt);
	}

	ops->get_dr(ctxt, 7, &dr7);
	ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

	return ret;
}

int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int idt_index, int reason,
			 bool has_error_code, u32 error_code)
{
	int rc;

	invalidate_registers(ctxt);
	ctxt->_eip = ctxt->eip;
	ctxt->dst.type = OP_NONE;

	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
				     has_error_code, error_code);

	if (rc == X86EMUL_CONTINUE) {
		ctxt->eip = ctxt->_eip;
		writeback_registers(ctxt);
	}

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}

static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
			    struct operand *op)
{
	int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;

	register_address_increment(ctxt, reg, df * op->bytes);
	op->addr.mem.ea = register_address(ctxt, reg);
}

static int em_das(struct x86_emulate_ctxt *ctxt)
{
	u8 al, old_al;
	bool af, cf, old_cf;

	cf = ctxt->eflags & X86_EFLAGS_CF;
	al = ctxt->dst.val;

	old_al = al;
	old_cf = cf;
	cf = false;
	af = ctxt->eflags & X86_EFLAGS_AF;
	if ((al & 0x0f) > 9 || af) {
		al -= 6;
		cf = old_cf | (al >= 250);
		af = true;
	} else {
		af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		cf = true;
	}

	ctxt->dst.val = al;
	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);
	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
	if (cf)
		ctxt->eflags |= X86_EFLAGS_CF;
	if (af)
		ctxt->eflags |= X86_EFLAGS_AF;
	return X86EMUL_CONTINUE;
}

static int em_aam(struct x86_emulate_ctxt *ctxt)
{
	u8 al, ah;

	if (ctxt->src.val == 0)
		return emulate_de(ctxt);

	al = ctxt->dst.val & 0xff;
	ah = al / ctxt->src.val;
	al %= ctxt->src.val;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_aad(struct x86_emulate_ctxt *ctxt)
{
	u8 al = ctxt->dst.val & 0xff;
	u8 ah = (ctxt->dst.val >> 8) & 0xff;

	al = (al + (ah * ctxt->src.val)) & 0xff;

	ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;

	/* Set PF, ZF, SF */
	ctxt->src.type = OP_IMM;
	ctxt->src.val = 0;
	ctxt->src.bytes = 1;
	fastop(ctxt, em_or);

	return X86EMUL_CONTINUE;
}

static int em_call(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long rel = ctxt->src.val;

	ctxt->src.val = (unsigned long)ctxt->_eip;
	rc = jmp_rel(ctxt, rel);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return em_push(ctxt);
}

static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
	u16 sel, old_cs;
	ulong old_eip;
	int rc;
	struct desc_struct old_desc, new_desc;
	const struct x86_emulate_ops *ops = ctxt->ops;
	int cpl = ctxt->ops->cpl(ctxt);
	enum x86emul_mode prev_mode = ctxt->mode;

	old_eip = ctxt->_eip;
	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_cs;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	/* If we failed, we tainted the memory, but the very least we should
	   restore cs */
	if (rc != X86EMUL_CONTINUE) {
		pr_warn_once("faulting far call emulation tainted memory\n");
		goto fail;
	}
	return rc;
fail:
	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
	ctxt->mode = prev_mode;
	return rc;
}

static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_near(ctxt, eip);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
	/* Write back the register source. */
	ctxt->src.val = ctxt->dst.val;
	write_register_operand(&ctxt->src);

	/* Write back the memory destination with implicit LOCK prefix. */
	ctxt->dst.val = ctxt->src.orig_val;
	ctxt->lock_prefix = 1;
	return X86EMUL_CONTINUE;
}

static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = ctxt->src2.val;
	return fastop(ctxt, em_imul);
}

static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.bytes = ctxt->src.bytes;
	ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
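	/* Replicate the sign bit of src across all of rDX (CWD/CDQ/CQO). */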
	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);

	return X86EMUL_CONTINUE;
}

static int em_rdpid(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc_aux = 0;

	if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
		return emulate_ud(ctxt);
	ctxt->dst.val = tsc_aux;
	return X86EMUL_CONTINUE;
}

static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 tsc = 0;

	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
	*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 pmc;

	if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
		return emulate_gp(ctxt, 0);
	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
	*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
	return X86EMUL_CONTINUE;
}

static int em_mov(struct x86_emulate_ctxt *ctxt)
{
	memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
	return X86EMUL_CONTINUE;
}

static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
	u16 tmp;

	if (!ctxt->ops->guest_has_movbe(ctxt))
		return emulate_ud(ctxt);

	switch (ctxt->op_bytes) {
	case 2:
		/*
		 * From MOVBE definition "...When the operand size is 16 bits,
		 * the upper word of the destination register remains unchanged
		 * ..."
		 *
		 * Both casting ->valptr and ->val to u16 breaks strict aliasing
		 * rules so we have to do the operation almost per hand.
		 */
		tmp = (u16)ctxt->src.val;
		ctxt->dst.val &= ~0xffffUL;
		ctxt->dst.val |= (unsigned long)swab16(tmp);
		break;
	case 4:
		ctxt->dst.val = swab32((u32)ctxt->src.val);
		break;
	case 8:
		ctxt->dst.val = swab64(ctxt->src.val);
		break;
	default:
		BUG();
	}
	return X86EMUL_CONTINUE;
}

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
	unsigned long val;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		val = ctxt->src.val & ~0ULL;
	else
		val = ctxt->src.val & ~0U;

	/* #UD condition is already handled. */
	if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
	u64 msr_data;
	int r;

	msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
		| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
	r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);

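	/*
	 * A positive return other than X86EMUL_IO_NEEDED indicates a guest
	 * #GP; a negative return indicates a host-side failure.
	 */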
	if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r > 0)
		return emulate_gp(ctxt, 0);

	return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}

static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
	u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
	u64 msr_data;
	int r;

	r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);

	if (r == X86EMUL_IO_NEEDED)
		return r;

	if (r)
		return emulate_gp(ctxt, 0);

	*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
	*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
	return X86EMUL_CONTINUE;
}

static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
{
	if (segment > VCPU_SREG_GS &&
	    (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	ctxt->dst.val = get_segment_selector(ctxt, segment);
	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	return X86EMUL_CONTINUE;
}

static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	return em_store_sreg(ctxt, ctxt->modrm_reg);
}

static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
		return emulate_ud(ctxt);

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}

static int em_sldt(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_LDTR);
}

static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}

static int em_str(struct x86_emulate_ctxt *ctxt)
{
	return em_store_sreg(ctxt, VCPU_SREG_TR);
}

static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
	u16 sel = ctxt->src.val;

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}

static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->ops->invlpg(ctxt, linear);
	/* Disable writeback */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_clts(struct x86_emulate_ctxt *ctxt)
{
	ulong cr0;

	cr0 = ctxt->ops->get_cr(ctxt, 0);
	cr0 &= ~X86_CR0_TS;
	ctxt->ops->set_cr(ctxt, 0, cr0);
	return X86EMUL_CONTINUE;
}

static int em_hypercall(struct x86_emulate_ctxt *ctxt)
{
	int rc = ctxt->ops->fix_hypercall(ctxt);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	/* Let the processor re-execute the fixed hypercall */
	ctxt->_eip = ctxt->eip;
	/* Disable writeback */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
				  void (*get)(struct x86_emulate_ctxt *ctxt,
					      struct desc_ptr *ptr))
{
	struct desc_ptr desc_ptr;

	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	get(ctxt, &desc_ptr);
	if (ctxt->op_bytes == 2) {
		ctxt->op_bytes = 4;
		desc_ptr.address &= 0x00ffffff;
	}
	/* Disable writeback */
	ctxt->dst.type = OP_NONE;
	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
				   &desc_ptr, 2 + ctxt->op_bytes);
}

static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}

static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}

static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
{
	struct desc_ptr desc_ptr;
	int rc;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		ctxt->op_bytes = 8;
	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
			     &desc_ptr.size, &desc_ptr.address,
			     ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
	    emul_is_noncanonical_address(desc_ptr.address, ctxt))
		return emulate_gp(ctxt, 0);
	if (lgdt)
		ctxt->ops->set_gdt(ctxt, &desc_ptr);
	else
		ctxt->ops->set_idt(ctxt, &desc_ptr);
	/* Disable writeback */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, true);
}

static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
	return em_lgdt_lidt(ctxt, false);
}

static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	if (ctxt->dst.type == OP_MEM)
		ctxt->dst.bytes = 2;
	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
	return X86EMUL_CONTINUE;
}

static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
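	/* LMSW updates only CR0[3:0] (PE/MP/EM/TS) and can never clear CR0.PE. */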
	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
			  | (ctxt->src.val & 0x0f));
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_loop(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
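	/*
	 * 0xe2 is plain LOOP.  For LOOPNE (0xe0) and LOOPE (0xe1), XOR-ing
	 * the opcode with 5 yields condition code 5 (ZF clear) resp. 4
	 * (ZF set) for test_cc().
	 */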
	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;

	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
		rc = jmp_rel(ctxt, ctxt->src.val);

	return rc;
}

static int em_in(struct x86_emulate_ctxt *ctxt)
{
	if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
			     &ctxt->dst.val))
		return X86EMUL_IO_NEEDED;

	return X86EMUL_CONTINUE;
}

static int em_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
				    &ctxt->src.val, 1);
	/* Disable writeback */
	ctxt->dst.type = OP_NONE;
	return X86EMUL_CONTINUE;
}

static int em_cli(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->eflags &= ~X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_sti(struct x86_emulate_ctxt *ctxt)
{
	if (emulator_bad_iopl(ctxt))
		return emulate_gp(ctxt, 0);

	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
	ctxt->eflags |= X86_EFLAGS_IF;
	return X86EMUL_CONTINUE;
}

static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ebx, ecx, edx;
	u64 msr = 0;

	ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
	if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
	    ctxt->ops->cpl(ctxt)) {
		return emulate_gp(ctxt, 0);
	}

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);
	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
	*reg_write(ctxt, VCPU_REGS_RAX) = eax;
	*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
	*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
	*reg_write(ctxt, VCPU_REGS_RDX) = edx;
	return X86EMUL_CONTINUE;
}

static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
	u32 flags;

	flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
		X86_EFLAGS_SF;
	flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;

	ctxt->eflags &= ~0xffUL;
	ctxt->eflags |= flags | X86_EFLAGS_FIXED;
	return X86EMUL_CONTINUE;
}

static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
	*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
	*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
	return X86EMUL_CONTINUE;
}

static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
	switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
	case 8:
		asm("bswap %0" : "+r"(ctxt->dst.val));
		break;
#endif
	default:
		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
		break;
	}
	return X86EMUL_CONTINUE;
}

static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflush regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
{
	/* emulating clflushopt regardless of cpuid */
	return X86EMUL_CONTINUE;
}

static int em_movsxd(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.val = (s32) ctxt->src.val;
	return X86EMUL_CONTINUE;
}

static int check_fxsr(struct x86_emulate_ctxt *ctxt)
{
	if (!ctxt->ops->guest_has_fxsr(ctxt))
		return emulate_ud(ctxt);

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	/*
	 * Don't emulate a case that should never be hit, instead of working
	 * around a lack of fxsave64/fxrstor64 on old compilers.
	 */
	if (ctxt->mode >= X86EMUL_MODE_PROT64)
		return X86EMUL_UNHANDLEABLE;

	return X86EMUL_CONTINUE;
}

/*
 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
 * and restore MXCSR.
 */
static size_t __fxstate_size(int nregs)
{
	return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
}

static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
{
	bool cr4_osfxsr;
	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return __fxstate_size(16);

	cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
	return __fxstate_size(cr4_osfxsr ? 8 : 0);
}

/*
 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
 *  1) 16 bit mode
 *  2) 32 bit mode
 *  3) 64-bit mode with REX.W prefix
 *  4) 64-bit mode without REX.W prefix
 *
 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
 * desired result.  (4) is not emulated.
 *
 * Internally the emulator always executes the host-format FXSAVE/FXRSTOR
 * and copies only fxstate_size() bytes to or from guest memory, so the
 * guest never sees registers its mode could not access.
 */
static int em_fxsave(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	kvm_fpu_get();

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));

	kvm_fpu_put();

	if (rc != X86EMUL_CONTINUE)
		return rc;

	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
				   fxstate_size(ctxt));
}

/*
 * FXRSTOR might restore XMM registers not provided by the guest. Fill
 * the supplied buffer with the results of the host's FXSAVE for those
 * registers, so that FXRSTOR leaves them unchanged.
 */
static noinline int fxregs_fixup(struct fxregs_state *fx_state,
				 const size_t used_size)
{
	struct fxregs_state fx_tmp;
	int rc;

	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
	memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
	       __fxstate_size(16) - used_size);

	return rc;
}

static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
	struct fxregs_state fx_state;
	int rc;
	size_t size;

	rc = check_fxsr(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	size = fxstate_size(ctxt);
	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	kvm_fpu_get();

	if (size < __fxstate_size(16)) {
		rc = fxregs_fixup(&fx_state, size);
		if (rc != X86EMUL_CONTINUE)
			goto out;
	}

	if (fx_state.mxcsr >> 16) {
		rc = emulate_gp(ctxt, 0);
		goto out;
	}

	if (rc == X86EMUL_CONTINUE)
		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));

out:
	kvm_fpu_put();

	return rc;
}

static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
	u32 eax, ecx, edx;

	/* XSETBV #UDs if CR4.OSXSAVE is clear. */
	if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
		return emulate_ud(ctxt);

	eax = reg_read(ctxt, VCPU_REGS_RAX);
	edx = reg_read(ctxt, VCPU_REGS_RDX);
	ecx = reg_read(ctxt, VCPU_REGS_RCX);

	if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static bool valid_cr(int nr)
{
	switch (nr) {
	case 0:
	case 2 ... 4:
	case 8:
		return true;
	default:
		return false;
	}
}

static int check_cr_access(struct x86_emulate_ctxt *ctxt)
{
	if (!valid_cr(ctxt->modrm_reg))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
	unsigned long dr7;

	ctxt->ops->get_dr(ctxt, 7, &dr7);

	/* Check if DR7.Global_Enable is set */
	return dr7 & (1 << 13);
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
	int dr = ctxt->modrm_reg;
	u64 cr4;

	if (dr > 7)
		return emulate_ud(ctxt);

	cr4 = ctxt->ops->get_cr(ctxt, 4);
	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
		return emulate_ud(ctxt);

	if (check_dr7_gd(ctxt)) {
		ulong dr6;

		ctxt->ops->get_dr(ctxt, 6, &dr6);
		dr6 &= ~DR_TRAP_BITS;
		dr6 |= DR6_BD | DR6_ACTIVE_LOW;
		ctxt->ops->set_dr(ctxt, 6, dr6);
		return emulate_db(ctxt);
	}

	return X86EMUL_CONTINUE;
}

static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
	u64 new_val = ctxt->src.val64;
	int dr = ctxt->modrm_reg;

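	/* The upper 32 bits of DR6 and DR7 are reserved and must be zero. */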
	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
		return emulate_gp(ctxt, 0);

	return check_dr_read(ctxt);
}

static int check_svme(struct x86_emulate_ctxt *ctxt)
{
	u64 efer = 0;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(efer & EFER_SVME))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
	u64 rax = reg_read(ctxt, VCPU_REGS_RAX);

	/* Valid physical address? */
	if (rax & 0xffff000000000000ULL)
		return emulate_gp(ctxt, 0);

	return check_svme(ctxt);
}

static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);

	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
		return emulate_ud(ctxt);

	return X86EMUL_CONTINUE;
}

static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
	u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);

	/*
	 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
	 * in Ring3 when CR4.PCE=0.
	 */
	if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
		return X86EMUL_CONTINUE;

	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
	    ctxt->ops->check_pmc(ctxt, rcx))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
	if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}

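/*
 * Shorthand constructors for the opcode tables below: D() takes decode
 * flags only, I()/II()/IIP() attach an execute handler (plus intercept and
 * permission check), F() attaches a fastop, N marks an unimplemented slot,
 * and G/GD/EXT/E/ID/MD/GP dispatch into sub-tables.  The *2bv* helpers emit
 * a ByteOp entry followed by the full-width one.
 */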
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
		      .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N    D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
	{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
	{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
	  .intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }

#define D2bv(_f)      D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e)  F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
	IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)

#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
		F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
		F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)

static const struct opcode group7_rm0[] = {
	N,
	I(SrcNone | Priv | EmulateOnUD, em_hypercall),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm1[] = {
	DI(SrcNone | Priv, monitor),
	DI(SrcNone | Priv, mwait),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm2[] = {
	N,
	II(ImplicitOps | Priv, em_xsetbv, xsetbv),
	N, N, N, N, N, N,
};

static const struct opcode group7_rm3[] = {
	DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
	II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
	DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
	DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
	DIP(SrcNone | Prot | Priv, stgi, check_svme),
	DIP(SrcNone | Prot | Priv, clgi, check_svme),
	DIP(SrcNone | Prot | Priv, skinit, check_svme),
	DIP