1 /******************************************************************************
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
6 * Copyright (c) 2005 Keir Fraser
8 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9 * privileged instructions:
11 * Copyright (C) 2006 Qumranet
12 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
20 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 #include <linux/stringify.h>
36 #define OpImplicit 1ull /* No generic decode */
37 #define OpReg 2ull /* Register */
38 #define OpMem 3ull /* Memory */
39 #define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
40 #define OpDI 5ull /* ES:DI/EDI/RDI */
41 #define OpMem64 6ull /* Memory, 64-bit */
42 #define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
43 #define OpDX 8ull /* DX register */
44 #define OpCL 9ull /* CL register (for shifts) */
45 #define OpImmByte 10ull /* 8-bit sign extended immediate */
46 #define OpOne 11ull /* Implied 1 */
47 #define OpImm 12ull /* Sign extended up to 32-bit immediate */
48 #define OpMem16 13ull /* Memory operand (16-bit). */
49 #define OpMem32 14ull /* Memory operand (32-bit). */
50 #define OpImmU 15ull /* Immediate operand, zero extended */
51 #define OpSI 16ull /* SI/ESI/RSI */
52 #define OpImmFAddr 17ull /* Immediate far address */
53 #define OpMemFAddr 18ull /* Far address in memory */
54 #define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
55 #define OpES 20ull /* ES */
56 #define OpCS 21ull /* CS */
57 #define OpSS 22ull /* SS */
58 #define OpDS 23ull /* DS */
59 #define OpFS 24ull /* FS */
60 #define OpGS 25ull /* GS */
61 #define OpMem8 26ull /* 8-bit zero extended memory operand */
62 #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
63 #define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
64 #define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
65 #define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
67 #define OpBits 5 /* Width of operand field */
68 #define OpMask ((1ull << OpBits) - 1)
71 * Opcode effective-address decode tables.
72 * Note that we only emulate instructions that have at least one memory
73 * operand (excluding implicit stack references). We assume that stack
74 * references and instruction fetches will never occur in special memory
75 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
79 /* Operand sizes: 8-bit operands or specified/overridden size. */
80 #define ByteOp (1<<0) /* 8-bit operands. */
81 /* Destination operand type. */
83 #define ImplicitOps (OpImplicit << DstShift)
84 #define DstReg (OpReg << DstShift)
85 #define DstMem (OpMem << DstShift)
86 #define DstAcc (OpAcc << DstShift)
87 #define DstDI (OpDI << DstShift)
88 #define DstMem64 (OpMem64 << DstShift)
89 #define DstImmUByte (OpImmUByte << DstShift)
90 #define DstDX (OpDX << DstShift)
91 #define DstAccLo (OpAccLo << DstShift)
92 #define DstMask (OpMask << DstShift)
93 /* Source operand type. */
95 #define SrcNone (OpNone << SrcShift)
96 #define SrcReg (OpReg << SrcShift)
97 #define SrcMem (OpMem << SrcShift)
98 #define SrcMem16 (OpMem16 << SrcShift)
99 #define SrcMem32 (OpMem32 << SrcShift)
100 #define SrcImm (OpImm << SrcShift)
101 #define SrcImmByte (OpImmByte << SrcShift)
102 #define SrcOne (OpOne << SrcShift)
103 #define SrcImmUByte (OpImmUByte << SrcShift)
104 #define SrcImmU (OpImmU << SrcShift)
105 #define SrcSI (OpSI << SrcShift)
106 #define SrcXLat (OpXLat << SrcShift)
107 #define SrcImmFAddr (OpImmFAddr << SrcShift)
108 #define SrcMemFAddr (OpMemFAddr << SrcShift)
109 #define SrcAcc (OpAcc << SrcShift)
110 #define SrcImmU16 (OpImmU16 << SrcShift)
111 #define SrcImm64 (OpImm64 << SrcShift)
112 #define SrcDX (OpDX << SrcShift)
113 #define SrcMem8 (OpMem8 << SrcShift)
114 #define SrcAccHi (OpAccHi << SrcShift)
115 #define SrcMask (OpMask << SrcShift)
116 #define BitOp (1<<11)
117 #define MemAbs (1<<12) /* Memory operand is absolute displacement */
118 #define String (1<<13) /* String instruction (rep capable) */
119 #define Stack (1<<14) /* Stack instruction (push/pop) */
120 #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
121 #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
122 #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
123 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
124 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
125 #define Escape (5<<15) /* Escape to coprocessor instruction */
126 #define Sse (1<<18) /* SSE Vector instruction */
127 /* Generic ModRM decode. */
128 #define ModRM (1<<19)
129 /* Destination is only written; never read. */
132 #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
133 #define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
134 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
135 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
136 #define Undefined (1<<25) /* No Such Instruction */
137 #define Lock (1<<26) /* lock prefix is allowed for the instruction */
138 #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
140 #define PageTable (1 << 29) /* instruction used to write page table */
141 #define NotImpl (1 << 30) /* instruction is not implemented */
142 /* Source 2 operand type */
143 #define Src2Shift (31)
144 #define Src2None (OpNone << Src2Shift)
145 #define Src2Mem (OpMem << Src2Shift)
146 #define Src2CL (OpCL << Src2Shift)
147 #define Src2ImmByte (OpImmByte << Src2Shift)
148 #define Src2One (OpOne << Src2Shift)
149 #define Src2Imm (OpImm << Src2Shift)
150 #define Src2ES (OpES << Src2Shift)
151 #define Src2CS (OpCS << Src2Shift)
152 #define Src2SS (OpSS << Src2Shift)
153 #define Src2DS (OpDS << Src2Shift)
154 #define Src2FS (OpFS << Src2Shift)
155 #define Src2GS (OpGS << Src2Shift)
156 #define Src2Mask (OpMask << Src2Shift)
157 #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
158 #define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
159 #define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
160 #define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
161 #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
162 #define NoWrite ((u64)1 << 45) /* No writeback */
163 #define SrcWrite ((u64)1 << 46) /* Write back src operand */
164 #define NoMod ((u64)1 << 47) /* Mod field is ignored */
165 #define Intercept ((u64)1 << 48) /* Has valid intercept field */
166 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
167 #define NoBigReal ((u64)1 << 50) /* No big real mode */
168 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
170 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
172 #define X2(x...) x, x
173 #define X3(x...) X2(x), x
174 #define X4(x...) X2(x), X2(x)
175 #define X5(x...) X4(x), x
176 #define X6(x...) X4(x), X2(x)
177 #define X7(x...) X4(x), X3(x)
178 #define X8(x...) X4(x), X4(x)
179 #define X16(x...) X8(x), X8(x)
181 #define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
182 #define FASTOP_SIZE 8
185 * fastop functions have a special calling convention:
190 * flags: rflags (in/out)
191 * ex: rsi (in:fastop pointer, out:zero if exception)
193 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
194 * different operand sizes can be reached by calculation, rather than a jump
195 * table (which would be bigger than the code).
197 * fastop functions are declared as taking a never-defined fastop parameter,
198 * so they can't be called from C directly.
207 int (*execute)(struct x86_emulate_ctxt *ctxt);
208 const struct opcode *group;
209 const struct group_dual *gdual;
210 const struct gprefix *gprefix;
211 const struct escape *esc;
212 void (*fastop)(struct fastop *fake);
214 int (*check_perm)(struct x86_emulate_ctxt *ctxt);
218 struct opcode mod012[8];
219 struct opcode mod3[8];
223 struct opcode pfx_no;
224 struct opcode pfx_66;
225 struct opcode pfx_f2;
226 struct opcode pfx_f3;
231 struct opcode high[64];
234 /* EFLAGS bit definitions. */
235 #define EFLG_ID (1<<21)
236 #define EFLG_VIP (1<<20)
237 #define EFLG_VIF (1<<19)
238 #define EFLG_AC (1<<18)
239 #define EFLG_VM (1<<17)
240 #define EFLG_RF (1<<16)
241 #define EFLG_IOPL (3<<12)
242 #define EFLG_NT (1<<14)
243 #define EFLG_OF (1<<11)
244 #define EFLG_DF (1<<10)
245 #define EFLG_IF (1<<9)
246 #define EFLG_TF (1<<8)
247 #define EFLG_SF (1<<7)
248 #define EFLG_ZF (1<<6)
249 #define EFLG_AF (1<<4)
250 #define EFLG_PF (1<<2)
251 #define EFLG_CF (1<<0)
253 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
254 #define EFLG_RESERVED_ONE_MASK 2
256 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
258 if (!(ctxt->regs_valid & (1 << nr))) {
259 ctxt->regs_valid |= 1 << nr;
260 ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
262 return ctxt->_regs[nr];
265 static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
267 ctxt->regs_valid |= 1 << nr;
268 ctxt->regs_dirty |= 1 << nr;
269 return &ctxt->_regs[nr];
272 static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
275 return reg_write(ctxt, nr);
278 static void writeback_registers(struct x86_emulate_ctxt *ctxt)
282 for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
283 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
286 static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
288 ctxt->regs_dirty = 0;
289 ctxt->regs_valid = 0;
293 * These EFLAGS bits are restored from saved value during emulation, and
294 * any changes are written back to the saved value after emulation.
296 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
304 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
306 #define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
307 #define FOP_RET "ret \n\t"
309 #define FOP_START(op) \
310 extern void em_##op(struct fastop *fake); \
311 asm(".pushsection .text, \"ax\" \n\t" \
312 ".global em_" #op " \n\t" \
319 #define FOPNOP() FOP_ALIGN FOP_RET
321 #define FOP1E(op, dst) \
322 FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
324 #define FOP1EEX(op, dst) \
325 FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
327 #define FASTOP1(op) \
332 ON64(FOP1E(op##q, rax)) \
335 /* 1-operand, using src2 (for MUL/DIV r/m) */
336 #define FASTOP1SRC2(op, name) \
341 ON64(FOP1E(op, rcx)) \
344 /* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
345 #define FASTOP1SRC2EX(op, name) \
350 ON64(FOP1EEX(op, rcx)) \
353 #define FOP2E(op, dst, src) \
354 FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
356 #define FASTOP2(op) \
358 FOP2E(op##b, al, dl) \
359 FOP2E(op##w, ax, dx) \
360 FOP2E(op##l, eax, edx) \
361 ON64(FOP2E(op##q, rax, rdx)) \
364 /* 2 operand, word only */
365 #define FASTOP2W(op) \
368 FOP2E(op##w, ax, dx) \
369 FOP2E(op##l, eax, edx) \
370 ON64(FOP2E(op##q, rax, rdx)) \
373 /* 2 operand, src is CL */
374 #define FASTOP2CL(op) \
376 FOP2E(op##b, al, cl) \
377 FOP2E(op##w, ax, cl) \
378 FOP2E(op##l, eax, cl) \
379 ON64(FOP2E(op##q, rax, cl)) \
382 #define FOP3E(op, dst, src, src2) \
383 FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
385 /* 3-operand, word-only, src2=cl */
386 #define FASTOP3WCL(op) \
389 FOP3E(op##w, ax, dx, cl) \
390 FOP3E(op##l, eax, edx, cl) \
391 ON64(FOP3E(op##q, rax, rdx, cl)) \
394 /* Special case for SETcc - 1 instruction per cc */
395 #define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
397 asm(".global kvm_fastop_exception \n"
398 "kvm_fastop_exception: xor %esi, %esi; ret");
419 FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
422 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
423 enum x86_intercept intercept,
424 enum x86_intercept_stage stage)
426 struct x86_instruction_info info = {
427 .intercept = intercept,
428 .rep_prefix = ctxt->rep_prefix,
429 .modrm_mod = ctxt->modrm_mod,
430 .modrm_reg = ctxt->modrm_reg,
431 .modrm_rm = ctxt->modrm_rm,
432 .src_val = ctxt->src.val64,
433 .dst_val = ctxt->dst.val64,
434 .src_bytes = ctxt->src.bytes,
435 .dst_bytes = ctxt->dst.bytes,
436 .ad_bytes = ctxt->ad_bytes,
437 .next_rip = ctxt->eip,
440 return ctxt->ops->intercept(ctxt, &info, stage);
443 static void assign_masked(ulong *dest, ulong src, ulong mask)
445 *dest = (*dest & ~mask) | (src & mask);
448 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
450 return (1UL << (ctxt->ad_bytes << 3)) - 1;
453 static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
456 struct desc_struct ss;
458 if (ctxt->mode == X86EMUL_MODE_PROT64)
460 ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
461 return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
464 static int stack_size(struct x86_emulate_ctxt *ctxt)
466 return (__fls(stack_mask(ctxt)) + 1) >> 3;
469 /* Access/update address held in a register, based on addressing mode. */
470 static inline unsigned long
471 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
473 if (ctxt->ad_bytes == sizeof(unsigned long))
476 return reg & ad_mask(ctxt);
479 static inline unsigned long
480 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
482 return address_mask(ctxt, reg);
485 static void masked_increment(ulong *reg, ulong mask, int inc)
487 assign_masked(reg, *reg + inc, mask);
491 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
495 if (ctxt->ad_bytes == sizeof(unsigned long))
498 mask = ad_mask(ctxt);
499 masked_increment(reg, mask, inc);
502 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
504 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
507 static u32 desc_limit_scaled(struct desc_struct *desc)
509 u32 limit = get_desc_limit(desc);
511 return desc->g ? (limit << 12) | 0xfff : limit;
514 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
516 if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
519 return ctxt->ops->get_cached_segment_base(ctxt, seg);
522 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
523 u32 error, bool valid)
526 ctxt->exception.vector = vec;
527 ctxt->exception.error_code = error;
528 ctxt->exception.error_code_valid = valid;
529 return X86EMUL_PROPAGATE_FAULT;
532 static int emulate_db(struct x86_emulate_ctxt *ctxt)
534 return emulate_exception(ctxt, DB_VECTOR, 0, false);
537 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
539 return emulate_exception(ctxt, GP_VECTOR, err, true);
542 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
544 return emulate_exception(ctxt, SS_VECTOR, err, true);
547 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
549 return emulate_exception(ctxt, UD_VECTOR, 0, false);
552 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
554 return emulate_exception(ctxt, TS_VECTOR, err, true);
557 static int emulate_de(struct x86_emulate_ctxt *ctxt)
559 return emulate_exception(ctxt, DE_VECTOR, 0, false);
562 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
564 return emulate_exception(ctxt, NM_VECTOR, 0, false);
567 static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
570 switch (ctxt->op_bytes) {
572 ctxt->_eip = (u16)dst;
575 ctxt->_eip = (u32)dst;
579 if ((cs_l && is_noncanonical_address(dst)) ||
580 (!cs_l && (dst >> 32) != 0))
581 return emulate_gp(ctxt, 0);
586 WARN(1, "unsupported eip assignment size\n");
588 return X86EMUL_CONTINUE;
591 static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
593 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
596 static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
598 return assign_eip_near(ctxt, ctxt->_eip + rel);
601 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
604 struct desc_struct desc;
606 ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
610 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
615 struct desc_struct desc;
617 ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
618 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
622 * x86 defines three classes of vector instructions: explicitly
623 * aligned, explicitly unaligned, and the rest, which change behaviour
624 * depending on whether they're AVX encoded or not.
626 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
627 * subject to the same check.
629 static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
631 if (likely(size < 16))
634 if (ctxt->d & Aligned)
636 else if (ctxt->d & Unaligned)
638 else if (ctxt->d & Avx)
644 static int __linearize(struct x86_emulate_ctxt *ctxt,
645 struct segmented_address addr,
646 unsigned *max_size, unsigned size,
647 bool write, bool fetch,
650 struct desc_struct desc;
657 la = seg_base(ctxt, addr.seg) + addr.ea;
659 switch (ctxt->mode) {
660 case X86EMUL_MODE_PROT64:
661 if (is_noncanonical_address(la))
662 return emulate_gp(ctxt, 0);
664 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
665 if (size > *max_size)
669 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
673 /* code segment in protected mode or read-only data segment */
674 if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
675 || !(desc.type & 2)) && write)
677 /* unreadable code segment */
678 if (!fetch && (desc.type & 8) && !(desc.type & 2))
680 lim = desc_limit_scaled(&desc);
681 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
682 (ctxt->d & NoBigReal)) {
683 /* la is between zero and 0xffff */
686 *max_size = 0x10000 - la;
687 } else if ((desc.type & 8) || !(desc.type & 4)) {
688 /* expand-up segment */
691 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
693 /* expand-down segment */
696 lim = desc.d ? 0xffffffff : 0xffff;
699 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
701 if (size > *max_size)
703 cpl = ctxt->ops->cpl(ctxt);
704 if (!(desc.type & 8)) {
708 } else if ((desc.type & 8) && !(desc.type & 4)) {
709 /* nonconforming code segment */
712 } else if ((desc.type & 8) && (desc.type & 4)) {
713 /* conforming code segment */
719 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
721 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
722 return emulate_gp(ctxt, 0);
724 return X86EMUL_CONTINUE;
726 if (addr.seg == VCPU_SREG_SS)
727 return emulate_ss(ctxt, 0);
729 return emulate_gp(ctxt, 0);
732 static int linearize(struct x86_emulate_ctxt *ctxt,
733 struct segmented_address addr,
734 unsigned size, bool write,
738 return __linearize(ctxt, addr, &max_size, size, write, false, linear);
742 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
743 struct segmented_address addr,
750 rc = linearize(ctxt, addr, size, false, &linear);
751 if (rc != X86EMUL_CONTINUE)
753 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
757 * Prefetch the remaining bytes of the instruction without crossing page
758 * boundary if they are not in fetch_cache yet.
760 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
763 unsigned size, max_size;
764 unsigned long linear;
765 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
766 struct segmented_address addr = { .seg = VCPU_SREG_CS,
767 .ea = ctxt->eip + cur_size };
770 * We do not know exactly how many bytes will be needed, and
771 * __linearize is expensive, so fetch as much as possible. We
772 * just have to avoid going beyond the 15 byte limit, the end
773 * of the segment, or the end of the page.
775 * __linearize is called with size 0 so that it does not do any
776 * boundary check itself. Instead, we use max_size to check
779 rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
780 if (unlikely(rc != X86EMUL_CONTINUE))
783 size = min_t(unsigned, 15UL ^ cur_size, max_size);
784 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
787 * One instruction can only straddle two pages,
788 * and one has been loaded at the beginning of
789 * x86_decode_insn. So, if not enough bytes
790 * still, we must have hit the 15-byte boundary.
792 if (unlikely(size < op_size))
793 return emulate_gp(ctxt, 0);
795 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
796 size, &ctxt->exception);
797 if (unlikely(rc != X86EMUL_CONTINUE))
799 ctxt->fetch.end += size;
800 return X86EMUL_CONTINUE;
803 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
806 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
808 if (unlikely(done_size < size))
809 return __do_insn_fetch_bytes(ctxt, size - done_size);
811 return X86EMUL_CONTINUE;
814 /* Fetch next part of the instruction being emulated. */
815 #define insn_fetch(_type, _ctxt) \
818 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
819 if (rc != X86EMUL_CONTINUE) \
821 ctxt->_eip += sizeof(_type); \
822 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
823 ctxt->fetch.ptr += sizeof(_type); \
827 #define insn_fetch_arr(_arr, _size, _ctxt) \
829 rc = do_insn_fetch_bytes(_ctxt, _size); \
830 if (rc != X86EMUL_CONTINUE) \
832 ctxt->_eip += (_size); \
833 memcpy(_arr, ctxt->fetch.ptr, _size); \
834 ctxt->fetch.ptr += (_size); \
838 * Given the 'reg' portion of a ModRM byte, and a register block, return a
839 * pointer into the block that addresses the relevant register.
840 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
842 static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
846 int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
848 if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
849 p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
851 p = reg_rmw(ctxt, modrm_reg);
855 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
856 struct segmented_address addr,
857 u16 *size, unsigned long *address, int op_bytes)
864 rc = segmented_read_std(ctxt, addr, size, 2);
865 if (rc != X86EMUL_CONTINUE)
868 rc = segmented_read_std(ctxt, addr, address, op_bytes);
882 FASTOP1SRC2(mul, mul_ex);
883 FASTOP1SRC2(imul, imul_ex);
884 FASTOP1SRC2EX(div, div_ex);
885 FASTOP1SRC2EX(idiv, idiv_ex);
914 static u8 test_cc(unsigned int condition, unsigned long flags)
917 void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
919 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
920 asm("push %[flags]; popf; call *%[fastop]"
921 : "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
925 static void fetch_register_operand(struct operand *op)
929 op->val = *(u8 *)op->addr.reg;
932 op->val = *(u16 *)op->addr.reg;
935 op->val = *(u32 *)op->addr.reg;
938 op->val = *(u64 *)op->addr.reg;
943 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
945 ctxt->ops->get_fpu(ctxt);
947 case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
948 case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
949 case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
950 case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
951 case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
952 case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
953 case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
954 case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
956 case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
957 case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
958 case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
959 case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
960 case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
961 case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
962 case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
963 case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
967 ctxt->ops->put_fpu(ctxt);
970 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
973 ctxt->ops->get_fpu(ctxt);
975 case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
976 case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
977 case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
978 case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
979 case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
980 case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
981 case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
982 case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
984 case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
985 case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
986 case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
987 case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
988 case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
989 case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
990 case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
991 case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
995 ctxt->ops->put_fpu(ctxt);
998 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1000 ctxt->ops->get_fpu(ctxt);
1002 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
1003 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
1004 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
1005 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
1006 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
1007 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
1008 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
1009 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
1012 ctxt->ops->put_fpu(ctxt);
1015 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
1017 ctxt->ops->get_fpu(ctxt);
1019 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
1020 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
1021 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
1022 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
1023 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
1024 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
1025 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
1026 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
1029 ctxt->ops->put_fpu(ctxt);
1032 static int em_fninit(struct x86_emulate_ctxt *ctxt)
1034 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1035 return emulate_nm(ctxt);
1037 ctxt->ops->get_fpu(ctxt);
1038 asm volatile("fninit");
1039 ctxt->ops->put_fpu(ctxt);
1040 return X86EMUL_CONTINUE;
1043 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
1047 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1048 return emulate_nm(ctxt);
1050 ctxt->ops->get_fpu(ctxt);
1051 asm volatile("fnstcw %0": "+m"(fcw));
1052 ctxt->ops->put_fpu(ctxt);
1054 /* force 2 byte destination */
1055 ctxt->dst.bytes = 2;
1056 ctxt->dst.val = fcw;
1058 return X86EMUL_CONTINUE;
1061 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
1065 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
1066 return emulate_nm(ctxt);
1068 ctxt->ops->get_fpu(ctxt);
1069 asm volatile("fnstsw %0": "+m"(fsw));
1070 ctxt->ops->put_fpu(ctxt);
1072 /* force 2 byte destination */
1073 ctxt->dst.bytes = 2;
1074 ctxt->dst.val = fsw;
1076 return X86EMUL_CONTINUE;
1079 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
1082 unsigned reg = ctxt->modrm_reg;
1084 if (!(ctxt->d & ModRM))
1085 reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
1087 if (ctxt->d & Sse) {
1091 read_sse_reg(ctxt, &op->vec_val, reg);
1094 if (ctxt->d & Mmx) {
1103 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1104 op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
1106 fetch_register_operand(op);
1107 op->orig_val = op->val;
1110 static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
1112 if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
1113 ctxt->modrm_seg = VCPU_SREG_SS;
1116 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
1120 int index_reg, base_reg, scale;
1121 int rc = X86EMUL_CONTINUE;
1124 ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
1125 index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
1126 base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
1128 ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
1129 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
1130 ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
1131 ctxt->modrm_seg = VCPU_SREG_DS;
1133 if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
1135 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
1136 op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
1138 if (ctxt->d & Sse) {
1141 op->addr.xmm = ctxt->modrm_rm;
1142 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
1145 if (ctxt->d & Mmx) {
1148 op->addr.mm = ctxt->modrm_rm & 7;
1151 fetch_register_operand(op);
1157 if (ctxt->ad_bytes == 2) {
1158 unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
1159 unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
1160 unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
1161 unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
1163 /* 16-bit ModR/M decode. */
1164 switch (ctxt->modrm_mod) {
1166 if (ctxt->modrm_rm == 6)
1167 modrm_ea += insn_fetch(u16, ctxt);
1170 modrm_ea += insn_fetch(s8, ctxt);
1173 modrm_ea += insn_fetch(u16, ctxt);
1176 switch (ctxt->modrm_rm) {
1178 modrm_ea += bx + si;
1181 modrm_ea += bx + di;
1184 modrm_ea += bp + si;
1187 modrm_ea += bp + di;
1196 if (ctxt->modrm_mod != 0)
1203 if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
1204 (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
1205 ctxt->modrm_seg = VCPU_SREG_SS;
1206 modrm_ea = (u16)modrm_ea;
1208 /* 32/64-bit ModR/M decode. */
1209 if ((ctxt->modrm_rm & 7) == 4) {
1210 sib = insn_fetch(u8, ctxt);
1211 index_reg |= (sib >> 3) & 7;
1212 base_reg |= sib & 7;
1215 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
1216 modrm_ea += insn_fetch(s32, ctxt);
1218 modrm_ea += reg_read(ctxt, base_reg);
1219 adjust_modrm_seg(ctxt, base_reg);
1222 modrm_ea += reg_read(ctxt, index_reg) << scale;
1223 } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
1224 if (ctxt->mode == X86EMUL_MODE_PROT64)
1225 ctxt->rip_relative = 1;
1227 base_reg = ctxt->modrm_rm;
1228 modrm_ea += reg_read(ctxt, base_reg);
1229 adjust_modrm_seg(ctxt, base_reg);
1231 switch (ctxt->modrm_mod) {
1233 if (ctxt->modrm_rm == 5)
1234 modrm_ea += insn_fetch(s32, ctxt);
1237 modrm_ea += insn_fetch(s8, ctxt);
1240 modrm_ea += insn_fetch(s32, ctxt);
1244 op->addr.mem.ea = modrm_ea;
1245 if (ctxt->ad_bytes != 8)
1246 ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
1252 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1255 int rc = X86EMUL_CONTINUE;
1258 switch (ctxt->ad_bytes) {
1260 op->addr.mem.ea = insn_fetch(u16, ctxt);
1263 op->addr.mem.ea = insn_fetch(u32, ctxt);
1266 op->addr.mem.ea = insn_fetch(u64, ctxt);
1273 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1277 if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1278 mask = ~((long)ctxt->dst.bytes * 8 - 1);
1280 if (ctxt->src.bytes == 2)
1281 sv = (s16)ctxt->src.val & (s16)mask;
1282 else if (ctxt->src.bytes == 4)
1283 sv = (s32)ctxt->src.val & (s32)mask;
1285 sv = (s64)ctxt->src.val & (s64)mask;
1287 ctxt->dst.addr.mem.ea += (sv >> 3);
1290 /* only subword offset */
1291 ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1294 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1295 unsigned long addr, void *dest, unsigned size)
1298 struct read_cache *mc = &ctxt->mem_read;
1300 if (mc->pos < mc->end)
1303 WARN_ON((mc->end + size) >= sizeof(mc->data));
1305 rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
1307 if (rc != X86EMUL_CONTINUE)
1313 memcpy(dest, mc->data + mc->pos, size);
1315 return X86EMUL_CONTINUE;
1318 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1319 struct segmented_address addr,
1326 rc = linearize(ctxt, addr, size, false, &linear);
1327 if (rc != X86EMUL_CONTINUE)
1329 return read_emulated(ctxt, linear, data, size);
1332 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1333 struct segmented_address addr,
1340 rc = linearize(ctxt, addr, size, true, &linear);
1341 if (rc != X86EMUL_CONTINUE)
1343 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1347 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1348 struct segmented_address addr,
1349 const void *orig_data, const void *data,
1355 rc = linearize(ctxt, addr, size, true, &linear);
1356 if (rc != X86EMUL_CONTINUE)
1358 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1359 size, &ctxt->exception);
1362 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1363 unsigned int size, unsigned short port,
1366 struct read_cache *rc = &ctxt->io_read;
1368 if (rc->pos == rc->end) { /* refill pio read ahead */
1369 unsigned int in_page, n;
1370 unsigned int count = ctxt->rep_prefix ?
1371 address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
1372 in_page = (ctxt->eflags & EFLG_DF) ?
1373 offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
1374 PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
1375 n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
1378 rc->pos = rc->end = 0;
1379 if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1384 if (ctxt->rep_prefix && (ctxt->d & String) &&
1385 !(ctxt->eflags & EFLG_DF)) {
1386 ctxt->dst.data = rc->data + rc->pos;
1387 ctxt->dst.type = OP_MEM_STR;
1388 ctxt->dst.count = (rc->end - rc->pos) / size;
1391 memcpy(dest, rc->data + rc->pos, size);
1397 static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
1398 u16 index, struct desc_struct *desc)
1403 ctxt->ops->get_idt(ctxt, &dt);
1405 if (dt.size < index * 8 + 7)
1406 return emulate_gp(ctxt, index << 3 | 0x2);
1408 addr = dt.address + index * 8;
1409 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1413 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1414 u16 selector, struct desc_ptr *dt)
1416 const struct x86_emulate_ops *ops = ctxt->ops;
1419 if (selector & 1 << 2) {
1420 struct desc_struct desc;
1423 memset (dt, 0, sizeof *dt);
1424 if (!ops->get_segment(ctxt, &sel, &desc, &base3,
1428 dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1429 dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
1431 ops->get_gdt(ctxt, dt);
1434 /* allowed just for 8 bytes segments */
1435 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1436 u16 selector, struct desc_struct *desc,
1440 u16 index = selector >> 3;
1443 get_descriptor_table_ptr(ctxt, selector, &dt);
1445 if (dt.size < index * 8 + 7)
1446 return emulate_gp(ctxt, selector & 0xfffc);
1448 *desc_addr_p = addr = dt.address + index * 8;
1449 return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1453 /* allowed just for 8 bytes segments */
1454 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1455 u16 selector, struct desc_struct *desc)
1458 u16 index = selector >> 3;
1461 get_descriptor_table_ptr(ctxt, selector, &dt);
1463 if (dt.size < index * 8 + 7)
1464 return emulate_gp(ctxt, selector & 0xfffc);
1466 addr = dt.address + index * 8;
1467 return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1471 /* Does not support long mode */
1472 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1473 u16 selector, int seg, u8 cpl,
1474 bool in_task_switch,
1475 struct desc_struct *desc)
1477 struct desc_struct seg_desc, old_desc;
1479 unsigned err_vec = GP_VECTOR;
1481 bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1487 memset(&seg_desc, 0, sizeof seg_desc);
1489 if (ctxt->mode == X86EMUL_MODE_REAL) {
1490 /* set real mode segment descriptor (keep limit etc. for
1492 ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
1493 set_desc_base(&seg_desc, selector << 4);
1495 } else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
1496 /* VM86 needs a clean new segment descriptor */
1497 set_desc_base(&seg_desc, selector << 4);
1498 set_desc_limit(&seg_desc, 0xffff);
1508 /* NULL selector is not valid for TR, CS and SS (except for long mode) */
1509 if ((seg == VCPU_SREG_CS
1510 || (seg == VCPU_SREG_SS
1511 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
1512 || seg == VCPU_SREG_TR)
1516 /* TR should be in GDT only */
1517 if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1520 if (null_selector) /* for NULL selector skip all following checks */
1523 ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
1524 if (ret != X86EMUL_CONTINUE)
1527 err_code = selector & 0xfffc;
1528 err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
1530 /* can't load system descriptor into segment selector */
1531 if (seg <= VCPU_SREG_GS && !seg_desc.s)
1535 err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1544 * segment is not a writable data segment or segment
1545 * selector's RPL != CPL or segment selector's RPL != CPL
1547 if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1551 if (!(seg_desc.type & 8))
1554 if (seg_desc.type & 4) {
1560 if (rpl > cpl || dpl != cpl)
1563 /* in long-mode d/b must be clear if l is set */
1564 if (seg_desc.d && seg_desc.l) {
1567 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
1568 if (efer & EFER_LMA)
1572 /* CS(RPL) <- CPL */
1573 selector = (selector & 0xfffc) | cpl;
1576 if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1578 old_desc = seg_desc;
1579 seg_desc.type |= 2; /* busy */
1580 ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
1581 sizeof(seg_desc), &ctxt->exception);
1582 if (ret != X86EMUL_CONTINUE)
1585 case VCPU_SREG_LDTR:
1586 if (seg_desc.s || seg_desc.type != 2)
1589 default: /* DS, ES, FS, or GS */
1591 * segment is not a data or readable code segment or
1592 * ((segment is a data or nonconforming code segment)
1593 * and (both RPL and CPL > DPL))
1595 if ((seg_desc.type & 0xa) == 0x8 ||
1596 (((seg_desc.type & 0xc) != 0xc) &&
1597 (rpl > dpl && cpl > dpl)))
1603 /* mark segment as accessed */
1605 ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1606 if (ret != X86EMUL_CONTINUE)
1608 } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
1609 ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
1610 sizeof(base3), &ctxt->exception);
1611 if (ret != X86EMUL_CONTINUE)
1615 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1618 return X86EMUL_CONTINUE;
1620 return emulate_exception(ctxt, err_vec, err_code, true);
1623 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1624 u16 selector, int seg)
1626 u8 cpl = ctxt->ops->cpl(ctxt);
1627 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1630 static void write_register_operand(struct operand *op)
1632 /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1633 switch (op->bytes) {
1635 *(u8 *)op->addr.reg = (u8)op->val;
1638 *(u16 *)op->addr.reg = (u16)op->val;
1641 *op->addr.reg = (u32)op->val;
1642 break; /* 64b: zero-extend */
1644 *op->addr.reg = op->val;
1649 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
1653 write_register_operand(op);
1656 if (ctxt->lock_prefix)
1657 return segmented_cmpxchg(ctxt,
1663 return segmented_write(ctxt,
1669 return segmented_write(ctxt,
1672 op->bytes * op->count);
1675 write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
1678 write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
1686 return X86EMUL_CONTINUE;
1689 static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
1691 struct segmented_address addr;
1693 rsp_increment(ctxt, -bytes);
1694 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1695 addr.seg = VCPU_SREG_SS;
1697 return segmented_write(ctxt, addr, data, bytes);
1700 static int em_push(struct x86_emulate_ctxt *ctxt)
1702 /* Disable writeback. */
1703 ctxt->dst.type = OP_NONE;
1704 return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
1707 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1708 void *dest, int len)
1711 struct segmented_address addr;
1713 addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
1714 addr.seg = VCPU_SREG_SS;
1715 rc = segmented_read(ctxt, addr, dest, len);
1716 if (rc != X86EMUL_CONTINUE)
1719 rsp_increment(ctxt, len);
1723 static int em_pop(struct x86_emulate_ctxt *ctxt)
1725 return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1728 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1729 void *dest, int len)
1732 unsigned long val, change_mask;
1733 int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1734 int cpl = ctxt->ops->cpl(ctxt);
1736 rc = emulate_pop(ctxt, &val, len);
1737 if (rc != X86EMUL_CONTINUE)
1740 change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1741 | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
1743 switch(ctxt->mode) {
1744 case X86EMUL_MODE_PROT64:
1745 case X86EMUL_MODE_PROT32:
1746 case X86EMUL_MODE_PROT16:
1748 change_mask |= EFLG_IOPL;
1750 change_mask |= EFLG_IF;
1752 case X86EMUL_MODE_VM86:
1754 return emulate_gp(ctxt, 0);
1755 change_mask |= EFLG_IF;
1757 default: /* real mode */
1758 change_mask |= (EFLG_IOPL | EFLG_IF);
1762 *(unsigned long *)dest =
1763 (ctxt->eflags & ~change_mask) | (val & change_mask);
1768 static int em_popf(struct x86_emulate_ctxt *ctxt)
1770 ctxt->dst.type = OP_REG;
1771 ctxt->dst.addr.reg = &ctxt->eflags;
1772 ctxt->dst.bytes = ctxt->op_bytes;
1773 return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1776 static int em_enter(struct x86_emulate_ctxt *ctxt)
1779 unsigned frame_size = ctxt->src.val;
1780 unsigned nesting_level = ctxt->src2.val & 31;
1784 return X86EMUL_UNHANDLEABLE;
1786 rbp = reg_read(ctxt, VCPU_REGS_RBP);
1787 rc = push(ctxt, &rbp, stack_size(ctxt));
1788 if (rc != X86EMUL_CONTINUE)
1790 assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
1792 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
1793 reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
1795 return X86EMUL_CONTINUE;
1798 static int em_leave(struct x86_emulate_ctxt *ctxt)
1800 assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
1802 return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
1805 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
1807 int seg = ctxt->src2.val;
1809 ctxt->src.val = get_segment_selector(ctxt, seg);
1811 return em_push(ctxt);
1814 static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
1816 int seg = ctxt->src2.val;
1817 unsigned long selector;
1820 rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1821 if (rc != X86EMUL_CONTINUE)
1824 if (ctxt->modrm_reg == VCPU_SREG_SS)
1825 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
1827 rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1831 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1833 unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
1834 int rc = X86EMUL_CONTINUE;
1835 int reg = VCPU_REGS_RAX;
1837 while (reg <= VCPU_REGS_RDI) {
1838 (reg == VCPU_REGS_RSP) ?
1839 (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
1842 if (rc != X86EMUL_CONTINUE)
1851 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1853 ctxt->src.val = (unsigned long)ctxt->eflags;
1854 return em_push(ctxt);
1857 static int em_popa(struct x86_emulate_ctxt *ctxt)
1859 int rc = X86EMUL_CONTINUE;
1860 int reg = VCPU_REGS_RDI;
1862 while (reg >= VCPU_REGS_RAX) {
1863 if (reg == VCPU_REGS_RSP) {
1864 rsp_increment(ctxt, ctxt->op_bytes);
1868 rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
1869 if (rc != X86EMUL_CONTINUE)
1876 static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1878 const struct x86_emulate_ops *ops = ctxt->ops;
1885 /* TODO: Add limit checks */
1886 ctxt->src.val = ctxt->eflags;
1888 if (rc != X86EMUL_CONTINUE)
1891 ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1893 ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1895 if (rc != X86EMUL_CONTINUE)
1898 ctxt->src.val = ctxt->_eip;
1900 if (rc != X86EMUL_CONTINUE)
1903 ops->get_idt(ctxt, &dt);
1905 eip_addr = dt.address + (irq << 2);
1906 cs_addr = dt.address + (irq << 2) + 2;
1908 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1909 if (rc != X86EMUL_CONTINUE)
1912 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1913 if (rc != X86EMUL_CONTINUE)
1916 rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1917 if (rc != X86EMUL_CONTINUE)
1925 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1929 invalidate_registers(ctxt);
1930 rc = __emulate_int_real(ctxt, irq);
1931 if (rc == X86EMUL_CONTINUE)
1932 writeback_registers(ctxt);
1936 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1938 switch(ctxt->mode) {
1939 case X86EMUL_MODE_REAL:
1940 return __emulate_int_real(ctxt, irq);
1941 case X86EMUL_MODE_VM86:
1942 case X86EMUL_MODE_PROT16:
1943 case X86EMUL_MODE_PROT32:
1944 case X86EMUL_MODE_PROT64:
1946 /* Protected mode interrupts unimplemented yet */
1947 return X86EMUL_UNHANDLEABLE;
1951 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1953 int rc = X86EMUL_CONTINUE;
1954 unsigned long temp_eip = 0;
1955 unsigned long temp_eflags = 0;
1956 unsigned long cs = 0;
1957 unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1958 EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1959 EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1960 unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1962 /* TODO: Add stack limit check */
1964 rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1966 if (rc != X86EMUL_CONTINUE)
1969 if (temp_eip & ~0xffff)
1970 return emulate_gp(ctxt, 0);
1972 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1974 if (rc != X86EMUL_CONTINUE)
1977 rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1979 if (rc != X86EMUL_CONTINUE)
1982 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1984 if (rc != X86EMUL_CONTINUE)
1987 ctxt->_eip = temp_eip;
1990 if (ctxt->op_bytes == 4)
1991 ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1992 else if (ctxt->op_bytes == 2) {
1993 ctxt->eflags &= ~0xffff;
1994 ctxt->eflags |= temp_eflags;
1997 ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1998 ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
2003 static int em_iret(struct x86_emulate_ctxt *ctxt)
2005 switch(ctxt->mode) {
2006 case X86EMUL_MODE_REAL:
2007 return emulate_iret_real(ctxt);
2008 case X86EMUL_MODE_VM86:
2009 case X86EMUL_MODE_PROT16:
2010 case X86EMUL_MODE_PROT32:
2011 case X86EMUL_MODE_PROT64:
2013 /* iret from protected mode unimplemented yet */
2014 return X86EMUL_UNHANDLEABLE;
2018 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
2021 unsigned short sel, old_sel;
2022 struct desc_struct old_desc, new_desc;
2023 const struct x86_emulate_ops *ops = ctxt->ops;
2024 u8 cpl = ctxt->ops->cpl(ctxt);
2026 /* Assignment of RIP may only fail in 64-bit mode */
2027 if (ctxt->mode == X86EMUL_MODE_PROT64)
2028 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2031 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2033 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2035 if (rc != X86EMUL_CONTINUE)
2038 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2039 if (rc != X86EMUL_CONTINUE) {
2040 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2041 /* assigning eip failed; restore the old cs */
2042 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2048 static int em_grp45(struct x86_emulate_ctxt *ctxt)
2050 int rc = X86EMUL_CONTINUE;
2052 switch (ctxt->modrm_reg) {
2053 case 2: /* call near abs */ {
2055 old_eip = ctxt->_eip;
2056 rc = assign_eip_near(ctxt, ctxt->src.val);
2057 if (rc != X86EMUL_CONTINUE)
2059 ctxt->src.val = old_eip;
2063 case 4: /* jmp abs */
2064 rc = assign_eip_near(ctxt, ctxt->src.val);
2066 case 5: /* jmp far */
2067 rc = em_jmp_far(ctxt);
2076 static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2078 u64 old = ctxt->dst.orig_val64;
2080 if (ctxt->dst.bytes == 16)
2081 return X86EMUL_UNHANDLEABLE;
2083 if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
2084 ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
2085 *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
2086 *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
2087 ctxt->eflags &= ~EFLG_ZF;
2089 ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
2090 (u32) reg_read(ctxt, VCPU_REGS_RBX);
2092 ctxt->eflags |= EFLG_ZF;
2094 return X86EMUL_CONTINUE;
2097 static int em_ret(struct x86_emulate_ctxt *ctxt)
2102 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2103 if (rc != X86EMUL_CONTINUE)
2106 return assign_eip_near(ctxt, eip);
2109 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2112 unsigned long eip, cs;
2114 int cpl = ctxt->ops->cpl(ctxt);
2115 struct desc_struct old_desc, new_desc;
2116 const struct x86_emulate_ops *ops = ctxt->ops;
2118 if (ctxt->mode == X86EMUL_MODE_PROT64)
2119 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2122 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2123 if (rc != X86EMUL_CONTINUE)
2125 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2126 if (rc != X86EMUL_CONTINUE)
2128 /* Outer-privilege level return is not implemented */
2129 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2130 return X86EMUL_UNHANDLEABLE;
2131 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
2133 if (rc != X86EMUL_CONTINUE)
2135 rc = assign_eip_far(ctxt, eip, new_desc.l);
2136 if (rc != X86EMUL_CONTINUE) {
2137 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2138 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2143 static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
2147 rc = em_ret_far(ctxt);
2148 if (rc != X86EMUL_CONTINUE)
2150 rsp_increment(ctxt, ctxt->src.val);
2151 return X86EMUL_CONTINUE;
2154 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
2156 /* Save real source value, then compare EAX against destination. */
2157 ctxt->dst.orig_val = ctxt->dst.val;
2158 ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
2159 ctxt->src.orig_val = ctxt->src.val;
2160 ctxt->src.val = ctxt->dst.orig_val;
2161 fastop(ctxt, em_cmp);
2163 if (ctxt->eflags & EFLG_ZF) {
2164 /* Success: write back to memory. */
2165 ctxt->dst.val = ctxt->src.orig_val;
2167 /* Failure: write the value we saw to EAX. */
2168 ctxt->dst.type = OP_REG;
2169 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
2170 ctxt->dst.val = ctxt->dst.orig_val;
2172 return X86EMUL_CONTINUE;
2175 static int em_lseg(struct x86_emulate_ctxt *ctxt)
2177 int seg = ctxt->src2.val;
2181 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2183 rc = load_segment_descriptor(ctxt, sel, seg);
2184 if (rc != X86EMUL_CONTINUE)
2187 ctxt->dst.val = ctxt->src.val;
2192 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2193 struct desc_struct *cs, struct desc_struct *ss)
2195 cs->l = 0; /* will be adjusted later */
2196 set_desc_base(cs, 0); /* flat segment */
2197 cs->g = 1; /* 4kb granularity */
2198 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2199 cs->type = 0x0b; /* Read, Execute, Accessed */
2201 cs->dpl = 0; /* will be adjusted later */
2206 set_desc_base(ss, 0); /* flat segment */
2207 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2208 ss->g = 1; /* 4kb granularity */
2210 ss->type = 0x03; /* Read/Write, Accessed */
2211 ss->d = 1; /* 32bit stack segment */
2218 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2220 u32 eax, ebx, ecx, edx;
2223 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2224 return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
2225 && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
2226 && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
2229 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2231 const struct x86_emulate_ops *ops = ctxt->ops;
2232 u32 eax, ebx, ecx, edx;
2235 * syscall should always be enabled in longmode - so only become
2236 * vendor specific (cpuid) if other modes are active...
2238 if (ctxt->mode == X86EMUL_MODE_PROT64)
2243 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
2245 * Intel ("GenuineIntel")
2246 * remark: Intel CPUs only support "syscall" in 64bit
2247 * longmode. Also an 64bit guest with a
2248 * 32bit compat-app running will #UD !! While this
2249 * behaviour can be fixed (by emulating) into AMD
2250 * response - CPUs of AMD can't behave like Intel.
2252 if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
2253 ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
2254 edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
2257 /* AMD ("AuthenticAMD") */
2258 if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
2259 ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
2260 edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
2263 /* AMD ("AMDisbetter!") */
2264 if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
2265 ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
2266 edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
2269 /* default: (not Intel, not AMD), apply Intel's stricter rules... */
2273 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2275 const struct x86_emulate_ops *ops = ctxt->ops;
2276 struct desc_struct cs, ss;
2281 /* syscall is not available in real mode */
2282 if (ctxt->mode == X86EMUL_MODE_REAL ||
2283 ctxt->mode == X86EMUL_MODE_VM86)
2284 return emulate_ud(ctxt);
2286 if (!(em_syscall_is_enabled(ctxt)))
2287 return emulate_ud(ctxt);
2289 ops->get_msr(ctxt, MSR_EFER, &efer);
2290 setup_syscalls_segments(ctxt, &cs, &ss);
2292 if (!(efer & EFER_SCE))
2293 return emulate_ud(ctxt);
2295 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2297 cs_sel = (u16)(msr_data & 0xfffc);
2298 ss_sel = (u16)(msr_data + 8);
2300 if (efer & EFER_LMA) {
2304 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2305 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2307 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2308 if (efer & EFER_LMA) {
2309 #ifdef CONFIG_X86_64
2310 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2313 ctxt->mode == X86EMUL_MODE_PROT64 ?
2314 MSR_LSTAR : MSR_CSTAR, &msr_data);
2315 ctxt->_eip = msr_data;
2317 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2318 ctxt->eflags &= ~msr_data;
2322 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2323 ctxt->_eip = (u32)msr_data;
2325 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2328 return X86EMUL_CONTINUE;
2331 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2333 const struct x86_emulate_ops *ops = ctxt->ops;
2334 struct desc_struct cs, ss;
2339 ops->get_msr(ctxt, MSR_EFER, &efer);
2340 /* inject #GP if in real mode */
2341 if (ctxt->mode == X86EMUL_MODE_REAL)
2342 return emulate_gp(ctxt, 0);
2345 * Not recognized on AMD in compat mode (but is recognized in legacy
2348 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2349 && !vendor_intel(ctxt))
2350 return emulate_ud(ctxt);
2352 /* XXX sysenter/sysexit have not been tested in 64bit mode.
2353 * Therefore, we inject an #UD.
2355 if (ctxt->mode == X86EMUL_MODE_PROT64)
2356 return emulate_ud(ctxt);
2358 setup_syscalls_segments(ctxt, &cs, &ss);
2360 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2361 if ((msr_data & 0xfffc) == 0x0)
2362 return emulate_gp(ctxt, 0);
2364 ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
2365 cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
2366 ss_sel = cs_sel + 8;
2367 if (efer & EFER_LMA) {
2372 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2373 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2375 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2376 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2378 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2379 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2382 return X86EMUL_CONTINUE;
2385 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2387 const struct x86_emulate_ops *ops = ctxt->ops;
2388 struct desc_struct cs, ss;
2389 u64 msr_data, rcx, rdx;
2391 u16 cs_sel = 0, ss_sel = 0;
2393 /* inject #GP if in real mode or Virtual 8086 mode */
2394 if (ctxt->mode == X86EMUL_MODE_REAL ||
2395 ctxt->mode == X86EMUL_MODE_VM86)
2396 return emulate_gp(ctxt, 0);
2398 setup_syscalls_segments(ctxt, &cs, &ss);
2400 if ((ctxt->rex_prefix & 0x8) != 0x0)
2401 usermode = X86EMUL_MODE_PROT64;
2403 usermode = X86EMUL_MODE_PROT32;
2405 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2406 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2410 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2412 case X86EMUL_MODE_PROT32:
2413 cs_sel = (u16)(msr_data + 16);
2414 if ((msr_data & 0xfffc) == 0x0)
2415 return emulate_gp(ctxt, 0);
2416 ss_sel = (u16)(msr_data + 24);
2418 case X86EMUL_MODE_PROT64:
2419 cs_sel = (u16)(msr_data + 32);
2420 if (msr_data == 0x0)
2421 return emulate_gp(ctxt, 0);
2422 ss_sel = cs_sel + 8;
2425 if (is_noncanonical_address(rcx) ||
2426 is_noncanonical_address(rdx))
2427 return emulate_gp(ctxt, 0);
2430 cs_sel |= SELECTOR_RPL_MASK;
2431 ss_sel |= SELECTOR_RPL_MASK;
2433 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2434 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2437 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2439 return X86EMUL_CONTINUE;
2442 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2445 if (ctxt->mode == X86EMUL_MODE_REAL)
2447 if (ctxt->mode == X86EMUL_MODE_VM86)
2449 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2450 return ctxt->ops->cpl(ctxt) > iopl;
2453 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2456 const struct x86_emulate_ops *ops = ctxt->ops;
2457 struct desc_struct tr_seg;
2460 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2461 unsigned mask = (1 << len) - 1;
2464 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2467 if (desc_limit_scaled(&tr_seg) < 103)
2469 base = get_desc_base(&tr_seg);
2470 #ifdef CONFIG_X86_64
2471 base |= ((u64)base3) << 32;
2473 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2474 if (r != X86EMUL_CONTINUE)
2476 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2478 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2479 if (r != X86EMUL_CONTINUE)
2481 if ((perm >> bit_idx) & mask)
2486 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2492 if (emulator_bad_iopl(ctxt))
2493 if (!emulator_io_port_access_allowed(ctxt, port, len))
2496 ctxt->perm_ok = true;
2501 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2502 struct tss_segment_16 *tss)
2504 tss->ip = ctxt->_eip;
2505 tss->flag = ctxt->eflags;
2506 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2507 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2508 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2509 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2510 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2511 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2512 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2513 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2515 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2516 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2517 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2518 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2519 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2522 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2523 struct tss_segment_16 *tss)
2528 ctxt->_eip = tss->ip;
2529 ctxt->eflags = tss->flag | 2;
2530 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2531 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2532 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2533 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2534 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2535 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2536 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2537 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2540 * SDM says that segment selectors are loaded before segment
2543 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2544 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2545 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2546 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2547 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2552 * Now load segment descriptors. If fault happens at this stage
2553 * it is handled in a context of new task
2555 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2557 if (ret != X86EMUL_CONTINUE)
2559 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2561 if (ret != X86EMUL_CONTINUE)
2563 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2565 if (ret != X86EMUL_CONTINUE)
2567 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2569 if (ret != X86EMUL_CONTINUE)
2571 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2573 if (ret != X86EMUL_CONTINUE)
2576 return X86EMUL_CONTINUE;
2579 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2580 u16 tss_selector, u16 old_tss_sel,
2581 ulong old_tss_base, struct desc_struct *new_desc)
2583 const struct x86_emulate_ops *ops = ctxt->ops;
2584 struct tss_segment_16 tss_seg;
2586 u32 new_tss_base = get_desc_base(new_desc);
2588 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2590 if (ret != X86EMUL_CONTINUE)
2591 /* FIXME: need to provide precise fault address */
2594 save_state_to_tss16(ctxt, &tss_seg);
2596 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2598 if (ret != X86EMUL_CONTINUE)
2599 /* FIXME: need to provide precise fault address */
2602 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2604 if (ret != X86EMUL_CONTINUE)
2605 /* FIXME: need to provide precise fault address */
2608 if (old_tss_sel != 0xffff) {
2609 tss_seg.prev_task_link = old_tss_sel;
2611 ret = ops->write_std(ctxt, new_tss_base,
2612 &tss_seg.prev_task_link,
2613 sizeof tss_seg.prev_task_link,
2615 if (ret != X86EMUL_CONTINUE)
2616 /* FIXME: need to provide precise fault address */
2620 return load_state_from_tss16(ctxt, &tss_seg);
2623 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2624 struct tss_segment_32 *tss)
2626 /* CR3 and ldt selector are not saved intentionally */
2627 tss->eip = ctxt->_eip;
2628 tss->eflags = ctxt->eflags;
2629 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
2630 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
2631 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
2632 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
2633 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
2634 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
2635 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
2636 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
2638 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2639 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2640 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2641 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2642 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2643 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2646 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2647 struct tss_segment_32 *tss)
2652 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2653 return emulate_gp(ctxt, 0);
2654 ctxt->_eip = tss->eip;
2655 ctxt->eflags = tss->eflags | 2;
2657 /* General purpose registers */
2658 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
2659 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
2660 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
2661 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
2662 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
2663 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
2664 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
2665 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
2668 * SDM says that segment selectors are loaded before segment
2669 * descriptors. This is important because CPL checks will
2672 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2673 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2674 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2675 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2676 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2677 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2678 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2681 * If we're switching between Protected Mode and VM86, we need to make
2682 * sure to update the mode before loading the segment descriptors so
2683 * that the selectors are interpreted correctly.
2685 if (ctxt->eflags & X86_EFLAGS_VM) {
2686 ctxt->mode = X86EMUL_MODE_VM86;
2689 ctxt->mode = X86EMUL_MODE_PROT32;
2694 * Now load segment descriptors. If fault happenes at this stage
2695 * it is handled in a context of new task
2697 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2699 if (ret != X86EMUL_CONTINUE)
2701 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2703 if (ret != X86EMUL_CONTINUE)
2705 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2707 if (ret != X86EMUL_CONTINUE)
2709 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2711 if (ret != X86EMUL_CONTINUE)
2713 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2715 if (ret != X86EMUL_CONTINUE)
2717 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2719 if (ret != X86EMUL_CONTINUE)
2721 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2723 if (ret != X86EMUL_CONTINUE)
2726 return X86EMUL_CONTINUE;
2729 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2730 u16 tss_selector, u16 old_tss_sel,
2731 ulong old_tss_base, struct desc_struct *new_desc)
2733 const struct x86_emulate_ops *ops = ctxt->ops;
2734 struct tss_segment_32 tss_seg;
2736 u32 new_tss_base = get_desc_base(new_desc);
2737 u32 eip_offset = offsetof(struct tss_segment_32, eip);
2738 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
2740 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2742 if (ret != X86EMUL_CONTINUE)
2743 /* FIXME: need to provide precise fault address */
2746 save_state_to_tss32(ctxt, &tss_seg);
2748 /* Only GP registers and segment selectors are saved */
2749 ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
2750 ldt_sel_offset - eip_offset, &ctxt->exception);
2751 if (ret != X86EMUL_CONTINUE)
2752 /* FIXME: need to provide precise fault address */
2755 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2757 if (ret != X86EMUL_CONTINUE)
2758 /* FIXME: need to provide precise fault address */
2761 if (old_tss_sel != 0xffff) {
2762 tss_seg.prev_task_link = old_tss_sel;
2764 ret = ops->write_std(ctxt, new_tss_base,
2765 &tss_seg.prev_task_link,
2766 sizeof tss_seg.prev_task_link,
2768 if (ret != X86EMUL_CONTINUE)
2769 /* FIXME: need to provide precise fault address */
2773 return load_state_from_tss32(ctxt, &tss_seg);
2776 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2777 u16 tss_selector, int idt_index, int reason,
2778 bool has_error_code, u32 error_code)
2780 const struct x86_emulate_ops *ops = ctxt->ops;
2781 struct desc_struct curr_tss_desc, next_tss_desc;
2783 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2784 ulong old_tss_base =
2785 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2789 /* FIXME: old_tss_base == ~0 ? */
2791 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
2792 if (ret != X86EMUL_CONTINUE)
2794 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
2795 if (ret != X86EMUL_CONTINUE)
2798 /* FIXME: check that next_tss_desc is tss */
2801 * Check privileges. The three cases are task switch caused by...
2803 * 1. jmp/call/int to task gate: Check against DPL of the task gate
2804 * 2. Exception/IRQ/iret: No check is performed
2805 * 3. jmp/call to TSS: Check against DPL of the TSS
2807 if (reason == TASK_SWITCH_GATE) {
2808 if (idt_index != -1) {
2809 /* Software interrupts */
2810 struct desc_struct task_gate_desc;
2813 ret = read_interrupt_descriptor(ctxt, idt_index,
2815 if (ret != X86EMUL_CONTINUE)
2818 dpl = task_gate_desc.dpl;
2819 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2820 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
2822 } else if (reason != TASK_SWITCH_IRET) {
2823 int dpl = next_tss_desc.dpl;
2824 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
2825 return emulate_gp(ctxt, tss_selector);
2829 desc_limit = desc_limit_scaled(&next_tss_desc);
2830 if (!next_tss_desc.p ||
2831 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2832 desc_limit < 0x2b)) {
2833 return emulate_ts(ctxt, tss_selector & 0xfffc);
2836 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2837 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2838 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2841 if (reason == TASK_SWITCH_IRET)
2842 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2844 /* set back link to prev task only if NT bit is set in eflags
2845 note that old_tss_sel is not used after this point */
2846 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2847 old_tss_sel = 0xffff;
2849 if (next_tss_desc.type & 8)
2850 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2851 old_tss_base, &next_tss_desc);
2853 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2854 old_tss_base, &next_tss_desc);
2855 if (ret != X86EMUL_CONTINUE)
2858 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2859 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2861 if (reason != TASK_SWITCH_IRET) {
2862 next_tss_desc.type |= (1 << 1); /* set busy flag */
2863 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2866 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
2867 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2869 if (has_error_code) {
2870 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2871 ctxt->lock_prefix = 0;
2872 ctxt->src.val = (unsigned long) error_code;
2873 ret = em_push(ctxt);
2879 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2880 u16 tss_selector, int idt_index, int reason,
2881 bool has_error_code, u32 error_code)
2885 invalidate_registers(ctxt);
2886 ctxt->_eip = ctxt->eip;
2887 ctxt->dst.type = OP_NONE;
2889 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
2890 has_error_code, error_code);
2892 if (rc == X86EMUL_CONTINUE) {
2893 ctxt->eip = ctxt->_eip;
2894 writeback_registers(ctxt);
2897 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2900 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
2903 int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
2905 register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
2906 op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
2909 static int em_das(struct x86_emulate_ctxt *ctxt)
2912 bool af, cf, old_cf;
2914 cf = ctxt->eflags & X86_EFLAGS_CF;
2920 af = ctxt->eflags & X86_EFLAGS_AF;
2921 if ((al & 0x0f) > 9 || af) {
2923 cf = old_cf | (al >= 250);
2928 if (old_al > 0x99 || old_cf) {
2934 /* Set PF, ZF, SF */
2935 ctxt->src.type = OP_IMM;
2937 ctxt->src.bytes = 1;
2938 fastop(ctxt, em_or);
2939 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2941 ctxt->eflags |= X86_EFLAGS_CF;
2943 ctxt->eflags |= X86_EFLAGS_AF;
2944 return X86EMUL_CONTINUE;
2947 static int em_aam(struct x86_emulate_ctxt *ctxt)
2951 if (ctxt->src.val == 0)
2952 return emulate_de(ctxt);
2954 al = ctxt->dst.val & 0xff;
2955 ah = al / ctxt->src.val;
2956 al %= ctxt->src.val;
2958 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
2960 /* Set PF, ZF, SF */
2961 ctxt->src.type = OP_IMM;
2963 ctxt->src.bytes = 1;
2964 fastop(ctxt, em_or);
2966 return X86EMUL_CONTINUE;
2969 static int em_aad(struct x86_emulate_ctxt *ctxt)
2971 u8 al = ctxt->dst.val & 0xff;
2972 u8 ah = (ctxt->dst.val >> 8) & 0xff;
2974 al = (al + (ah * ctxt->src.val)) & 0xff;
2976 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
2978 /* Set PF, ZF, SF */
2979 ctxt->src.type = OP_IMM;
2981 ctxt->src.bytes = 1;
2982 fastop(ctxt, em_or);
2984 return X86EMUL_CONTINUE;
2987 static int em_call(struct x86_emulate_ctxt *ctxt)
2990 long rel = ctxt->src.val;
2992 ctxt->src.val = (unsigned long)ctxt->_eip;
2993 rc = jmp_rel(ctxt, rel);
2994 if (rc != X86EMUL_CONTINUE)
2996 return em_push(ctxt);
2999 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3004 struct desc_struct old_desc, new_desc;
3005 const struct x86_emulate_ops *ops = ctxt->ops;
3006 int cpl = ctxt->ops->cpl(ctxt);
3008 old_eip = ctxt->_eip;
3009 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3011 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3012 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3014 if (rc != X86EMUL_CONTINUE)
3015 return X86EMUL_CONTINUE;
3017 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
3018 if (rc != X86EMUL_CONTINUE)
3021 ctxt->src.val = old_cs;
3023 if (rc != X86EMUL_CONTINUE)
3026 ctxt->src.val = old_eip;
3028 /* If we failed, we tainted the memory, but the very least we should
3030 if (rc != X86EMUL_CONTINUE)
3034 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3039 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3044 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3045 if (rc != X86EMUL_CONTINUE)
3047 rc = assign_eip_near(ctxt, eip);
3048 if (rc != X86EMUL_CONTINUE)
3050 rsp_increment(ctxt, ctxt->src.val);
3051 return X86EMUL_CONTINUE;
3054 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3056 /* Write back the register source. */
3057 ctxt->src.val = ctxt->dst.val;
3058 write_register_operand(&ctxt->src);
3060 /* Write back the memory destination with implicit LOCK prefix. */
3061 ctxt->dst.val = ctxt->src.orig_val;
3062 ctxt->lock_prefix = 1;
3063 return X86EMUL_CONTINUE;
3066 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3068 ctxt->dst.val = ctxt->src2.val;
3069 return fastop(ctxt, em_imul);
3072 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3074 ctxt->dst.type = OP_REG;
3075 ctxt->dst.bytes = ctxt->src.bytes;
3076 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3077 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3079 return X86EMUL_CONTINUE;
3082 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3086 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3087 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3088 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3089 return X86EMUL_CONTINUE;
3092 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3096 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3097 return emulate_gp(ctxt, 0);
3098 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3099 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3100 return X86EMUL_CONTINUE;
3103 static int em_mov(struct x86_emulate_ctxt *ctxt)
3105 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3106 return X86EMUL_CONTINUE;
3109 #define FFL(x) bit(X86_FEATURE_##x)
3111 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3113 u32 ebx, ecx, edx, eax = 1;
3117 * Check MOVBE is set in the guest-visible CPUID leaf.
3119 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3120 if (!(ecx & FFL(MOVBE)))
3121 return emulate_ud(ctxt);
3123 switch (ctxt->op_bytes) {
3126 * From MOVBE definition: "...When the operand size is 16 bits,
3127 * the upper word of the destination register remains unchanged
3130 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3131 * rules so we have to do the operation almost per hand.
3133 tmp = (u16)ctxt->src.val;
3134 ctxt->dst.val &= ~0xffffUL;
3135 ctxt->dst.val |= (unsigned long)swab16(tmp);
3138 ctxt->dst.val = swab32((u32)ctxt->src.val);
3141 ctxt->dst.val = swab64(ctxt->src.val);
3146 return X86EMUL_CONTINUE;
3149 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3151 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3152 return emulate_gp(ctxt, 0);
3154 /* Disable writeback. */
3155 ctxt->dst.type = OP_NONE;
3156 return X86EMUL_CONTINUE;
3159 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3163 if (ctxt->mode == X86EMUL_MODE_PROT64)
3164 val = ctxt->src.val & ~0ULL;
3166 val = ctxt->src.val & ~0U;
3168 /* #UD condition is already handled. */
3169 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3170 return emulate_gp(ctxt, 0);
3172 /* Disable writeback. */
3173 ctxt->dst.type = OP_NONE;
3174 return X86EMUL_CONTINUE;
3177 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3181 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3182 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3183 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
3184 return emulate_gp(ctxt, 0);
3186 return X86EMUL_CONTINUE;
3189 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3193 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3194 return emulate_gp(ctxt, 0);
3196 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3197 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3198 return X86EMUL_CONTINUE;
3201 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3203 if (ctxt->modrm_reg > VCPU_SREG_GS)
3204 return emulate_ud(ctxt);
3206 ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
3207 return X86EMUL_CONTINUE;
3210 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3212 u16 sel = ctxt->src.val;
3214 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3215 return emulate_ud(ctxt);
3217 if (ctxt->modrm_reg == VCPU_SREG_SS)
3218 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3220 /* Disable writeback. */
3221 ctxt->dst.type = OP_NONE;
3222 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3225 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3227 u16 sel = ctxt->src.val;
3229 /* Disable writeback. */
3230 ctxt->dst.type = OP_NONE;
3231 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3234 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3236 u16 sel = ctxt->src.val;
3238 /* Disable writeback. */
3239 ctxt->dst.type = OP_NONE;
3240 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3243 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3248 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3249 if (rc == X86EMUL_CONTINUE)
3250 ctxt->ops->invlpg(ctxt, linear);
3251 /* Disable writeback. */
3252 ctxt->dst.type = OP_NONE;
3253 return X86EMUL_CONTINUE;
3256 static int em_clts(struct x86_emulate_ctxt *ctxt)
3260 cr0 = ctxt->ops->get_cr(ctxt, 0);
3262 ctxt->ops->set_cr(ctxt, 0, cr0);
3263 return X86EMUL_CONTINUE;
3266 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
3268 int rc = ctxt->ops->fix_hypercall(ctxt);
3270 if (rc != X86EMUL_CONTINUE)
3273 /* Let the processor re-execute the fixed hypercall */
3274 ctxt->_eip = ctxt->eip;
3275 /* Disable writeback. */
3276 ctxt->dst.type = OP_NONE;
3277 return X86EMUL_CONTINUE;
3280 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3281 void (*get)(struct x86_emulate_ctxt *ctxt,
3282 struct desc_ptr *ptr))
3284 struct desc_ptr desc_ptr;
3286 if (ctxt->mode == X86EMUL_MODE_PROT64)
3288 get(ctxt, &desc_ptr);
3289 if (ctxt->op_bytes == 2) {
3291 desc_ptr.address &= 0x00ffffff;
3293 /* Disable writeback. */
3294 ctxt->dst.type = OP_NONE;
3295 return segmented_write(ctxt, ctxt->dst.addr.mem,
3296 &desc_ptr, 2 + ctxt->op_bytes);
3299 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3301 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3304 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3306 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3309 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3311 struct desc_ptr desc_ptr;
3314 if (ctxt->mode == X86EMUL_MODE_PROT64)
3316 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3317 &desc_ptr.size, &desc_ptr.address,
3319 if (rc != X86EMUL_CONTINUE)
3321 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3322 /* Disable writeback. */
3323 ctxt->dst.type = OP_NONE;
3324 return X86EMUL_CONTINUE;
3327 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
3331 rc = ctxt->ops->fix_hypercall(ctxt);
3333 /* Disable writeback. */
3334 ctxt->dst.type = OP_NONE;
3338 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3340 struct desc_ptr desc_ptr;
3343 if (ctxt->mode == X86EMUL_MODE_PROT64)
3345 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3346 &desc_ptr.size, &desc_ptr.address,
3348 if (rc != X86EMUL_CONTINUE)
3350 ctxt->ops->set_idt(ctxt, &desc_ptr);
3351 /* Disable writeback. */
3352 ctxt->dst.type = OP_NONE;
3353 return X86EMUL_CONTINUE;
3356 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3358 if (ctxt->dst.type == OP_MEM)
3359 ctxt->dst.bytes = 2;
3360 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3361 return X86EMUL_CONTINUE;
3364 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3366 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3367 | (ctxt->src.val & 0x0f));
3368 ctxt->dst.type = OP_NONE;
3369 return X86EMUL_CONTINUE;
3372 static int em_loop(struct x86_emulate_ctxt *ctxt)
3374 int rc = X86EMUL_CONTINUE;
3376 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3377 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3378 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3379 rc = jmp_rel(ctxt, ctxt->src.val);
3384 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3386 int rc = X86EMUL_CONTINUE;
3388 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3389 rc = jmp_rel(ctxt, ctxt->src.val);
3394 static int em_in(struct x86_emulate_ctxt *ctxt)
3396 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3398 return X86EMUL_IO_NEEDED;
3400 return X86EMUL_CONTINUE;
3403 static int em_out(struct x86_emulate_ctxt *ctxt)
3405 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3407 /* Disable writeback. */
3408 ctxt->dst.type = OP_NONE;
3409 return X86EMUL_CONTINUE;
3412 static int em_cli(struct x86_emulate_ctxt *ctxt)
3414 if (emulator_bad_iopl(ctxt))
3415 return emulate_gp(ctxt, 0);
3417 ctxt->eflags &= ~X86_EFLAGS_IF;
3418 return X86EMUL_CONTINUE;
3421 static int em_sti(struct x86_emulate_ctxt *ctxt)
3423 if (emulator_bad_iopl(ctxt))
3424 return emulate_gp(ctxt, 0);
3426 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3427 ctxt->eflags |= X86_EFLAGS_IF;
3428 return X86EMUL_CONTINUE;
3431 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3433 u32 eax, ebx, ecx, edx;
3435 eax = reg_read(ctxt, VCPU_REGS_RAX);
3436 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3437 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3438 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3439 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3440 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3441 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3442 return X86EMUL_CONTINUE;
3445 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3449 flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
3450 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3452 ctxt->eflags &= ~0xffUL;
3453 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3454 return X86EMUL_CONTINUE;
3457 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3459 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3460 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3461 return X86EMUL_CONTINUE;
3464 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3466 switch (ctxt->op_bytes) {
3467 #ifdef CONFIG_X86_64
3469 asm("bswap %0" : "+r"(ctxt->dst.val));
3473 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3476 return X86EMUL_CONTINUE;
3479 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3481 /* emulating clflush regardless of cpuid */
3482 return X86EMUL_CONTINUE;
3485 static bool valid_cr(int nr)
3497 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
3499 if (!valid_cr(ctxt->modrm_reg))
3500 return emulate_ud(ctxt);
3502 return X86EMUL_CONTINUE;
3505 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
3507 u64 new_val = ctxt->src.val64;
3508 int cr = ctxt->modrm_reg;
3511 static u64 cr_reserved_bits[] = {
3512 0xffffffff00000000ULL,
3513 0, 0, 0, /* CR3 checked later */
3520 return emulate_ud(ctxt);
3522 if (new_val & cr_reserved_bits[cr])
3523 return emulate_gp(ctxt, 0);
3528 if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
3529 ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
3530 return emulate_gp(ctxt, 0);
3532 cr4 = ctxt->ops->get_cr(ctxt, 4);
3533 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3535 if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
3536 !(cr4 & X86_CR4_PAE))
3537 return emulate_gp(ctxt, 0);
3544 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3545 if (efer & EFER_LMA)
3546 rsvd = CR3_L_MODE_RESERVED_BITS;
3549 return emulate_gp(ctxt, 0);
3554 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3556 if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
3557 return emulate_gp(ctxt, 0);
3563 return X86EMUL_CONTINUE;
3566 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
3570 ctxt->ops->get_dr(ctxt, 7, &dr7);
3572 /* Check if DR7.Global_Enable is set */
3573 return dr7 & (1 << 13);
3576 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
3578 int dr = ctxt->modrm_reg;
3582 return emulate_ud(ctxt);
3584 cr4 = ctxt->ops->get_cr(ctxt, 4);
3585 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
3586 return emulate_ud(ctxt);
3588 if (check_dr7_gd(ctxt))
3589 return emulate_db(ctxt);
3591 return X86EMUL_CONTINUE;
3594 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
3596 u64 new_val = ctxt->src.val64;
3597 int dr = ctxt->modrm_reg;
3599 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
3600 return emulate_gp(ctxt, 0);
3602 return check_dr_read(ctxt);
3605 static int check_svme(struct x86_emulate_ctxt *ctxt)
3609 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
3611 if (!(efer & EFER_SVME))
3612 return emulate_ud(ctxt);
3614 return X86EMUL_CONTINUE;
3617 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3619 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3621 /* Valid physical address? */
3622 if (rax & 0xffff000000000000ULL)
3623 return emulate_gp(ctxt, 0);
3625 return check_svme(ctxt);
3628 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3630 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3632 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
3633 return emulate_ud(ctxt);
3635 return X86EMUL_CONTINUE;
3638 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3640 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3641 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3643 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3644 ctxt->ops->check_pmc(ctxt, rcx))
3645 return emulate_gp(ctxt, 0);
3647 return X86EMUL_CONTINUE;
3650 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
3652 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
3653 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
3654 return emulate_gp(ctxt, 0);
3656 return X86EMUL_CONTINUE;
3659 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3661 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
3662 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
3663 return emulate_gp(ctxt, 0);
3665 return X86EMUL_CONTINUE;
3668 #define D(_y) { .flags = (_y) }
3669 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
3670 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
3671 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3672 #define N D(NotImpl)
3673 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3674 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3675 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3676 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
3677 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3678 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
3679 #define II(_f, _e, _i) \
3680 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
3681 #define IIP(_f, _e, _i, _p) \
3682 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
3683 .intercept = x86_intercept_##_i, .check_perm = (_p) }
3684 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
3686 #define D2bv(_f) D((_f) | ByteOp), D(_f)
3687 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
3688 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
3689 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
3690 #define I2bvIP(_f, _e, _i, _p) \
3691 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
3693 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
3694 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3695 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3697 static const struct opcode group7_rm0[] = {
3699 I(SrcNone | Priv | EmulateOnUD, em_vmcall),
3703 static const struct opcode group7_rm1[] = {
3704 DI(SrcNone | Priv, monitor),
3705 DI(SrcNone | Priv, mwait),
3709 static const struct opcode group7_rm3[] = {
3710 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3711 II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
3712 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3713 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3714 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3715 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3716 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3717 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3720 static const struct opcode group7_rm7[] = {
3722 DIP(SrcNone, rdtscp, check_rdtsc),
3726 static const struct opcode group1[] = {
3728 F(Lock | PageTable, em_or),
3731 F(Lock | PageTable, em_and),
3737 static const struct opcode group1A[] = {
3738 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3741 static const struct opcode group2[] = {
3742 F(DstMem | ModRM, em_rol),
3743 F(DstMem | ModRM, em_ror),
3744 F(DstMem | ModRM, em_rcl),
3745 F(DstMem | ModRM, em_rcr),
3746 F(DstMem | ModRM, em_shl),
3747 F(DstMem | ModRM, em_shr),
3748 F(DstMem | ModRM, em_shl),
3749 F(DstMem | ModRM, em_sar),
3752 static const struct opcode group3[] = {
3753 F(DstMem | SrcImm | NoWrite, em_test),
3754 F(DstMem | SrcImm | NoWrite, em_test),
3755 F(DstMem | SrcNone | Lock, em_not),
3756 F(DstMem | SrcNone | Lock, em_neg),
3757 F(DstXacc | Src2Mem, em_mul_ex),
3758 F(DstXacc | Src2Mem, em_imul_ex),
3759 F(DstXacc | Src2Mem, em_div_ex),
3760 F(DstXacc | Src2Mem, em_idiv_ex),
3763 static const struct opcode group4[] = {
3764 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
3765 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
3769 static const struct opcode group5[] = {
3770 F(DstMem | SrcNone | Lock, em_inc),
3771 F(DstMem | SrcNone | Lock, em_dec),
3772 I(SrcMem | Stack, em_grp45),
3773 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3774 I(SrcMem | Stack, em_grp45),
3775 I(SrcMemFAddr | ImplicitOps, em_grp45),
3776 I(SrcMem | Stack, em_grp45), D(Undefined),
3779 static const struct opcode group6[] = {
3780 DI(Prot | DstMem, sldt),
3781 DI(Prot | DstMem, str),
3782 II(Prot | Priv | SrcMem16, em_lldt, lldt),
3783 II(Prot | Priv | SrcMem16, em_ltr, ltr),
3787 static const struct group_dual group7 = { {
3788 II(Mov | DstMem, em_sgdt, sgdt),
3789 II(Mov | DstMem, em_sidt, sidt),
3790 II(SrcMem | Priv, em_lgdt, lgdt),
3791 II(SrcMem | Priv, em_lidt, lidt),
3792 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3793 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3794 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3798 N, EXT(0, group7_rm3),
3799 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3800 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3804 static const struct opcode group8[] = {
3806 F(DstMem | SrcImmByte | NoWrite, em_bt),
3807 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3808 F(DstMem | SrcImmByte | Lock, em_btr),
3809 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3812 static const struct group_dual group9 = { {
3813 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3815 N, N, N, N, N, N, N, N,
3818 static const struct opcode group11[] = {
3819 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3823 static const struct gprefix pfx_0f_ae_7 = {
3824 I(SrcMem | ByteOp, em_clflush), N, N, N,
3827 static const struct group_dual group15 = { {
3828 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3830 N, N, N, N, N, N, N, N,
3833 static const struct gprefix pfx_0f_6f_0f_7f = {
3834 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3837 static const struct gprefix pfx_0f_2b = {
3838 I(0, em_mov), I(0, em_mov), N, N,
3841 static const struct gprefix pfx_0f_28_0f_29 = {
3842 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
3845 static const struct gprefix pfx_0f_e7 = {
3846 N, I(Sse, em_mov), N, N,
3849 static const struct escape escape_d9 = { {
3850 N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
3853 N, N, N, N, N, N, N, N,
3855 N, N, N, N, N, N, N, N,
3857 N, N, N, N, N, N, N, N,
3859 N, N, N, N, N, N, N, N,
3861 N, N, N, N, N, N, N, N,
3863 N, N, N, N, N, N, N, N,
3865 N, N, N, N, N, N, N, N,
3867 N, N, N, N, N, N, N, N,
3870 static const struct escape escape_db = { {
3871 N, N, N, N, N, N, N, N,
3874 N, N, N, N, N, N, N, N,
3876 N, N, N, N, N, N, N, N,
3878 N, N, N, N, N, N, N, N,
3880 N, N, N, N, N, N, N, N,
3882 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
3884 N, N, N, N, N, N, N, N,
3886 N, N, N, N, N, N, N, N,
3888 N, N, N, N, N, N, N, N,
3891 static const struct escape escape_dd = { {
3892 N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
3895 N, N, N, N, N, N, N, N,
3897 N, N, N, N, N, N, N, N,
3899 N, N, N, N, N, N, N, N,
3901 N, N, N, N, N, N, N, N,
3903 N, N, N, N, N, N, N, N,
3905 N, N, N, N, N, N, N, N,
3907 N, N, N, N, N, N, N, N,
3909 N, N, N, N, N, N, N, N,
3912 static const struct opcode opcode_table[256] = {
3914 F6ALU(Lock, em_add),
3915 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
3916 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
3918 F6ALU(Lock | PageTable, em_or),
3919 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
3922 F6ALU(Lock, em_adc),
3923 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
3924 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
3926 F6ALU(Lock, em_sbb),
3927 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
3928 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
3930 F6ALU(Lock | PageTable, em_and), N, N,
3932 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3934 F6ALU(Lock, em_xor), N, N,
3936 F6ALU(NoWrite, em_cmp), N, N,
3938 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
3940 X8(I(SrcReg | Stack, em_push)),
3942 X8(I(DstReg | Stack, em_pop)),
3944 I(ImplicitOps | Stack | No64, em_pusha),
3945 I(ImplicitOps | Stack | No64, em_popa),
3946 N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ ,
3949 I(SrcImm | Mov | Stack, em_push),
3950 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3951 I(SrcImmByte | Mov | Stack, em_push),
3952 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3953 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3954 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3958 G(ByteOp | DstMem | SrcImm, group1),
3959 G(DstMem | SrcImm, group1),
3960 G(ByteOp | DstMem | SrcImm | No64, group1),
3961 G(DstMem | SrcImmByte, group1),
3962 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
3963 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3965 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
3966 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3967 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
3968 D(ModRM | SrcMem | NoAccess | DstReg),
3969 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3972 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3974 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3975 I(SrcImmFAddr | No64, em_call_far), N,
3976 II(ImplicitOps | Stack, em_pushf, pushf),
3977 II(ImplicitOps | Stack, em_popf, popf),
3978 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
3980 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3981 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
3982 I2bv(SrcSI | DstDI | Mov | String, em_mov),
3983 F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
3985 F2bv(DstAcc | SrcImm | NoWrite, em_test),
3986 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3987 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3988 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
3990 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3992 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
3994 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
3995 I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3996 I(ImplicitOps | Stack, em_ret),
3997 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
3998 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
3999 G(ByteOp, group11), G(0, group11),
4001 I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
4002 I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
4003 I(ImplicitOps | Stack, em_ret_far),
4004 D(ImplicitOps), DI(SrcImmByte, intn),
4005 D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
4007 G(Src2One | ByteOp, group2), G(Src2One, group2),
4008 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4009 I(DstAcc | SrcImmUByte | No64, em_aam),
4010 I(DstAcc | SrcImmUByte | No64, em_aad),
4011 F(DstAcc | ByteOp | No64, em_salc),
4012 I(DstAcc | SrcXLat | ByteOp, em_mov),
4014 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4016 X3(I(SrcImmByte, em_loop)),
4017 I(SrcImmByte, em_jcxz),
4018 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4019 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4021 I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
4022 I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
4023 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4024 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4026 N, DI(ImplicitOps, icebp), N, N,
4027 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4028 G(ByteOp, group3), G(0, group3),
4030 D(ImplicitOps), D(ImplicitOps),
4031 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4032 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4035 static const struct opcode twobyte_table[256] = {
4037 G(0, group6), GD(0, &group7), N, N,
4038 N, I(ImplicitOps | EmulateOnUD, em_syscall),
4039 II(ImplicitOps | Priv, em_clts, clts), N,
4040 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4041 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4043 N, N, N, N, N, N, N, N,
4044 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4045 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
4047 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
4048 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4049 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4051 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4054 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4055 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4056 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4059 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4060 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4061 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4062 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4063 I(ImplicitOps | EmulateOnUD, em_sysenter),
4064 I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
4066 N, N, N, N, N, N, N, N,
4068 X16(D(DstReg | SrcMem | ModRM)),
4070 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4075 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4080 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4084 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4086 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4087 II(ImplicitOps, em_cpuid, cpuid),
4088 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4089 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4090 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4092 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4093 DI(ImplicitOps, rsm),
4094 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4095 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4096 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4097 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4099 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
4100 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4101 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4102 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4103 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4104 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4108 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4109 F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
4110 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4112 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4113 N, D(DstMem | SrcReg | ModRM | Mov),
4114 N, N, N, GD(0, &group9),
4116 X8(I(DstReg, em_bswap)),
4118 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4120 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4121 N, N, N, N, N, N, N, N,
4123 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
4126 static const struct gprefix three_byte_0f_38_f0 = {
4127 I(DstReg | SrcMem | Mov, em_movbe), N, N, N
4130 static const struct gprefix three_byte_0f_38_f1 = {
4131 I(DstMem | SrcReg | Mov, em_movbe), N, N, N
4135 * Insns below are selected by the prefix which indexed by the third opcode
4138 static const struct opcode opcode_map_0f_38[256] = {
4140 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4142 X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
4144 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
4145 GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
4164 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
4168 size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4174 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
4175 unsigned size, bool sign_extension)
4177 int rc = X86EMUL_CONTINUE;
4181 op->addr.mem.ea = ctxt->_eip;
4182 /* NB. Immediates are sign-extended as necessary. */
4183 switch (op->bytes) {
4185 op->val = insn_fetch(s8, ctxt);
4188 op->val = insn_fetch(s16, ctxt);
4191 op->val = insn_fetch(s32, ctxt);
4194 op->val = insn_fetch(s64, ctxt);
4197 if (!sign_extension) {
4198 switch (op->bytes) {
4206 op->val &= 0xffffffff;
4214 static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4217 int rc = X86EMUL_CONTINUE;
4221 decode_register_operand(ctxt, op);
4224 rc = decode_imm(ctxt, op, 1, false);
4227 ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4231 if (ctxt->d & BitOp)
4232 fetch_bit_operand(ctxt);
4233 op->orig_val = op->val;
4236 ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
4240 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4241 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4242 fetch_register_operand(op);
4243 op->orig_val = op->val;
4247 op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
4248 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
4249 fetch_register_operand(op);
4250 op->orig_val = op->val;
4253 if (ctxt->d & ByteOp) {
4258 op->bytes = ctxt->op_bytes;
4259 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4260 fetch_register_operand(op);
4261 op->orig_val = op->val;
4265 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4267 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
4268 op->addr.mem.seg = VCPU_SREG_ES;
4275 op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
4276 fetch_register_operand(op);
4281 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4284 rc = decode_imm(ctxt, op, 1, true);
4292 rc = decode_imm(ctxt, op, imm_size(ctxt), true);
4295 rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
4298 ctxt->memop.bytes = 1;
4299 if (ctxt->memop.type == OP_REG) {
4300 ctxt->memop.addr.reg = decode_register(ctxt,
4301 ctxt->modrm_rm, true);
4302 fetch_register_operand(&ctxt->memop);
4306 ctxt->memop.bytes = 2;
4309 ctxt->memop.bytes = 4;
4312 rc = decode_imm(ctxt, op, 2, false);
4315 rc = decode_imm(ctxt, op, imm_size(ctxt), false);
4319 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4321 register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
4322 op->addr.mem.seg = ctxt->seg_override;
4328 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
4330 register_address(ctxt,
4331 reg_read(ctxt, VCPU_REGS_RBX) +
4332 (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
4333 op->addr.mem.seg = ctxt->seg_override;
4338 op->addr.mem.ea = ctxt->_eip;
4339 op->bytes = ctxt->op_bytes + 2;
4340 insn_fetch_arr(op->valptr, op->bytes, ctxt);
4343 ctxt->memop.bytes = ctxt->op_bytes + 2;
4347 op->val = VCPU_SREG_ES;
4351 op->val = VCPU_SREG_CS;
4355 op->val = VCPU_SREG_SS;
4359 op->val = VCPU_SREG_DS;
4363 op->val = VCPU_SREG_FS;
4367 op->val = VCPU_SREG_GS;
4370 /* Special instructions do their own operand decoding. */
4372 op->type = OP_NONE; /* Disable writeback. */
4380 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4382 int rc = X86EMUL_CONTINUE;
4383 int mode = ctxt->mode;
4384 int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
4385 bool op_prefix = false;
4386 bool has_seg_override = false;
4387 struct opcode opcode;
4389 struct desc_struct desc;
4391 ctxt->memop.type = OP_NONE;
4392 ctxt->memopp = NULL;
4393 ctxt->_eip = ctxt->eip;
4394 ctxt->fetch.ptr = ctxt->fetch.data;
4395 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4396 ctxt->opcode_len = 1;
4398 memcpy(ctxt->fetch.data, insn, insn_len);
4400 rc = __do_insn_fetch_bytes(ctxt, 1);
4401 if (rc != X86EMUL_CONTINUE)
4406 case X86EMUL_MODE_REAL:
4407 case X86EMUL_MODE_VM86:
4408 def_op_bytes = def_ad_bytes = 2;
4409 ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
4411 def_op_bytes = def_ad_bytes = 4;
4413 case X86EMUL_MODE_PROT16:
4414 def_op_bytes = def_ad_bytes = 2;
4416 case X86EMUL_MODE_PROT32:
4417 def_op_bytes = def_ad_bytes = 4;
4419 #ifdef CONFIG_X86_64
4420 case X86EMUL_MODE_PROT64:
4426 return EMULATION_FAILED;
4429 ctxt->op_bytes = def_op_bytes;
4430 ctxt->ad_bytes = def_ad_bytes;
4432 /* Legacy prefixes. */
4434 switch (ctxt->b = insn_fetch(u8, ctxt)) {
4435 case 0x66: /* operand-size override */
4437 /* switch between 2/4 bytes */
4438 ctxt->op_bytes = def_op_bytes ^ 6;
4440 case 0x67: /* address-size override */
4441 if (mode == X86EMUL_MODE_PROT64)
4442 /* switch between 4/8 bytes */
4443 ctxt->ad_bytes = def_ad_bytes ^ 12;
4445 /* switch between 2/4 bytes */
4446 ctxt->ad_bytes = def_ad_bytes ^ 6;
4448 case 0x26: /* ES override */
4449 case 0x2e: /* CS override */
4450 case 0x36: /* SS override */
4451 case 0x3e: /* DS override */
4452 has_seg_override = true;
4453 ctxt->seg_override = (ctxt->b >> 3) & 3;
4455 case 0x64: /* FS override */
4456 case 0x65: /* GS override */
4457 has_seg_override = true;
4458 ctxt->seg_override = ctxt->b & 7;
4460 case 0x40 ... 0x4f: /* REX */
4461 if (mode != X86EMUL_MODE_PROT64)
4463 ctxt->rex_prefix = ctxt->b;
4465 case 0xf0: /* LOCK */
4466 ctxt->lock_prefix = 1;
4468 case 0xf2: /* REPNE/REPNZ */
4469 case 0xf3: /* REP/REPE/REPZ */
4470 ctxt->rep_prefix = ctxt->b;
4476 /* Any legacy prefix after a REX prefix nullifies its effect. */
4478 ctxt->rex_prefix = 0;
4484 if (ctxt->rex_prefix & 8)
4485 ctxt->op_bytes = 8; /* REX.W */
4487 /* Opcode byte(s). */
4488 opcode = opcode_table[ctxt->b];
4489 /* Two-byte opcode? */
4490 if (ctxt->b == 0x0f) {
4491 ctxt->opcode_len = 2;
4492 ctxt->b = insn_fetch(u8, ctxt);
4493 opcode = twobyte_table[ctxt->b];
4495 /* 0F_38 opcode map */
4496 if (ctxt->b == 0x38) {
4497 ctxt->opcode_len = 3;
4498 ctxt->b = insn_fetch(u8, ctxt);
4499 opcode = opcode_map_0f_38[ctxt->b];
4502 ctxt->d = opcode.flags;
4504 if (ctxt->d & ModRM)
4505 ctxt->modrm = insn_fetch(u8, ctxt);
4507 /* vex-prefix instructions are not implemented */
4508 if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
4509 (mode == X86EMUL_MODE_PROT64 ||
4510 (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
4514 while (ctxt->d & GroupMask) {
4515 switch (ctxt->d & GroupMask) {
4517 goffset = (ctxt->modrm >> 3) & 7;
4518 opcode = opcode.u.group[goffset];
4521 goffset = (ctxt->modrm >> 3) & 7;
4522 if ((ctxt->modrm >> 6) == 3)
4523 opcode = opcode.u.gdual->mod3[goffset];
4525 opcode = opcode.u.gdual->mod012[goffset];
4528 goffset = ctxt->modrm & 7;
4529 opcode = opcode.u.group[goffset];
4532 if (ctxt->rep_prefix && op_prefix)
4533 return EMULATION_FAILED;
4534 simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
4535 switch (simd_prefix) {
4536 case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
4537 case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
4538 case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
4539 case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
4543 if (ctxt->modrm > 0xbf)
4544 opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
4546 opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
4549 return EMULATION_FAILED;
4552 ctxt->d &= ~(u64)GroupMask;
4553 ctxt->d |= opcode.flags;
4558 return EMULATION_FAILED;
4560 ctxt->execute = opcode.u.execute;
4562 if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
4563 return EMULATION_FAILED;
4565 if (unlikely(ctxt->d &
4566 (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
4568 * These are copied unconditionally here, and checked unconditionally
4569 * in x86_emulate_insn.
4571 ctxt->check_perm = opcode.check_perm;
4572 ctxt->intercept = opcode.intercept;
4574 if (ctxt->d & NotImpl)
4575 return EMULATION_FAILED;
4577 if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
4580 if (ctxt->d & Op3264) {
4581 if (mode == X86EMUL_MODE_PROT64)
4588 ctxt->op_bytes = 16;
4589 else if (ctxt->d & Mmx)
4593 /* ModRM and SIB bytes. */
4594 if (ctxt->d & ModRM) {
4595 rc = decode_modrm(ctxt, &ctxt->memop);
4596 if (!has_seg_override) {
4597 has_seg_override = true;
4598 ctxt->seg_override = ctxt->modrm_seg;
4600 } else if (ctxt->d & MemAbs)
4601 rc = decode_abs(ctxt, &ctxt->memop);
4602 if (rc != X86EMUL_CONTINUE)
4605 if (!has_seg_override)
4606 ctxt->seg_override = VCPU_SREG_DS;
4608 ctxt->memop.addr.mem.seg = ctxt->seg_override;
4611 * Decode and fetch the source operand: register, memory
4614 rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
4615 if (rc != X86EMUL_CONTINUE)
4619 * Decode and fetch the second source operand: register, memory
4622 rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
4623 if (rc != X86EMUL_CONTINUE)
4626 /* Decode and fetch the destination operand: register or memory. */
4627 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4629 if (ctxt->rip_relative)
4630 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4633 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4636 bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
4638 return ctxt->d & PageTable;
4641 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4643 /* The second termination condition only applies for REPE
4644 * and REPNE. Test if the repeat string operation prefix is
4645 * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
4646 * corresponding termination condition according to:
4647 * - if REPE/REPZ and ZF = 0 then done
4648 * - if REPNE/REPNZ and ZF = 1 then done
4650 if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
4651 (ctxt->b == 0xae) || (ctxt->b == 0xaf))
4652 && (((ctxt->rep_prefix == REPE_PREFIX) &&
4653 ((ctxt->eflags & EFLG_ZF) == 0))
4654 || ((ctxt->rep_prefix == REPNE_PREFIX) &&
4655 ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
4661 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4665 ctxt->ops->get_fpu(ctxt);
4666 asm volatile("1: fwait \n\t"
4668 ".pushsection .fixup,\"ax\" \n\t"
4670 "movb $1, %[fault] \n\t"
4673 _ASM_EXTABLE(1b, 3b)
4674 : [fault]"+qm"(fault));
4675 ctxt->ops->put_fpu(ctxt);
4677 if (unlikely(fault))
4678 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4680 return X86EMUL_CONTINUE;
4683 static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4686 if (op->type == OP_MM)
4687 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4690 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
4692 ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
4693 if (!(ctxt->d & ByteOp))
4694 fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
4695 asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
4696 : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
4698 : "c"(ctxt->src2.val));
4699 ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
4700 if (!fop) /* exception is returned in fop variable */
4701 return emulate_de(ctxt);
4702 return X86EMUL_CONTINUE;
4705 void init_decode_cache(struct x86_emulate_ctxt *ctxt)
4707 memset(&ctxt->rip_relative, 0,
4708 (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
4710 ctxt->io_read.pos = 0;
4711 ctxt->io_read.end = 0;
4712 ctxt->mem_read.end = 0;
4715 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4717 const struct x86_emulate_ops *ops = ctxt->ops;
4718 int rc = X86EMUL_CONTINUE;
4719 int saved_dst_type = ctxt->dst.type;
4721 ctxt->mem_read.pos = 0;
4723 /* LOCK prefix is allowed only with some instructions */
4724 if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
4725 rc = emulate_ud(ctxt);
4729 if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
4730 rc = emulate_ud(ctxt);
4734 if (unlikely(ctxt->d &
4735 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
4736 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
4737 (ctxt->d & Undefined)) {
4738 rc = emulate_ud(ctxt);
4742 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4743 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4744 rc = emulate_ud(ctxt);
4748 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4749 rc = emulate_nm(ctxt);
4753 if (ctxt->d & Mmx) {
4754 rc = flush_pending_x87_faults(ctxt);
4755 if (rc != X86EMUL_CONTINUE)
4758 * Now that we know the fpu is exception safe, we can fetch
4761 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4762 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4763 if (!(ctxt->d & Mov))
4764 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4767 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4768 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4769 X86_ICPT_PRE_EXCEPT);
4770 if (rc != X86EMUL_CONTINUE)
4774 /* Privileged instruction can be executed only in CPL=0 */
4775 if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
4776 if (ctxt->d & PrivUD)
4777 rc = emulate_ud(ctxt);
4779 rc = emulate_gp(ctxt, 0);
4783 /* Instruction can only be executed in protected mode */
4784 if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
4785 rc = emulate_ud(ctxt);
4789 /* Do instruction specific permission checks */
4790 if (ctxt->d & CheckPerm) {
4791 rc = ctxt->check_perm(ctxt);
4792 if (rc != X86EMUL_CONTINUE)
4796 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4797 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4798 X86_ICPT_POST_EXCEPT);
4799 if (rc != X86EMUL_CONTINUE)
4803 if (ctxt->rep_prefix && (ctxt->d & String)) {
4804 /* All REP prefixes have the same first termination condition */
4805 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
4806 ctxt->eip = ctxt->_eip;
4807 ctxt->eflags &= ~EFLG_RF;
4813 if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
4814 rc = segmented_read(ctxt, ctxt->src.addr.mem,
4815 ctxt->src.valptr, ctxt->src.bytes);
4816 if (rc != X86EMUL_CONTINUE)
4818 ctxt->src.orig_val64 = ctxt->src.val64;
4821 if (ctxt->src2.type == OP_MEM) {
4822 rc = segmented_read(ctxt, ctxt->src2.addr.mem,
4823 &ctxt->src2.val, ctxt->src2.bytes);
4824 if (rc != X86EMUL_CONTINUE)
4828 if ((ctxt->d & DstMask) == ImplicitOps)
4832 if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
4833 /* optimisation - avoid slow emulated read if Mov */
4834 rc = segmented_read(ctxt, ctxt->dst.addr.mem,
4835 &ctxt->dst.val, ctxt->dst.bytes);
4836 if (rc != X86EMUL_CONTINUE)
4839 /* Copy full 64-bit value for CMPXCHG8B. */
4840 ctxt->dst.orig_val64 = ctxt->dst.val64;
4844 if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
4845 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4846 X86_ICPT_POST_MEMACCESS);
4847 if (rc != X86EMUL_CONTINUE)
4851 if (ctxt->rep_prefix && (ctxt->d & String))
4852 ctxt->eflags |= EFLG_RF;
4854 ctxt->eflags &= ~EFLG_RF;
4856 if (ctxt->execute) {
4857 if (ctxt->d & Fastop) {
4858 void (*fop)(struct fastop *) = (void *)ctxt->execute;
4859 rc = fastop(ctxt, fop);
4860 if (rc != X86EMUL_CONTINUE)
4864 rc = ctxt->execute(ctxt);
4865 if (rc != X86EMUL_CONTINUE)
4870 if (ctxt->opcode_len == 2)
4872 else if (ctxt->opcode_len == 3)
4873 goto threebyte_insn;
4876 case 0x63: /* movsxd */
4877 if (ctxt->mode != X86EMUL_MODE_PROT64)
4878 goto cannot_emulate;
4879 ctxt->dst.val = (s32) ctxt->src.val;
4881 case 0x70 ... 0x7f: /* jcc (short) */
4882 if (test_cc(ctxt->b, ctxt->eflags))
4883 rc = jmp_rel(ctxt, ctxt->src.val);
4885 case 0x8d: /* lea r16/r32, m */
4886 ctxt->dst.val = ctxt->src.addr.mem.ea;
4888 case 0x90 ... 0x97: /* nop / xchg reg, rax */
4889 if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
4890 ctxt->dst.type = OP_NONE;
4894 case 0x98: /* cbw/cwde/cdqe */
4895 switch (ctxt->op_bytes) {
4896 case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
4897 case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
4898 case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
4901 case 0xcc: /* int3 */
4902 rc = emulate_int(ctxt, 3);
4904 case 0xcd: /* int n */
4905 rc = emulate_int(ctxt, ctxt->src.val);
4907 case 0xce: /* into */
4908 if (ctxt->eflags & EFLG_OF)
4909 rc = emulate_int(ctxt, 4);
4911 case 0xe9: /* jmp rel */
4912 case 0xeb: /* jmp rel short */
4913 rc = jmp_rel(ctxt, ctxt->src.val);
4914 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4916 case 0xf4: /* hlt */
4917 ctxt->ops->halt(ctxt);
4919 case 0xf5: /* cmc */
4920 /* complement carry flag from eflags reg */
4921 ctxt->eflags ^= EFLG_CF;
4923 case 0xf8: /* clc */
4924 ctxt->eflags &= ~EFLG_CF;
4926 case 0xf9: /* stc */
4927 ctxt->eflags |= EFLG_CF;
4929 case 0xfc: /* cld */
4930 ctxt->eflags &= ~EFLG_DF;
4932 case 0xfd: /* std */
4933 ctxt->eflags |= EFLG_DF;
4936 goto cannot_emulate;
4939 if (rc != X86EMUL_CONTINUE)
4943 if (ctxt->d & SrcWrite) {
4944 BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
4945 rc = writeback(ctxt, &ctxt->src);
4946 if (rc != X86EMUL_CONTINUE)
4949 if (!(ctxt->d & NoWrite)) {
4950 rc = writeback(ctxt, &ctxt->dst);
4951 if (rc != X86EMUL_CONTINUE)
4956 * restore dst type in case the decoding will be reused
4957 * (happens for string instruction )
4959 ctxt->dst.type = saved_dst_type;
4961 if ((ctxt->d & SrcMask) == SrcSI)
4962 string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
4964 if ((ctxt->d & DstMask) == DstDI)
4965 string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
4967 if (ctxt->rep_prefix && (ctxt->d & String)) {
4969 struct read_cache *r = &ctxt->io_read;
4970 if ((ctxt->d & SrcMask) == SrcSI)
4971 count = ctxt->src.count;
4973 count = ctxt->dst.count;
4974 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
4977 if (!string_insn_completed(ctxt)) {
4979 * Re-enter guest when pio read ahead buffer is empty
4980 * or, if it is not used, after each 1024 iteration.
4982 if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
4983 (r->end == 0 || r->end != r->pos)) {
4985 * Reset read cache. Usually happens before
4986 * decode, but since instruction is restarted
4987 * we have to do it here.
4989 ctxt->mem_read.end = 0;
4990 writeback_registers(ctxt);
4991 return EMULATION_RESTART;
4993 goto done; /* skip rip writeback */
4995 ctxt->eflags &= ~EFLG_RF;
4998 ctxt->eip = ctxt->_eip;
5001 if (rc == X86EMUL_PROPAGATE_FAULT) {
5002 WARN_ON(ctxt->exception.vector > 0x1f);
5003 ctxt->have_exception = true;
5005 if (rc == X86EMUL_INTERCEPTED)
5006 return EMULATION_INTERCEPTED;
5008 if (rc == X86EMUL_CONTINUE)
5009 writeback_registers(ctxt);
5011 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
5015 case 0x09: /* wbinvd */
5016 (ctxt->ops->wbinvd)(ctxt);
5018 case 0x08: /* invd */
5019 case 0x0d: /* GrpP (prefetch) */
5020 case 0x18: /* Grp16 (prefetch/nop) */
5021 case 0x1f: /* nop */
5023 case 0x20: /* mov cr, reg */
5024 ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
5026 case 0x21: /* mov from dr to reg */
5027 ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
5029 case 0x40 ... 0x4f: /* cmov */
5030 if (test_cc(ctxt->b, ctxt->eflags))
5031 ctxt->dst.val = ctxt->src.val;
5032 else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
5033 ctxt->op_bytes != 4)
5034 ctxt->dst.type = OP_NONE; /* no writeback */
5036 case 0x80 ... 0x8f: /* jnz rel, etc*/
5037 if (test_cc(ctxt->b, ctxt->eflags))
5038 rc = jmp_rel(ctxt, ctxt->src.val);
5040 case 0x90 ... 0x9f: /* setcc r/m8 */
5041 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
5043 case 0xb6 ... 0xb7: /* movzx */
5044 ctxt->dst.bytes = ctxt->op_bytes;
5045 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
5046 : (u16) ctxt->src.val;
5048 case 0xbe ... 0xbf: /* movsx */
5049 ctxt->dst.bytes = ctxt->op_bytes;
5050 ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
5051 (s16) ctxt->src.val;
5053 case 0xc3: /* movnti */
5054 ctxt->dst.bytes = ctxt->op_bytes;
5055 ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
5056 (u32) ctxt->src.val;
5059 goto cannot_emulate;
5064 if (rc != X86EMUL_CONTINUE)
5070 return EMULATION_FAILED;
5073 void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
5075 invalidate_registers(ctxt);
5078 void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
5080 writeback_registers(ctxt);