1 /******************************************************************************
2  * emulate.c
3  *
4  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5  *
6  * Copyright (c) 2005 Keir Fraser
7  *
8  * Linux coding style, mod r/m decoder, segment base fixes, real-mode
9  * privileged instructions:
10  *
11  * Copyright (C) 2006 Qumranet
12  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
13  *
14  *   Avi Kivity <avi@qumranet.com>
15  *   Yaniv Kamay <yaniv@qumranet.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
21  */
22 
23 #include <linux/kvm_host.h>
24 #include "kvm_cache_regs.h"
25 #include <linux/module.h>
26 #include <asm/kvm_emulate.h>
27 
28 #include "x86.h"
29 #include "tss.h"
30 
31 /*
32  * Opcode effective-address decode tables.
33  * Note that we only emulate instructions that have at least one memory
34  * operand (excluding implicit stack references). We assume that stack
35  * references and instruction fetches will never occur in special memory
36  * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
37  * not be handled.
38  */
39 
40 /* Operand sizes: 8-bit operands or specified/overridden size. */
41 #define ByteOp      (1<<0)	/* 8-bit operands. */
42 /* Destination operand type. */
43 #define ImplicitOps (1<<1)	/* Implicit in opcode. No generic decode. */
44 #define DstReg      (2<<1)	/* Register operand. */
45 #define DstMem      (3<<1)	/* Memory operand. */
46 #define DstAcc      (4<<1)	/* Destination Accumulator */
47 #define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
48 #define DstMem64    (6<<1)	/* 64bit memory operand */
49 #define DstImmUByte (7<<1)	/* 8-bit unsigned immediate operand */
50 #define DstDX       (8<<1)	/* Destination is in DX register */
51 #define DstMask     (0xf<<1)
52 /* Source operand type. */
53 #define SrcNone     (0<<5)	/* No source operand. */
54 #define SrcReg      (1<<5)	/* Register operand. */
55 #define SrcMem      (2<<5)	/* Memory operand. */
56 #define SrcMem16    (3<<5)	/* Memory operand (16-bit). */
57 #define SrcMem32    (4<<5)	/* Memory operand (32-bit). */
58 #define SrcImm      (5<<5)	/* Immediate operand. */
59 #define SrcImmByte  (6<<5)	/* 8-bit sign-extended immediate operand. */
60 #define SrcOne      (7<<5)	/* Implied '1' */
61 #define SrcImmUByte (8<<5)      /* 8-bit unsigned immediate operand. */
62 #define SrcImmU     (9<<5)      /* Immediate operand, unsigned */
63 #define SrcSI       (0xa<<5)	/* Source is in the DS:RSI */
64 #define SrcImmFAddr (0xb<<5)	/* Source is immediate far address */
65 #define SrcMemFAddr (0xc<<5)	/* Source is far address in memory */
66 #define SrcAcc      (0xd<<5)	/* Source Accumulator */
67 #define SrcImmU16   (0xe<<5)    /* Immediate operand, unsigned, 16 bits */
68 #define SrcDX       (0xf<<5)	/* Source is in DX register */
69 #define SrcMask     (0xf<<5)
70 /* Generic ModRM decode. */
71 #define ModRM       (1<<9)
72 /* Destination is only written; never read. */
73 #define Mov         (1<<10)
74 #define BitOp       (1<<11)
75 #define MemAbs      (1<<12)      /* Memory operand is absolute displacement */
76 #define String      (1<<13)     /* String instruction (rep capable) */
77 #define Stack       (1<<14)     /* Stack instruction (push/pop) */
78 #define GroupMask   (7<<15)     /* Opcode uses one of the group mechanisms */
79 #define Group       (1<<15)     /* Bits 3:5 of modrm byte extend opcode */
80 #define GroupDual   (2<<15)     /* Alternate decoding of mod == 3 */
81 #define Prefix      (3<<15)     /* Instruction varies with 66/f2/f3 prefix */
82 #define RMExt       (4<<15)     /* Opcode extension in ModRM r/m if mod == 3 */
83 #define Sse         (1<<18)     /* SSE Vector instruction */
84 /* Misc flags */
85 #define Prot        (1<<21) /* instruction generates #UD if not in prot-mode */
86 #define VendorSpecific (1<<22) /* Vendor specific instruction */
87 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
88 #define Op3264      (1<<24) /* Operand is 64b in long mode, 32b otherwise */
89 #define Undefined   (1<<25) /* No Such Instruction */
90 #define Lock        (1<<26) /* lock prefix is allowed for the instruction */
91 #define Priv        (1<<27) /* instruction generates #GP if current CPL != 0 */
92 #define No64	    (1<<28)
93 /* Source 2 operand type */
94 #define Src2None    (0<<29)
95 #define Src2CL      (1<<29)
96 #define Src2ImmByte (2<<29)
97 #define Src2One     (3<<29)
98 #define Src2Imm     (4<<29)
99 #define Src2Mask    (7<<29)
100 
101 #define X2(x...) x, x
102 #define X3(x...) X2(x), x
103 #define X4(x...) X2(x), X2(x)
104 #define X5(x...) X4(x), x
105 #define X6(x...) X4(x), X2(x)
106 #define X7(x...) X4(x), X3(x)
107 #define X8(x...) X4(x), X4(x)
108 #define X16(x...) X8(x), X8(x)
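/*
 * Reading aid: the X<n>() helpers expand to <n> copies of their argument,
 * e.g. X16(foo) becomes "foo, foo, ..., foo" (sixteen times).  They exist
 * to fill runs of identical entries in the opcode tables later in this
 * file; a hypothetical table fragment (D() is the plain-entry wrapper
 * defined alongside those tables):
 *
 *	X16(D(Undefined)),	// sixteen undefined slots in one line
 */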
109 
110 struct opcode {
111 	u32 flags;
112 	u8 intercept;
113 	union {
114 		int (*execute)(struct x86_emulate_ctxt *ctxt);
115 		struct opcode *group;
116 		struct group_dual *gdual;
117 		struct gprefix *gprefix;
118 	} u;
119 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
120 };
121 
122 struct group_dual {
123 	struct opcode mod012[8];
124 	struct opcode mod3[8];
125 };
126 
127 struct gprefix {
128 	struct opcode pfx_no;
129 	struct opcode pfx_66;
130 	struct opcode pfx_f2;
131 	struct opcode pfx_f3;
132 };
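/*
 * How a table entry is interpreted depends on the GroupMask bits in
 * ->flags: a plain entry uses u.execute directly, while Group/GroupDual/
 * Prefix/RMExt entries redirect through u.group, u.gdual or u.gprefix so
 * that one table slot can fan out on the ModRM reg/rm fields or on a
 * 66/F2/F3 mandatory prefix.  A group_dual entry selects mod012[] when
 * ModRM.mod != 3 and mod3[] otherwise; a gprefix entry selects one of
 * its four opcodes by mandatory prefix.
 */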
133 
134 /* EFLAGS bit definitions. */
135 #define EFLG_ID (1<<21)
136 #define EFLG_VIP (1<<20)
137 #define EFLG_VIF (1<<19)
138 #define EFLG_AC (1<<18)
139 #define EFLG_VM (1<<17)
140 #define EFLG_RF (1<<16)
141 #define EFLG_IOPL (3<<12)
142 #define EFLG_NT (1<<14)
143 #define EFLG_OF (1<<11)
144 #define EFLG_DF (1<<10)
145 #define EFLG_IF (1<<9)
146 #define EFLG_TF (1<<8)
147 #define EFLG_SF (1<<7)
148 #define EFLG_ZF (1<<6)
149 #define EFLG_AF (1<<4)
150 #define EFLG_PF (1<<2)
151 #define EFLG_CF (1<<0)
152 
153 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
154 #define EFLG_RESERVED_ONE_MASK 2
155 
156 /*
157  * Instruction emulation:
158  * Most instructions are emulated directly via a fragment of inline assembly
159  * code. This allows us to save/restore EFLAGS and thus very easily pick up
160  * any modified flags.
161  */
162 
163 #if defined(CONFIG_X86_64)
164 #define _LO32 "k"		/* force 32-bit operand */
165 #define _STK  "%%rsp"		/* stack pointer */
166 #elif defined(__i386__)
167 #define _LO32 ""		/* force 32-bit operand */
168 #define _STK  "%%esp"		/* stack pointer */
169 #endif
170 
171 /*
172  * These EFLAGS bits are restored from saved value during emulation, and
173  * any changes are written back to the saved value after emulation.
174  */
175 #define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
176 
177 /* Before executing instruction: restore necessary bits in EFLAGS. */
178 #define _PRE_EFLAGS(_sav, _msk, _tmp)					\
179 	/* EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk); _sav &= ~_msk; */ \
180 	"movl %"_sav",%"_LO32 _tmp"; "                                  \
181 	"push %"_tmp"; "                                                \
182 	"push %"_tmp"; "                                                \
183 	"movl %"_msk",%"_LO32 _tmp"; "                                  \
184 	"andl %"_LO32 _tmp",("_STK"); "                                 \
185 	"pushf; "                                                       \
186 	"notl %"_LO32 _tmp"; "                                          \
187 	"andl %"_LO32 _tmp",("_STK"); "                                 \
188 	"andl %"_LO32 _tmp","__stringify(BITS_PER_LONG/4)"("_STK"); "	\
189 	"pop  %"_tmp"; "                                                \
190 	"orl  %"_LO32 _tmp",("_STK"); "                                 \
191 	"popf; "                                                        \
192 	"pop  %"_sav"; "
193 
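/*
 * A rough walk-through of _PRE_EFLAGS (a reading aid, not extra
 * semantics): two copies of the saved flags are pushed; the top copy is
 * masked down to (_sav & _msk) and the lower copy -- BITS_PER_LONG/4
 * bytes up the stack, i.e. two slots -- down to (_sav & ~_msk).  The
 * live flags are pushed, stripped to (EFLAGS & ~_msk), OR-ed into the
 * masked saved copy and loaded back with popf.  Net effect:
 * EFLAGS = (_sav & _msk) | (EFLAGS & ~_msk), and _sav is left holding
 * _sav & ~_msk, exactly as the one-line comment above states.
 */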
194 /* After executing instruction: write-back necessary bits in EFLAGS. */
195 #define _POST_EFLAGS(_sav, _msk, _tmp) \
196 	/* _sav |= EFLAGS & _msk; */		\
197 	"pushf; "				\
198 	"pop  %"_tmp"; "			\
199 	"andl %"_msk",%"_LO32 _tmp"; "		\
200 	"orl  %"_LO32 _tmp",%"_sav"; "
201 
202 #ifdef CONFIG_X86_64
203 #define ON64(x) x
204 #else
205 #define ON64(x)
206 #endif
207 
208 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
209 	do {								\
210 		__asm__ __volatile__ (					\
211 			_PRE_EFLAGS("0", "4", "2")			\
212 			_op _suffix " %"_x"3,%1; "			\
213 			_POST_EFLAGS("0", "4", "2")			\
214 			: "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\
215 			  "=&r" (_tmp)					\
216 			: _y ((_src).val), "i" (EFLAGS_MASK));		\
217 	} while (0)
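/*
 * For orientation, a sketch of what one instantiation expands to
 * (assuming a 32-bit destination and _op = "add"; names are informal):
 *
 *	asm volatile(... restore guest flags ...
 *		     "addl %k3,%1; "
 *		     ... capture resulting flags ...
 *		     : "=m" (eflags), "+q" (*(u32 *)&dst.val), "=&r" (tmp)
 *		     : "r" (src.val), "i" (EFLAGS_MASK));
 *
 * i.e. the guest instruction is executed natively on host registers and
 * the arithmetic flags it produces are picked up via pushf/popf.
 */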
218 
219 
220 /* Raw emulation: instruction has two explicit operands. */
221 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
222 	do {								\
223 		unsigned long _tmp;					\
224 									\
225 		switch ((_dst).bytes) {					\
226 		case 2:							\
227 			____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
228 			break;						\
229 		case 4:							\
230 			____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\
231 			break;						\
232 		case 8:							\
233 			ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \
234 			break;						\
235 		}							\
236 	} while (0)
237 
238 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
239 	do {								     \
240 		unsigned long _tmp;					     \
241 		switch ((_dst).bytes) {				             \
242 		case 1:							     \
243 			____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
244 			break;						     \
245 		default:						     \
246 			__emulate_2op_nobyte(_op, _src, _dst, _eflags,	     \
247 					     _wx, _wy, _lx, _ly, _qx, _qy);  \
248 			break;						     \
249 		}							     \
250 	} while (0)
251 
252 /* Source operand is byte-sized and may be restricted to just %cl. */
253 #define emulate_2op_SrcB(_op, _src, _dst, _eflags)                      \
254 	__emulate_2op(_op, _src, _dst, _eflags,				\
255 		      "b", "c", "b", "c", "b", "c", "b", "c")
256 
257 /* Source operand is byte, word, long or quad sized. */
258 #define emulate_2op_SrcV(_op, _src, _dst, _eflags)                      \
259 	__emulate_2op(_op, _src, _dst, _eflags,				\
260 		      "b", "q", "w", "r", _LO32, "r", "", "r")
261 
262 /* Source operand is word, long or quad sized. */
263 #define emulate_2op_SrcV_nobyte(_op, _src, _dst, _eflags)               \
264 	__emulate_2op_nobyte(_op, _src, _dst, _eflags,			\
265 			     "w", "r", _LO32, "r", "", "r")
266 
267 /* Instruction has three operands; one operand is stored in the ECX register */
268 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type)	\
269 	do {								\
270 		unsigned long _tmp;					\
271 		_type _clv  = (_cl).val;				\
272 		_type _srcv = (_src).val;				\
273 		_type _dstv = (_dst).val;				\
274 									\
275 		__asm__ __volatile__ (					\
276 			_PRE_EFLAGS("0", "5", "2")			\
277 			_op _suffix " %4,%1 \n"				\
278 			_POST_EFLAGS("0", "5", "2")			\
279 			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp)	\
280 			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK)	\
281 			);						\
282 									\
283 		(_cl).val  = (unsigned long) _clv;			\
284 		(_src).val = (unsigned long) _srcv;			\
285 		(_dst).val = (unsigned long) _dstv;			\
286 	} while (0)
287 
288 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags)			\
289 	do {								\
290 		switch ((_dst).bytes) {					\
291 		case 2:							\
292 			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
293 					 "w", unsigned short);         	\
294 			break;						\
295 		case 4:							\
296 			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags,	\
297 					 "l", unsigned int);           	\
298 			break;						\
299 		case 8:							\
300 			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
301 					      "q", unsigned long));	\
302 			break;						\
303 		}							\
304 	} while (0)
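/*
 * emulate_2op_cl() serves the double-shift instructions (shld/shrd),
 * where the shift count is pinned to CL by the "c" constraint above.
 */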
305 
306 #define __emulate_1op(_op, _dst, _eflags, _suffix)			\
307 	do {								\
308 		unsigned long _tmp;					\
309 									\
310 		__asm__ __volatile__ (					\
311 			_PRE_EFLAGS("0", "3", "2")			\
312 			_op _suffix " %1; "				\
313 			_POST_EFLAGS("0", "3", "2")			\
314 			: "=m" (_eflags), "+m" ((_dst).val),		\
315 			  "=&r" (_tmp)					\
316 			: "i" (EFLAGS_MASK));				\
317 	} while (0)
318 
319 /* Instruction has only one explicit operand (no source operand). */
320 #define emulate_1op(_op, _dst, _eflags)                                    \
321 	do {								\
322 		switch ((_dst).bytes) {				        \
323 		case 1:	__emulate_1op(_op, _dst, _eflags, "b"); break;	\
324 		case 2:	__emulate_1op(_op, _dst, _eflags, "w"); break;	\
325 		case 4:	__emulate_1op(_op, _dst, _eflags, "l"); break;	\
326 		case 8:	ON64(__emulate_1op(_op, _dst, _eflags, "q")); break; \
327 		}							\
328 	} while (0)
329 
330 #define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix)		\
331 	do {								\
332 		unsigned long _tmp;					\
333 									\
334 		__asm__ __volatile__ (					\
335 			_PRE_EFLAGS("0", "4", "1")			\
336 			_op _suffix " %5; "				\
337 			_POST_EFLAGS("0", "4", "1")			\
338 			: "=m" (_eflags), "=&r" (_tmp),			\
339 			  "+a" (_rax), "+d" (_rdx)			\
340 			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
341 			  "a" (_rax), "d" (_rdx));			\
342 	} while (0)
343 
344 #define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \
345 	do {								\
346 		unsigned long _tmp;					\
347 									\
348 		__asm__ __volatile__ (					\
349 			_PRE_EFLAGS("0", "5", "1")			\
350 			"1: \n\t"					\
351 			_op _suffix " %6; "				\
352 			"2: \n\t"					\
353 			_POST_EFLAGS("0", "5", "1")			\
354 			".pushsection .fixup,\"ax\" \n\t"		\
355 			"3: movb $1, %4 \n\t"				\
356 			"jmp 2b \n\t"					\
357 			".popsection \n\t"				\
358 			_ASM_EXTABLE(1b, 3b)				\
359 			: "=m" (_eflags), "=&r" (_tmp),			\
360 			  "+a" (_rax), "+d" (_rdx), "+qm"(_ex)		\
361 			: "i" (EFLAGS_MASK), "m" ((_src).val),		\
362 			  "a" (_rax), "d" (_rdx));			\
363 	} while (0)
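/*
 * The _ex variant wraps the instruction in an exception-table entry: if
 * the host div/idiv faults (e.g. divide by zero while emulating a guest
 * DIV), the fixup at label 3 sets _ex to 1 and resumes at label 2,
 * letting the caller turn the host fault into a guest #DE instead of
 * an oops.
 */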
364 
365 /* Instruction has only one source operand; the destination is implicit (e.g. mul, div, imul, idiv) */
366 #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags)		\
367 	do {								\
368 		switch ((_src).bytes) {				\
369 		case 1:							\
370 			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
371 					      _eflags, "b");		\
372 			break;						\
373 		case 2:							\
374 			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
375 					      _eflags, "w");		\
376 			break;						\
377 		case 4:							\
378 			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx,	\
379 					      _eflags, "l");		\
380 			break;						\
381 		case 8:							\
382 			ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
383 						   _eflags, "q"));	\
384 			break;						\
385 		}							\
386 	} while (0)
387 
388 #define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex)	\
389 	do {								\
390 		switch ((_src).bytes) {				\
391 		case 1:							\
392 			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx,	\
393 						 _eflags, "b", _ex);	\
394 			break;						\
395 		case 2:							\
396 			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
397 						 _eflags, "w", _ex);	\
398 			break;						\
399 		case 4:							\
400 			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
401 						 _eflags, "l", _ex);	\
402 			break;						\
403 		case 8: ON64(						\
404 			__emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \
405 						 _eflags, "q", _ex));	\
406 			break;						\
407 		}							\
408 	} while (0)
409 
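/*
 * Package up the decoded instruction state into x86_instruction_info and
 * ask the backend whether an intercept applies (e.g. for nested SVM
 * instruction intercepts).  The stage argument lets the backend
 * distinguish, roughly, checks made before and after exception/permission
 * checking and after the memory access.
 */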
410 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
411 				    enum x86_intercept intercept,
412 				    enum x86_intercept_stage stage)
413 {
414 	struct x86_instruction_info info = {
415 		.intercept  = intercept,
416 		.rep_prefix = ctxt->rep_prefix,
417 		.modrm_mod  = ctxt->modrm_mod,
418 		.modrm_reg  = ctxt->modrm_reg,
419 		.modrm_rm   = ctxt->modrm_rm,
420 		.src_val    = ctxt->src.val64,
421 		.src_bytes  = ctxt->src.bytes,
422 		.dst_bytes  = ctxt->dst.bytes,
423 		.ad_bytes   = ctxt->ad_bytes,
424 		.next_rip   = ctxt->eip,
425 	};
426 
427 	return ctxt->ops->intercept(ctxt, &info, stage);
428 }
429 
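/*
 * Note: ad_mask() is only meaningful for sub-long address sizes; callers
 * first check ad_bytes == sizeof(unsigned long) and bypass it, which also
 * avoids the undefined full-width shift.  E.g. with ad_bytes == 2 the
 * mask is 0xffff, so register_address_increment() below wraps SI/DI/SP
 * arithmetic at 16 bits, as real mode requires.
 */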
430 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
431 {
432 	return (1UL << (ctxt->ad_bytes << 3)) - 1;
433 }
434 
435 /* Access/update address held in a register, based on addressing mode. */
436 static inline unsigned long
437 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
438 {
439 	if (ctxt->ad_bytes == sizeof(unsigned long))
440 		return reg;
441 	else
442 		return reg & ad_mask(ctxt);
443 }
444 
445 static inline unsigned long
446 register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
447 {
448 	return address_mask(ctxt, reg);
449 }
450 
451 static inline void
452 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
453 {
454 	if (ctxt->ad_bytes == sizeof(unsigned long))
455 		*reg += inc;
456 	else
457 		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
458 }
459 
460 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
461 {
462 	register_address_increment(ctxt, &ctxt->_eip, rel);
463 }
464 
465 static u32 desc_limit_scaled(struct desc_struct *desc)
466 {
467 	u32 limit = get_desc_limit(desc);
468 
469 	return desc->g ? (limit << 12) | 0xfff : limit;
470 }
471 
472 static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
473 {
474 	ctxt->has_seg_override = true;
475 	ctxt->seg_override = seg;
476 }
477 
478 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
479 {
480 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
481 		return 0;
482 
483 	return ctxt->ops->get_cached_segment_base(ctxt, seg);
484 }
485 
486 static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
487 {
488 	if (!ctxt->has_seg_override)
489 		return 0;
490 
491 	return ctxt->seg_override;
492 }
493 
494 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
495 			     u32 error, bool valid)
496 {
497 	ctxt->exception.vector = vec;
498 	ctxt->exception.error_code = error;
499 	ctxt->exception.error_code_valid = valid;
500 	return X86EMUL_PROPAGATE_FAULT;
501 }
502 
503 static int emulate_db(struct x86_emulate_ctxt *ctxt)
504 {
505 	return emulate_exception(ctxt, DB_VECTOR, 0, false);
506 }
507 
508 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
509 {
510 	return emulate_exception(ctxt, GP_VECTOR, err, true);
511 }
512 
513 static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
514 {
515 	return emulate_exception(ctxt, SS_VECTOR, err, true);
516 }
517 
518 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
519 {
520 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
521 }
522 
523 static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
524 {
525 	return emulate_exception(ctxt, TS_VECTOR, err, true);
526 }
527 
528 static int emulate_de(struct x86_emulate_ctxt *ctxt)
529 {
530 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
531 }
532 
533 static int emulate_nm(struct x86_emulate_ctxt *ctxt)
534 {
535 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
536 }
537 
538 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
539 {
540 	u16 selector;
541 	struct desc_struct desc;
542 
543 	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
544 	return selector;
545 }
546 
547 static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
548 				 unsigned seg)
549 {
550 	u16 dummy;
551 	u32 base3;
552 	struct desc_struct desc;
553 
554 	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
555 	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
556 }
557 
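/*
 * Translate a segment:offset address into a linear address, applying the
 * mode-specific checks: canonical form in 64-bit mode; in protected mode,
 * segment usability, read/write permission, the expand-up or expand-down
 * limit, and a CPL/RPL vs. DPL privilege check.  Failures raise #SS(0)
 * for stack references and #GP(0) otherwise.
 */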
558 static int __linearize(struct x86_emulate_ctxt *ctxt,
559 		     struct segmented_address addr,
560 		     unsigned size, bool write, bool fetch,
561 		     ulong *linear)
562 {
563 	struct desc_struct desc;
564 	bool usable;
565 	ulong la;
566 	u32 lim;
567 	u16 sel;
568 	unsigned cpl, rpl;
569 
570 	la = seg_base(ctxt, addr.seg) + addr.ea;
571 	switch (ctxt->mode) {
572 	case X86EMUL_MODE_REAL:
573 		break;
574 	case X86EMUL_MODE_PROT64:
575 		if (((signed long)la << 16) >> 16 != la)
576 			return emulate_gp(ctxt, 0);
577 		break;
578 	default:
579 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
580 						addr.seg);
581 		if (!usable)
582 			goto bad;
583 		/* code segment or read-only data segment */
584 		if (((desc.type & 8) || !(desc.type & 2)) && write)
585 			goto bad;
586 		/* unreadable code segment */
587 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
588 			goto bad;
589 		lim = desc_limit_scaled(&desc);
590 		if ((desc.type & 8) || !(desc.type & 4)) {
591 			/* expand-up segment */
592 			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
593 				goto bad;
594 		} else {
595 			/* exapand-down segment */
596 			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
597 				goto bad;
598 			lim = desc.d ? 0xffffffff : 0xffff;
599 			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
600 				goto bad;
601 		}
602 		cpl = ctxt->ops->cpl(ctxt);
603 		rpl = sel & 3;
604 		cpl = max(cpl, rpl);
605 		if (!(desc.type & 8)) {
606 			/* data segment */
607 			if (cpl > desc.dpl)
608 				goto bad;
609 		} else if ((desc.type & 8) && !(desc.type & 4)) {
610 			/* nonconforming code segment */
611 			if (cpl != desc.dpl)
612 				goto bad;
613 		} else if ((desc.type & 8) && (desc.type & 4)) {
614 			/* conforming code segment */
615 			if (cpl < desc.dpl)
616 				goto bad;
617 		}
618 		break;
619 	}
620 	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
621 		la &= (u32)-1;
622 	*linear = la;
623 	return X86EMUL_CONTINUE;
624 bad:
625 	if (addr.seg == VCPU_SREG_SS)
626 		return emulate_ss(ctxt, addr.seg);
627 	else
628 		return emulate_gp(ctxt, addr.seg);
629 }
630 
631 static int linearize(struct x86_emulate_ctxt *ctxt,
632 		     struct segmented_address addr,
633 		     unsigned size, bool write,
634 		     ulong *linear)
635 {
636 	return __linearize(ctxt, addr, size, write, false, linear);
637 }
638 
639 
640 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
641 			      struct segmented_address addr,
642 			      void *data,
643 			      unsigned size)
644 {
645 	int rc;
646 	ulong linear;
647 
648 	rc = linearize(ctxt, addr, size, false, &linear);
649 	if (rc != X86EMUL_CONTINUE)
650 		return rc;
651 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
652 }
653 
654 static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt,
655 			      unsigned long eip, u8 *dest)
656 {
657 	struct fetch_cache *fc = &ctxt->fetch;
658 	int rc;
659 	int size, cur_size;
660 
661 	if (eip == fc->end) {
662 		unsigned long linear;
663 		struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
664 		cur_size = fc->end - fc->start;
665 		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
666 		rc = __linearize(ctxt, addr, size, false, true, &linear);
667 		if (rc != X86EMUL_CONTINUE)
668 			return rc;
669 		rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
670 				      size, &ctxt->exception);
671 		if (rc != X86EMUL_CONTINUE)
672 			return rc;
673 		fc->end += size;
674 	}
675 	*dest = fc->data[eip - fc->start];
676 	return X86EMUL_CONTINUE;
677 }
678 
679 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
680 			 unsigned long eip, void *dest, unsigned size)
681 {
682 	int rc;
683 
684 	/* x86 instructions are limited to 15 bytes. */
685 	if (eip + size - ctxt->eip > 15)
686 		return X86EMUL_UNHANDLEABLE;
687 	while (size--) {
688 		rc = do_insn_fetch_byte(ctxt, eip++, dest++);
689 		if (rc != X86EMUL_CONTINUE)
690 			return rc;
691 	}
692 	return X86EMUL_CONTINUE;
693 }
694 
695 /* Fetch next part of the instruction being emulated. */
696 #define insn_fetch(_type, _size, _eip)					\
697 ({	unsigned long _x;						\
698 	rc = do_insn_fetch(ctxt, (_eip), &_x, (_size));			\
699 	if (rc != X86EMUL_CONTINUE)					\
700 		goto done;						\
701 	(_eip) += (_size);						\
702 	(_type)_x;							\
703 })
704 
705 #define insn_fetch_arr(_arr, _size, _eip)				\
706 ({	rc = do_insn_fetch(ctxt, (_eip), _arr, (_size));		\
707 	if (rc != X86EMUL_CONTINUE)					\
708 		goto done;						\
709 	(_eip) += (_size);						\
710 })
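/*
 * Note the contract: both macros are statement expressions that assume a
 * local 'int rc' and a 'done:' label in the calling function, which they
 * use to bail out on a failed fetch (see decode_modrm() and decode_abs()
 * below).
 */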
711 
712 /*
713  * Given the 'reg' portion of a ModRM byte, and a register block, return a
714  * pointer into the block that addresses the relevant register.
715  * @highbyte_regs specifies whether to decode AH, CH, DH and BH.
716  */
717 static void *decode_register(u8 modrm_reg, unsigned long *regs,
718 			     int highbyte_regs)
719 {
720 	void *p;
721 
722 	p = &regs[modrm_reg];
723 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
724 		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
725 	return p;
726 }
727 
728 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
729 			   struct segmented_address addr,
730 			   u16 *size, unsigned long *address, int op_bytes)
731 {
732 	int rc;
733 
734 	if (op_bytes == 2)
735 		op_bytes = 3;
736 	*address = 0;
737 	rc = segmented_read_std(ctxt, addr, size, 2);
738 	if (rc != X86EMUL_CONTINUE)
739 		return rc;
740 	addr.ea += 2;
741 	rc = segmented_read_std(ctxt, addr, address, op_bytes);
742 	return rc;
743 }
744 
745 static int test_cc(unsigned int condition, unsigned int flags)
746 {
747 	int rc = 0;
748 
749 	switch ((condition & 15) >> 1) {
750 	case 0: /* o */
751 		rc |= (flags & EFLG_OF);
752 		break;
753 	case 1: /* b/c/nae */
754 		rc |= (flags & EFLG_CF);
755 		break;
756 	case 2: /* z/e */
757 		rc |= (flags & EFLG_ZF);
758 		break;
759 	case 3: /* be/na */
760 		rc |= (flags & (EFLG_CF|EFLG_ZF));
761 		break;
762 	case 4: /* s */
763 		rc |= (flags & EFLG_SF);
764 		break;
765 	case 5: /* p/pe */
766 		rc |= (flags & EFLG_PF);
767 		break;
768 	case 7: /* le/ng */
769 		rc |= (flags & EFLG_ZF);
770 		/* fall through */
771 	case 6: /* l/nge */
772 		rc |= (!(flags & EFLG_SF) != !(flags & EFLG_OF));
773 		break;
774 	}
775 
776 	/* Odd condition identifiers (lsb == 1) have inverted sense. */
777 	return (!!rc ^ (condition & 1));
778 }
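/*
 * The condition encoding mirrors the Jcc/SETcc opcodes: bits 3:1 select
 * the predicate and bit 0 inverts it.  For example, condition 0x4 ("e/z")
 * is true when ZF is set, while 0x5 ("ne/nz") is the same test inverted.
 */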
779 
780 static void fetch_register_operand(struct operand *op)
781 {
782 	switch (op->bytes) {
783 	case 1:
784 		op->val = *(u8 *)op->addr.reg;
785 		break;
786 	case 2:
787 		op->val = *(u16 *)op->addr.reg;
788 		break;
789 	case 4:
790 		op->val = *(u32 *)op->addr.reg;
791 		break;
792 	case 8:
793 		op->val = *(u64 *)op->addr.reg;
794 		break;
795 	}
796 }
797 
798 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
799 {
800 	ctxt->ops->get_fpu(ctxt);
801 	switch (reg) {
802 	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
803 	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
804 	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
805 	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
806 	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
807 	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
808 	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
809 	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
810 #ifdef CONFIG_X86_64
811 	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
812 	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
813 	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
814 	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
815 	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
816 	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
817 	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
818 	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
819 #endif
820 	default: BUG();
821 	}
822 	ctxt->ops->put_fpu(ctxt);
823 }
824 
825 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
826 			  int reg)
827 {
828 	ctxt->ops->get_fpu(ctxt);
829 	switch (reg) {
830 	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
831 	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
832 	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
833 	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
834 	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
835 	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
836 	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
837 	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
838 #ifdef CONFIG_X86_64
839 	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
840 	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
841 	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
842 	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
843 	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
844 	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
845 	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
846 	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
847 #endif
848 	default: BUG();
849 	}
850 	ctxt->ops->put_fpu(ctxt);
851 }
852 
853 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
854 				    struct operand *op,
855 				    int inhibit_bytereg)
856 {
857 	unsigned reg = ctxt->modrm_reg;
858 	int highbyte_regs = ctxt->rex_prefix == 0;
859 
860 	if (!(ctxt->d & ModRM))
861 		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
862 
863 	if (ctxt->d & Sse) {
864 		op->type = OP_XMM;
865 		op->bytes = 16;
866 		op->addr.xmm = reg;
867 		read_sse_reg(ctxt, &op->vec_val, reg);
868 		return;
869 	}
870 
871 	op->type = OP_REG;
872 	if ((ctxt->d & ByteOp) && !inhibit_bytereg) {
873 		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
874 		op->bytes = 1;
875 	} else {
876 		op->addr.reg = decode_register(reg, ctxt->regs, 0);
877 		op->bytes = ctxt->op_bytes;
878 	}
879 	fetch_register_operand(op);
880 	op->orig_val = op->val;
881 }
882 
883 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
884 			struct operand *op)
885 {
886 	u8 sib;
887 	int index_reg = 0, base_reg = 0, scale;
888 	int rc = X86EMUL_CONTINUE;
889 	ulong modrm_ea = 0;
890 
891 	if (ctxt->rex_prefix) {
892 		ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;	/* REX.R */
893 		index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
894 		ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
895 	}
896 
897 	ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
898 	ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
899 	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
900 	ctxt->modrm_rm |= (ctxt->modrm & 0x07);
901 	ctxt->modrm_seg = VCPU_SREG_DS;
902 
903 	if (ctxt->modrm_mod == 3) {
904 		op->type = OP_REG;
905 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
906 		op->addr.reg = decode_register(ctxt->modrm_rm,
907 					       ctxt->regs, ctxt->d & ByteOp);
908 		if (ctxt->d & Sse) {
909 			op->type = OP_XMM;
910 			op->bytes = 16;
911 			op->addr.xmm = ctxt->modrm_rm;
912 			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
913 			return rc;
914 		}
915 		fetch_register_operand(op);
916 		return rc;
917 	}
918 
919 	op->type = OP_MEM;
920 
921 	if (ctxt->ad_bytes == 2) {
922 		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
923 		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
924 		unsigned si = ctxt->regs[VCPU_REGS_RSI];
925 		unsigned di = ctxt->regs[VCPU_REGS_RDI];
926 
927 		/* 16-bit ModR/M decode. */
928 		switch (ctxt->modrm_mod) {
929 		case 0:
930 			if (ctxt->modrm_rm == 6)
931 				modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
932 			break;
933 		case 1:
934 			modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
935 			break;
936 		case 2:
937 			modrm_ea += insn_fetch(u16, 2, ctxt->_eip);
938 			break;
939 		}
940 		switch (ctxt->modrm_rm) {
941 		case 0:
942 			modrm_ea += bx + si;
943 			break;
944 		case 1:
945 			modrm_ea += bx + di;
946 			break;
947 		case 2:
948 			modrm_ea += bp + si;
949 			break;
950 		case 3:
951 			modrm_ea += bp + di;
952 			break;
953 		case 4:
954 			modrm_ea += si;
955 			break;
956 		case 5:
957 			modrm_ea += di;
958 			break;
959 		case 6:
960 			if (ctxt->modrm_mod != 0)
961 				modrm_ea += bp;
962 			break;
963 		case 7:
964 			modrm_ea += bx;
965 			break;
966 		}
967 		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
968 		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
969 			ctxt->modrm_seg = VCPU_SREG_SS;
970 		modrm_ea = (u16)modrm_ea;
971 	} else {
972 		/* 32/64-bit ModR/M decode. */
973 		if ((ctxt->modrm_rm & 7) == 4) {
974 			sib = insn_fetch(u8, 1, ctxt->_eip);
975 			index_reg |= (sib >> 3) & 7;
976 			base_reg |= sib & 7;
977 			scale = sib >> 6;
978 
979 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
980 				modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
981 			else
982 				modrm_ea += ctxt->regs[base_reg];
983 			if (index_reg != 4)
984 				modrm_ea += ctxt->regs[index_reg] << scale;
985 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
986 			if (ctxt->mode == X86EMUL_MODE_PROT64)
987 				ctxt->rip_relative = 1;
988 		} else
989 			modrm_ea += ctxt->regs[ctxt->modrm_rm];
990 		switch (ctxt->modrm_mod) {
991 		case 0:
992 			if (ctxt->modrm_rm == 5)
993 				modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
994 			break;
995 		case 1:
996 			modrm_ea += insn_fetch(s8, 1, ctxt->_eip);
997 			break;
998 		case 2:
999 			modrm_ea += insn_fetch(s32, 4, ctxt->_eip);
1000 			break;
1001 		}
1002 	}
1003 	op->addr.mem.ea = modrm_ea;
1004 done:
1005 	return rc;
1006 }
1007 
1008 static int decode_abs(struct x86_emulate_ctxt *ctxt,
1009 		      struct operand *op)
1010 {
1011 	int rc = X86EMUL_CONTINUE;
1012 
1013 	op->type = OP_MEM;
1014 	switch (ctxt->ad_bytes) {
1015 	case 2:
1016 		op->addr.mem.ea = insn_fetch(u16, 2, ctxt->_eip);
1017 		break;
1018 	case 4:
1019 		op->addr.mem.ea = insn_fetch(u32, 4, ctxt->_eip);
1020 		break;
1021 	case 8:
1022 		op->addr.mem.ea = insn_fetch(u64, 8, ctxt->_eip);
1023 		break;
1024 	}
1025 done:
1026 	return rc;
1027 }
1028 
1029 static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1030 {
1031 	long sv = 0, mask;
1032 
1033 	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
1034 		mask = ~(ctxt->dst.bytes * 8 - 1);
1035 
1036 		if (ctxt->src.bytes == 2)
1037 			sv = (s16)ctxt->src.val & (s16)mask;
1038 		else if (ctxt->src.bytes == 4)
1039 			sv = (s32)ctxt->src.val & (s32)mask;
1040 
1041 		ctxt->dst.addr.mem.ea += (sv >> 3);
1042 	}
1043 
1044 	/* only the bit offset within the operand remains */
1045 	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
1046 }
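/*
 * For bit instructions (bt/bts/btr/btc) with a register bit index, the
 * index may address memory outside the destination operand: the byte
 * offset (sv >> 3, sign extended) is folded into the effective address
 * above, and only the bit offset within the operand is kept in src.val.
 */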
1047 
1048 static int read_emulated(struct x86_emulate_ctxt *ctxt,
1049 			 unsigned long addr, void *dest, unsigned size)
1050 {
1051 	int rc;
1052 	struct read_cache *mc = &ctxt->mem_read;
1053 
1054 	while (size) {
1055 		int n = min(size, 8u);
1056 		size -= n;
1057 		if (mc->pos < mc->end)
1058 			goto read_cached;
1059 
1060 		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1061 					      &ctxt->exception);
1062 		if (rc != X86EMUL_CONTINUE)
1063 			return rc;
1064 		mc->end += n;
1065 
1066 	read_cached:
1067 		memcpy(dest, mc->data + mc->pos, n);
1068 		mc->pos += n;
1069 		dest += n;
1070 		addr += n;
1071 	}
1072 	return X86EMUL_CONTINUE;
1073 }
1074 
1075 static int segmented_read(struct x86_emulate_ctxt *ctxt,
1076 			  struct segmented_address addr,
1077 			  void *data,
1078 			  unsigned size)
1079 {
1080 	int rc;
1081 	ulong linear;
1082 
1083 	rc = linearize(ctxt, addr, size, false, &linear);
1084 	if (rc != X86EMUL_CONTINUE)
1085 		return rc;
1086 	return read_emulated(ctxt, linear, data, size);
1087 }
1088 
1089 static int segmented_write(struct x86_emulate_ctxt *ctxt,
1090 			   struct segmented_address addr,
1091 			   const void *data,
1092 			   unsigned size)
1093 {
1094 	int rc;
1095 	ulong linear;
1096 
1097 	rc = linearize(ctxt, addr, size, true, &linear);
1098 	if (rc != X86EMUL_CONTINUE)
1099 		return rc;
1100 	return ctxt->ops->write_emulated(ctxt, linear, data, size,
1101 					 &ctxt->exception);
1102 }
1103 
1104 static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1105 			     struct segmented_address addr,
1106 			     const void *orig_data, const void *data,
1107 			     unsigned size)
1108 {
1109 	int rc;
1110 	ulong linear;
1111 
1112 	rc = linearize(ctxt, addr, size, true, &linear);
1113 	if (rc != X86EMUL_CONTINUE)
1114 		return rc;
1115 	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1116 					   size, &ctxt->exception);
1117 }
1118 
1119 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1120 			   unsigned int size, unsigned short port,
1121 			   void *dest)
1122 {
1123 	struct read_cache *rc = &ctxt->io_read;
1124 
1125 	if (rc->pos == rc->end) { /* refill pio read ahead */
1126 		unsigned int in_page, n;
1127 		unsigned int count = ctxt->rep_prefix ?
1128 			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
1129 		in_page = (ctxt->eflags & EFLG_DF) ?
1130 			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
1131 			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
1132 		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
1133 			count);
1134 		if (n == 0)
1135 			n = 1;
1136 		rc->pos = rc->end = 0;
1137 		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
1138 			return 0;
1139 		rc->end = n * size;
1140 	}
1141 
1142 	memcpy(dest, rc->data + rc->pos, size);
1143 	rc->pos += size;
1144 	return 1;
1145 }
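/*
 * For REP INS the host exits are batched: up to a page's worth of port
 * data (bounded by rc->data and the remaining REP count) is read ahead
 * in one backend call and then handed out of the cache one element per
 * iteration.
 */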
1146 
1147 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
1148 				     u16 selector, struct desc_ptr *dt)
1149 {
1150 	struct x86_emulate_ops *ops = ctxt->ops;
1151 
1152 	if (selector & (1 << 2)) {	/* TI bit set: selector is in the LDT */
1153 		struct desc_struct desc;
1154 		u16 sel;
1155 
1156 		memset(dt, 0, sizeof *dt);
1157 		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
1158 			return;
1159 
1160 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
1161 		dt->address = get_desc_base(&desc);
1162 	} else
1163 		ops->get_gdt(ctxt, dt);
1164 }
1165 
1166 /* allowed just for 8-byte segment descriptors */
1167 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1168 				   u16 selector, struct desc_struct *desc)
1169 {
1170 	struct desc_ptr dt;
1171 	u16 index = selector >> 3;
1172 	ulong addr;
1173 
1174 	get_descriptor_table_ptr(ctxt, selector, &dt);
1175 
1176 	if (dt.size < index * 8 + 7)
1177 		return emulate_gp(ctxt, selector & 0xfffc);
1178 
1179 	addr = dt.address + index * 8;
1180 	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
1181 				   &ctxt->exception);
1182 }
1183 
1184 /* allowed just for 8-byte segment descriptors */
1185 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1186 				    u16 selector, struct desc_struct *desc)
1187 {
1188 	struct desc_ptr dt;
1189 	u16 index = selector >> 3;
1190 	ulong addr;
1191 
1192 	get_descriptor_table_ptr(ctxt, selector, &dt);
1193 
1194 	if (dt.size < index * 8 + 7)
1195 		return emulate_gp(ctxt, selector & 0xfffc);
1196 
1197 	addr = dt.address + index * 8;
1198 	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
1199 				    &ctxt->exception);
1200 }
1201 
1202 /* Does not support long mode */
1203 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1204 				   u16 selector, int seg)
1205 {
1206 	struct desc_struct seg_desc;
1207 	u8 dpl, rpl, cpl;
1208 	unsigned err_vec = GP_VECTOR;
1209 	u32 err_code = 0;
1210 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
1211 	int ret;
1212 
1213 	memset(&seg_desc, 0, sizeof seg_desc);
1214 
1215 	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
1216 	    || ctxt->mode == X86EMUL_MODE_REAL) {
1217 		/* set real mode segment descriptor */
1218 		set_desc_base(&seg_desc, selector << 4);
1219 		set_desc_limit(&seg_desc, 0xffff);
1220 		seg_desc.type = 3;
1221 		seg_desc.p = 1;
1222 		seg_desc.s = 1;
1223 		goto load;
1224 	}
1225 
1226 	/* NULL selector is not valid for TR, CS and SS */
1227 	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
1228 	    && null_selector)
1229 		goto exception;
1230 
1231 	/* TR should be in GDT only */
1232 	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
1233 		goto exception;
1234 
1235 	if (null_selector) /* for NULL selector skip all following checks */
1236 		goto load;
1237 
1238 	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
1239 	if (ret != X86EMUL_CONTINUE)
1240 		return ret;
1241 
1242 	err_code = selector & 0xfffc;
1243 	err_vec = GP_VECTOR;
1244 
1245 	/* can't load a system descriptor into an ordinary segment register */
1246 	if (seg <= VCPU_SREG_GS && !seg_desc.s)
1247 		goto exception;
1248 
1249 	if (!seg_desc.p) {
1250 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
1251 		goto exception;
1252 	}
1253 
1254 	rpl = selector & 3;
1255 	dpl = seg_desc.dpl;
1256 	cpl = ctxt->ops->cpl(ctxt);
1257 
1258 	switch (seg) {
1259 	case VCPU_SREG_SS:
1260 		/*
1261 		 * segment is not a writable data segment, or the selector's
1262 		 * RPL != CPL, or the descriptor's DPL != CPL
1263 		 */
1264 		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
1265 			goto exception;
1266 		break;
1267 	case VCPU_SREG_CS:
1268 		if (!(seg_desc.type & 8))
1269 			goto exception;
1270 
1271 		if (seg_desc.type & 4) {
1272 			/* conforming */
1273 			if (dpl > cpl)
1274 				goto exception;
1275 		} else {
1276 			/* nonconforming */
1277 			if (rpl > cpl || dpl != cpl)
1278 				goto exception;
1279 		}
1280 		/* CS(RPL) <- CPL */
1281 		selector = (selector & 0xfffc) | cpl;
1282 		break;
1283 	case VCPU_SREG_TR:
1284 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
1285 			goto exception;
1286 		break;
1287 	case VCPU_SREG_LDTR:
1288 		if (seg_desc.s || seg_desc.type != 2)
1289 			goto exception;
1290 		break;
1291 	default: /*  DS, ES, FS, or GS */
1292 		/*
1293 		 * segment is not a data or readable code segment or
1294 		 * ((segment is a data or nonconforming code segment)
1295 		 * and (both RPL and CPL > DPL))
1296 		 */
1297 		if ((seg_desc.type & 0xa) == 0x8 ||
1298 		    (((seg_desc.type & 0xc) != 0xc) &&
1299 		     (rpl > dpl && cpl > dpl)))
1300 			goto exception;
1301 		break;
1302 	}
1303 
1304 	if (seg_desc.s) {
1305 		/* mark segment as accessed */
1306 		seg_desc.type |= 1;
1307 		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
1308 		if (ret != X86EMUL_CONTINUE)
1309 			return ret;
1310 	}
1311 load:
1312 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
1313 	return X86EMUL_CONTINUE;
1314 exception:
1315 	emulate_exception(ctxt, err_vec, err_code, true);
1316 	return X86EMUL_PROPAGATE_FAULT;
1317 }
1318 
1319 static void write_register_operand(struct operand *op)
1320 {
1321 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
1322 	switch (op->bytes) {
1323 	case 1:
1324 		*(u8 *)op->addr.reg = (u8)op->val;
1325 		break;
1326 	case 2:
1327 		*(u16 *)op->addr.reg = (u16)op->val;
1328 		break;
1329 	case 4:
1330 		*op->addr.reg = (u32)op->val;
1331 		break;	/* 64b: zero-extend */
1332 	case 8:
1333 		*op->addr.reg = op->val;
1334 		break;
1335 	}
1336 }
1337 
1338 static int writeback(struct x86_emulate_ctxt *ctxt)
1339 {
1340 	int rc;
1341 
1342 	switch (ctxt->dst.type) {
1343 	case OP_REG:
1344 		write_register_operand(&ctxt->dst);
1345 		break;
1346 	case OP_MEM:
1347 		if (ctxt->lock_prefix)
1348 			rc = segmented_cmpxchg(ctxt,
1349 					       ctxt->dst.addr.mem,
1350 					       &ctxt->dst.orig_val,
1351 					       &ctxt->dst.val,
1352 					       ctxt->dst.bytes);
1353 		else
1354 			rc = segmented_write(ctxt,
1355 					     ctxt->dst.addr.mem,
1356 					     &ctxt->dst.val,
1357 					     ctxt->dst.bytes);
1358 		if (rc != X86EMUL_CONTINUE)
1359 			return rc;
1360 		break;
1361 	case OP_XMM:
1362 		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1363 		break;
1364 	case OP_NONE:
1365 		/* no writeback */
1366 		break;
1367 	default:
1368 		break;
1369 	}
1370 	return X86EMUL_CONTINUE;
1371 }
1372 
1373 static int em_push(struct x86_emulate_ctxt *ctxt)
1374 {
1375 	struct segmented_address addr;
1376 
1377 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
1378 	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1379 	addr.seg = VCPU_SREG_SS;
1380 
1381 	/* Disable writeback. */
1382 	ctxt->dst.type = OP_NONE;
1383 	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
1384 }
1385 
1386 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1387 		       void *dest, int len)
1388 {
1389 	int rc;
1390 	struct segmented_address addr;
1391 
1392 	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
1393 	addr.seg = VCPU_SREG_SS;
1394 	rc = segmented_read(ctxt, addr, dest, len);
1395 	if (rc != X86EMUL_CONTINUE)
1396 		return rc;
1397 
1398 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
1399 	return rc;
1400 }
1401 
1402 static int em_pop(struct x86_emulate_ctxt *ctxt)
1403 {
1404 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1405 }
1406 
1407 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
1408 			void *dest, int len)
1409 {
1410 	int rc;
1411 	unsigned long val, change_mask;
1412 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1413 	int cpl = ctxt->ops->cpl(ctxt);
1414 
1415 	rc = emulate_pop(ctxt, &val, len);
1416 	if (rc != X86EMUL_CONTINUE)
1417 		return rc;
1418 
1419 	change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
1420 		| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
1421 
422 	switch (ctxt->mode) {
1423 	case X86EMUL_MODE_PROT64:
1424 	case X86EMUL_MODE_PROT32:
1425 	case X86EMUL_MODE_PROT16:
1426 		if (cpl == 0)
1427 			change_mask |= EFLG_IOPL;
1428 		if (cpl <= iopl)
1429 			change_mask |= EFLG_IF;
1430 		break;
1431 	case X86EMUL_MODE_VM86:
1432 		if (iopl < 3)
1433 			return emulate_gp(ctxt, 0);
1434 		change_mask |= EFLG_IF;
1435 		break;
1436 	default: /* real mode */
1437 		change_mask |= (EFLG_IOPL | EFLG_IF);
1438 		break;
1439 	}
1440 
1441 	*(unsigned long *)dest =
1442 		(ctxt->eflags & ~change_mask) | (val & change_mask);
1443 
1444 	return rc;
1445 }
1446 
1447 static int em_popf(struct x86_emulate_ctxt *ctxt)
1448 {
1449 	ctxt->dst.type = OP_REG;
1450 	ctxt->dst.addr.reg = &ctxt->eflags;
1451 	ctxt->dst.bytes = ctxt->op_bytes;
1452 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1453 }
1454 
1455 static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1456 {
1457 	ctxt->src.val = get_segment_selector(ctxt, seg);
1458 
1459 	return em_push(ctxt);
1460 }
1461 
1462 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
1463 {
1464 	unsigned long selector;
1465 	int rc;
1466 
1467 	rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
1468 	if (rc != X86EMUL_CONTINUE)
1469 		return rc;
1470 
1471 	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
1472 	return rc;
1473 }
1474 
1475 static int em_pusha(struct x86_emulate_ctxt *ctxt)
1476 {
1477 	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
1478 	int rc = X86EMUL_CONTINUE;
1479 	int reg = VCPU_REGS_RAX;
1480 
1481 	while (reg <= VCPU_REGS_RDI) {
1482 		ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp
1483 						       : ctxt->regs[reg];
1484 
1485 		rc = em_push(ctxt);
1486 		if (rc != X86EMUL_CONTINUE)
1487 			return rc;
1488 
1489 		++reg;
1490 	}
1491 
1492 	return rc;
1493 }
1494 
1495 static int em_pushf(struct x86_emulate_ctxt *ctxt)
1496 {
1497 	ctxt->src.val =  (unsigned long)ctxt->eflags;
1498 	return em_push(ctxt);
1499 }
1500 
1501 static int em_popa(struct x86_emulate_ctxt *ctxt)
1502 {
1503 	int rc = X86EMUL_CONTINUE;
1504 	int reg = VCPU_REGS_RDI;
1505 
1506 	while (reg >= VCPU_REGS_RAX) {
1507 		if (reg == VCPU_REGS_RSP) {
1508 			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
1509 							ctxt->op_bytes);
1510 			--reg;
1511 		}
1512 
1513 		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
1514 		if (rc != X86EMUL_CONTINUE)
1515 			break;
1516 		--reg;
1517 	}
1518 	return rc;
1519 }
1520 
1521 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
1522 {
1523 	struct x86_emulate_ops *ops = ctxt->ops;
1524 	int rc;
1525 	struct desc_ptr dt;
1526 	gva_t cs_addr;
1527 	gva_t eip_addr;
1528 	u16 cs, eip;
1529 
1530 	/* TODO: Add limit checks */
1531 	ctxt->src.val = ctxt->eflags;
1532 	rc = em_push(ctxt);
1533 	if (rc != X86EMUL_CONTINUE)
1534 		return rc;
1535 
1536 	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
1537 
1538 	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
1539 	rc = em_push(ctxt);
1540 	if (rc != X86EMUL_CONTINUE)
1541 		return rc;
1542 
1543 	ctxt->src.val = ctxt->_eip;
1544 	rc = em_push(ctxt);
1545 	if (rc != X86EMUL_CONTINUE)
1546 		return rc;
1547 
1548 	ops->get_idt(ctxt, &dt);
1549 
1550 	eip_addr = dt.address + (irq << 2);
1551 	cs_addr = dt.address + (irq << 2) + 2;
1552 
1553 	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1554 	if (rc != X86EMUL_CONTINUE)
1555 		return rc;
1556 
1557 	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1558 	if (rc != X86EMUL_CONTINUE)
1559 		return rc;
1560 
1561 	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
1562 	if (rc != X86EMUL_CONTINUE)
1563 		return rc;
1564 
1565 	ctxt->_eip = eip;
1566 
1567 	return rc;
1568 }
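/*
 * Real-mode interrupts go through the IVT at linear address dt.address
 * (usually 0), with 4-byte entries: the new IP at irq * 4 and the new CS
 * at irq * 4 + 2 -- hence the 'irq << 2' arithmetic above.
 */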
1569 
1570 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
1571 {
1572 	switch (ctxt->mode) {
1573 	case X86EMUL_MODE_REAL:
1574 		return emulate_int_real(ctxt, irq);
1575 	case X86EMUL_MODE_VM86:
1576 	case X86EMUL_MODE_PROT16:
1577 	case X86EMUL_MODE_PROT32:
1578 	case X86EMUL_MODE_PROT64:
1579 	default:
1580 		/* Protected-mode interrupts are not implemented yet */
1581 		return X86EMUL_UNHANDLEABLE;
1582 	}
1583 }
1584 
1585 static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
1586 {
1587 	int rc = X86EMUL_CONTINUE;
1588 	unsigned long temp_eip = 0;
1589 	unsigned long temp_eflags = 0;
1590 	unsigned long cs = 0;
1591 	unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
1592 			     EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
1593 			     EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
1594 	unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
1595 
1596 	/* TODO: Add stack limit check */
1597 
1598 	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
1599 
1600 	if (rc != X86EMUL_CONTINUE)
1601 		return rc;
1602 
1603 	if (temp_eip & ~0xffff)
1604 		return emulate_gp(ctxt, 0);
1605 
1606 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1607 
1608 	if (rc != X86EMUL_CONTINUE)
1609 		return rc;
1610 
1611 	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
1612 
1613 	if (rc != X86EMUL_CONTINUE)
1614 		return rc;
1615 
1616 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1617 
1618 	if (rc != X86EMUL_CONTINUE)
1619 		return rc;
1620 
1621 	ctxt->_eip = temp_eip;
1622 
1623 
1624 	if (ctxt->op_bytes == 4)
1625 		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
1626 	else if (ctxt->op_bytes == 2) {
1627 		ctxt->eflags &= ~0xffff;
1628 		ctxt->eflags |= temp_eflags;
1629 	}
1630 
1631 	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
1632 	ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
1633 
1634 	return rc;
1635 }
1636 
1637 static int em_iret(struct x86_emulate_ctxt *ctxt)
1638 {
1639 	switch (ctxt->mode) {
1640 	case X86EMUL_MODE_REAL:
1641 		return emulate_iret_real(ctxt);
1642 	case X86EMUL_MODE_VM86:
1643 	case X86EMUL_MODE_PROT16:
1644 	case X86EMUL_MODE_PROT32:
1645 	case X86EMUL_MODE_PROT64:
1646 	default:
1647 		/* iret from protected mode unimplemented yet */
1648 		return X86EMUL_UNHANDLEABLE;
1649 	}
1650 }
1651 
1652 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1653 {
1654 	int rc;
1655 	unsigned short sel;
1656 
1657 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1658 
1659 	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1660 	if (rc != X86EMUL_CONTINUE)
1661 		return rc;
1662 
1663 	ctxt->_eip = 0;
1664 	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1665 	return X86EMUL_CONTINUE;
1666 }
1667 
1668 static int em_grp1a(struct x86_emulate_ctxt *ctxt)
1669 {
1670 	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->dst.bytes);
1671 }
1672 
1673 static int em_grp2(struct x86_emulate_ctxt *ctxt)
1674 {
1675 	switch (ctxt->modrm_reg) {
1676 	case 0:	/* rol */
1677 		emulate_2op_SrcB("rol", ctxt->src, ctxt->dst, ctxt->eflags);
1678 		break;
1679 	case 1:	/* ror */
1680 		emulate_2op_SrcB("ror", ctxt->src, ctxt->dst, ctxt->eflags);
1681 		break;
1682 	case 2:	/* rcl */
1683 		emulate_2op_SrcB("rcl", ctxt->src, ctxt->dst, ctxt->eflags);
1684 		break;
1685 	case 3:	/* rcr */
1686 		emulate_2op_SrcB("rcr", ctxt->src, ctxt->dst, ctxt->eflags);
1687 		break;
1688 	case 4:	/* sal/shl */
1689 	case 6:	/* sal/shl */
1690 		emulate_2op_SrcB("sal", ctxt->src, ctxt->dst, ctxt->eflags);
1691 		break;
1692 	case 5:	/* shr */
1693 		emulate_2op_SrcB("shr", ctxt->src, ctxt->dst, ctxt->eflags);
1694 		break;
1695 	case 7:	/* sar */
1696 		emulate_2op_SrcB("sar", ctxt->src, ctxt->dst, ctxt->eflags);
1697 		break;
1698 	}
1699 	return X86EMUL_CONTINUE;
1700 }
1701 
1702 static int em_grp3(struct x86_emulate_ctxt *ctxt)
1703 {
1704 	unsigned long *rax = &ctxt->regs[VCPU_REGS_RAX];
1705 	unsigned long *rdx = &ctxt->regs[VCPU_REGS_RDX];
1706 	u8 de = 0;
1707 
1708 	switch (ctxt->modrm_reg) {
1709 	case 0 ... 1:	/* test */
1710 		emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
1711 		break;
1712 	case 2:	/* not */
1713 		ctxt->dst.val = ~ctxt->dst.val;
1714 		break;
1715 	case 3:	/* neg */
1716 		emulate_1op("neg", ctxt->dst, ctxt->eflags);
1717 		break;
1718 	case 4: /* mul */
1719 		emulate_1op_rax_rdx("mul", ctxt->src, *rax, *rdx, ctxt->eflags);
1720 		break;
1721 	case 5: /* imul */
1722 		emulate_1op_rax_rdx("imul", ctxt->src, *rax, *rdx, ctxt->eflags);
1723 		break;
1724 	case 6: /* div */
1725 		emulate_1op_rax_rdx_ex("div", ctxt->src, *rax, *rdx,
1726 				       ctxt->eflags, de);
1727 		break;
1728 	case 7: /* idiv */
1729 		emulate_1op_rax_rdx_ex("idiv", ctxt->src, *rax, *rdx,
1730 				       ctxt->eflags, de);
1731 		break;
1732 	default:
1733 		return X86EMUL_UNHANDLEABLE;
1734 	}
1735 	if (de)
1736 		return emulate_de(ctxt);
1737 	return X86EMUL_CONTINUE;
1738 }
1739 
1740 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1741 {
1742 	int rc = X86EMUL_CONTINUE;
1743 
1744 	switch (ctxt->modrm_reg) {
1745 	case 0:	/* inc */
1746 		emulate_1op("inc", ctxt->dst, ctxt->eflags);
1747 		break;
1748 	case 1:	/* dec */
1749 		emulate_1op("dec", ctxt->dst, ctxt->eflags);
1750 		break;
1751 	case 2: /* call near abs */ {
1752 		long int old_eip;
1753 		old_eip = ctxt->_eip;
1754 		ctxt->_eip = ctxt->src.val;
1755 		ctxt->src.val = old_eip;
1756 		rc = em_push(ctxt);
1757 		break;
1758 	}
1759 	case 4: /* jmp abs */
1760 		ctxt->_eip = ctxt->src.val;
1761 		break;
1762 	case 5: /* jmp far */
1763 		rc = em_jmp_far(ctxt);
1764 		break;
1765 	case 6:	/* push */
1766 		rc = em_push(ctxt);
1767 		break;
1768 	}
1769 	return rc;
1770 }
1771 
1772 static int em_grp9(struct x86_emulate_ctxt *ctxt)
1773 {
1774 	u64 old = ctxt->dst.orig_val64;
1775 
1776 	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
1777 	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
1778 		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
1779 		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
1780 		ctxt->eflags &= ~EFLG_ZF;
1781 	} else {
1782 		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
1783 			(u32) ctxt->regs[VCPU_REGS_RBX];
1784 
1785 		ctxt->eflags |= EFLG_ZF;
1786 	}
1787 	return X86EMUL_CONTINUE;
1788 }
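/*
 * em_grp9() implements cmpxchg8b: compare EDX:EAX with the 64-bit memory
 * operand; on mismatch, load EDX:EAX from memory and clear ZF; on match,
 * arrange for ECX:EBX to be written back and set ZF.
 */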
1789 
1790 static int em_ret(struct x86_emulate_ctxt *ctxt)
1791 {
1792 	ctxt->dst.type = OP_REG;
1793 	ctxt->dst.addr.reg = &ctxt->_eip;
1794 	ctxt->dst.bytes = ctxt->op_bytes;
1795 	return em_pop(ctxt);
1796 }
1797 
1798 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1799 {
1800 	int rc;
1801 	unsigned long cs;
1802 
1803 	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1804 	if (rc != X86EMUL_CONTINUE)
1805 		return rc;
1806 	if (ctxt->op_bytes == 4)
1807 		ctxt->_eip = (u32)ctxt->_eip;
1808 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1809 	if (rc != X86EMUL_CONTINUE)
1810 		return rc;
1811 	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1812 	return rc;
1813 }
1814 
1815 static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
1816 {
1817 	unsigned short sel;
1818 	int rc;
1819 
1820 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1821 
1822 	rc = load_segment_descriptor(ctxt, sel, seg);
1823 	if (rc != X86EMUL_CONTINUE)
1824 		return rc;
1825 
1826 	ctxt->dst.val = ctxt->src.val;
1827 	return rc;
1828 }
1829 
1830 static void
1831 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1832 			struct desc_struct *cs, struct desc_struct *ss)
1833 {
1834 	u16 selector;
1835 
1836 	memset(cs, 0, sizeof(struct desc_struct));
1837 	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
1838 	memset(ss, 0, sizeof(struct desc_struct));
1839 
1840 	cs->l = 0;		/* will be adjusted later */
1841 	set_desc_base(cs, 0);	/* flat segment */
1842 	cs->g = 1;		/* 4kb granularity */
1843 	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
1844 	cs->type = 0x0b;	/* Read, Execute, Accessed */
1845 	cs->s = 1;
1846 	cs->dpl = 0;		/* will be adjusted later */
1847 	cs->p = 1;
1848 	cs->d = 1;
1849 
1850 	set_desc_base(ss, 0);	/* flat segment */
1851 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
1852 	ss->g = 1;		/* 4kb granularity */
1853 	ss->s = 1;
1854 	ss->type = 0x03;	/* Read/Write, Accessed */
1855 	ss->d = 1;		/* 32bit stack segment */
1856 	ss->dpl = 0;
1857 	ss->p = 1;
1858 }
1859 
1860 static int em_syscall(struct x86_emulate_ctxt *ctxt)
1861 {
1862 	struct x86_emulate_ops *ops = ctxt->ops;
1863 	struct desc_struct cs, ss;
1864 	u64 msr_data;
1865 	u16 cs_sel, ss_sel;
1866 	u64 efer = 0;
1867 
1868 	/* syscall is not available in real mode */
1869 	/* syscall is not available in real or virtual-8086 mode */
1870 	    ctxt->mode == X86EMUL_MODE_VM86)
1871 		return emulate_ud(ctxt);
1872 
1873 	ops->get_msr(ctxt, MSR_EFER, &efer);
1874 	setup_syscalls_segments(ctxt, &cs, &ss);
1875 
1876 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
1877 	msr_data >>= 32;
1878 	cs_sel = (u16)(msr_data & 0xfffc);
1879 	ss_sel = (u16)(msr_data + 8);
1880 
1881 	if (efer & EFER_LMA) {
1882 		cs.d = 0;
1883 		cs.l = 1;
1884 	}
1885 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1886 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1887 
1888 	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
1889 	if (efer & EFER_LMA) {
1890 #ifdef CONFIG_X86_64
1891 		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1892 
1893 		ops->get_msr(ctxt,
1894 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
1895 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
1896 		ctxt->_eip = msr_data;
1897 
1898 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
1899 		ctxt->eflags &= ~(msr_data | EFLG_RF);
1900 #endif
1901 	} else {
1902 		/* legacy mode */
1903 		ops->get_msr(ctxt, MSR_STAR, &msr_data);
1904 		ctxt->_eip = (u32)msr_data;
1905 
1906 		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1907 	}
1908 
1909 	return X86EMUL_CONTINUE;
1910 }
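/*
 * Editor's note on the MSR_STAR selector math in em_syscall() above
 * (bit ranges as architecturally documented):
 *
 *	STAR[47:32] = SYSCALL base selector:  CS = base, SS = base + 8
 *	STAR[63:48] = SYSRET base selector    (not consumed here)
 *
 * hence the "msr_data >>= 32" followed by:
 *
 *	cs_sel = (u16)(msr_data & 0xfffc);	// mask off RPL bits
 *	ss_sel = (u16)(msr_data + 8);		// SS descriptor follows CS
 */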
1911 
1912 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
1913 {
1914 	struct x86_emulate_ops *ops = ctxt->ops;
1915 	struct desc_struct cs, ss;
1916 	u64 msr_data;
1917 	u16 cs_sel, ss_sel;
1918 	u64 efer = 0;
1919 
1920 	ops->get_msr(ctxt, MSR_EFER, &efer);
1921 	/* inject #GP if in real mode */
1922 	if (ctxt->mode == X86EMUL_MODE_REAL)
1923 		return emulate_gp(ctxt, 0);
1924 
1925 	/* XXX sysenter/sysexit have not been tested in 64-bit mode.
1926 	 * Therefore, we inject an #UD.
1927 	 */
1928 	if (ctxt->mode == X86EMUL_MODE_PROT64)
1929 		return emulate_ud(ctxt);
1930 
1931 	setup_syscalls_segments(ctxt, &cs, &ss);
1932 
1933 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1934 	switch (ctxt->mode) {
1935 	case X86EMUL_MODE_PROT32:
1936 		if ((msr_data & 0xfffc) == 0x0)
1937 			return emulate_gp(ctxt, 0);
1938 		break;
1939 	case X86EMUL_MODE_PROT64:
1940 		if (msr_data == 0x0)
1941 			return emulate_gp(ctxt, 0);
1942 		break;
1943 	}
1944 
1945 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1946 	cs_sel = (u16)msr_data;
1947 	cs_sel &= ~SELECTOR_RPL_MASK;
1948 	ss_sel = cs_sel + 8;
1949 	ss_sel &= ~SELECTOR_RPL_MASK;
1950 	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
1951 		cs.d = 0;
1952 		cs.l = 1;
1953 	}
1954 
1955 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1956 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1957 
1958 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
1959 	ctxt->_eip = msr_data;
1960 
1961 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
1962 	ctxt->regs[VCPU_REGS_RSP] = msr_data;
1963 
1964 	return X86EMUL_CONTINUE;
1965 }
1966 
1967 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1968 {
1969 	struct x86_emulate_ops *ops = ctxt->ops;
1970 	struct desc_struct cs, ss;
1971 	u64 msr_data;
1972 	int usermode;
1973 	u16 cs_sel = 0, ss_sel = 0;
1974 
1975 	/* inject #GP if in real mode or Virtual 8086 mode */
1976 	if (ctxt->mode == X86EMUL_MODE_REAL ||
1977 	    ctxt->mode == X86EMUL_MODE_VM86)
1978 		return emulate_gp(ctxt, 0);
1979 
1980 	setup_syscalls_segments(ctxt, &cs, &ss);
1981 
1982 	if ((ctxt->rex_prefix & 0x8) != 0x0)
1983 		usermode = X86EMUL_MODE_PROT64;
1984 	else
1985 		usermode = X86EMUL_MODE_PROT32;
1986 
1987 	cs.dpl = 3;
1988 	ss.dpl = 3;
1989 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1990 	switch (usermode) {
1991 	case X86EMUL_MODE_PROT32:
1992 		cs_sel = (u16)(msr_data + 16);
1993 		if ((msr_data & 0xfffc) == 0x0)
1994 			return emulate_gp(ctxt, 0);
1995 		ss_sel = (u16)(msr_data + 24);
1996 		break;
1997 	case X86EMUL_MODE_PROT64:
1998 		cs_sel = (u16)(msr_data + 32);
1999 		if (msr_data == 0x0)
2000 			return emulate_gp(ctxt, 0);
2001 		ss_sel = cs_sel + 8;
2002 		cs.d = 0;
2003 		cs.l = 1;
2004 		break;
2005 	}
2006 	cs_sel |= SELECTOR_RPL_MASK;
2007 	ss_sel |= SELECTOR_RPL_MASK;
2008 
2009 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2010 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2011 
2012 	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
2013 	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
2014 
2015 	return X86EMUL_CONTINUE;
2016 }
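/*
 * Editor's sketch of the SYSEXIT selector layout assumed above, all
 * derived from MSR_IA32_SYSENTER_CS (offsets are bytes into the GDT):
 *
 *	32-bit return:  CS = SYSENTER_CS + 16,  SS = SYSENTER_CS + 24
 *	64-bit return:  CS = SYSENTER_CS + 32,  SS = CS + 8
 *
 * RPL is then forced to 3 via "cs_sel |= SELECTOR_RPL_MASK" because the
 * destination is always user mode.
 */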
2017 
2018 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2019 {
2020 	int iopl;
2021 	if (ctxt->mode == X86EMUL_MODE_REAL)
2022 		return false;
2023 	if (ctxt->mode == X86EMUL_MODE_VM86)
2024 		return true;
2025 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
2026 	return ctxt->ops->cpl(ctxt) > iopl;
2027 }
2028 
2029 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2030 					    u16 port, u16 len)
2031 {
2032 	struct x86_emulate_ops *ops = ctxt->ops;
2033 	struct desc_struct tr_seg;
2034 	u32 base3;
2035 	int r;
2036 	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2037 	unsigned mask = (1 << len) - 1;
2038 	unsigned long base;
2039 
2040 	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2041 	if (!tr_seg.p)
2042 		return false;
2043 	if (desc_limit_scaled(&tr_seg) < 103)	/* room for I/O map base at 102 */
2044 		return false;
2045 	base = get_desc_base(&tr_seg);
2046 #ifdef CONFIG_X86_64
2047 	base |= ((u64)base3) << 32;
2048 #endif
2049 	r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2050 	if (r != X86EMUL_CONTINUE)
2051 		return false;
2052 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2053 		return false;
2054 	r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2055 	if (r != X86EMUL_CONTINUE)
2056 		return false;
2057 	if ((perm >> bit_idx) & mask)
2058 		return false;
2059 	return true;
2060 }
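/*
 * Editor's worked example for the TSS I/O bitmap walk above (values
 * are illustrative): for "in al, dx" with port 0x3f8 and len 1,
 *
 *	io_bitmap_ptr = <u16 at tr_base + 102>;	// I/O map base field
 *	byte_off = port / 8;			// 0x3f8 / 8 = 127
 *	bit_idx  = port & 0x7;			// 0x3f8 & 7 = 0
 *
 * and the access is allowed only if all "len" bits starting at bit_idx
 * are clear; 16 bits are read because a range may straddle a byte.
 */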
2061 
2062 static bool emulator_io_permitted(struct x86_emulate_ctxt *ctxt,
2063 				 u16 port, u16 len)
2064 {
2065 	if (ctxt->perm_ok)
2066 		return true;
2067 
2068 	if (emulator_bad_iopl(ctxt))
2069 		if (!emulator_io_port_access_allowed(ctxt, port, len))
2070 			return false;
2071 
2072 	ctxt->perm_ok = true;
2073 
2074 	return true;
2075 }
2076 
2077 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2078 				struct tss_segment_16 *tss)
2079 {
2080 	tss->ip = ctxt->_eip;
2081 	tss->flag = ctxt->eflags;
2082 	tss->ax = ctxt->regs[VCPU_REGS_RAX];
2083 	tss->cx = ctxt->regs[VCPU_REGS_RCX];
2084 	tss->dx = ctxt->regs[VCPU_REGS_RDX];
2085 	tss->bx = ctxt->regs[VCPU_REGS_RBX];
2086 	tss->sp = ctxt->regs[VCPU_REGS_RSP];
2087 	tss->bp = ctxt->regs[VCPU_REGS_RBP];
2088 	tss->si = ctxt->regs[VCPU_REGS_RSI];
2089 	tss->di = ctxt->regs[VCPU_REGS_RDI];
2090 
2091 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2092 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2093 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2094 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2095 	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2096 }
2097 
2098 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2099 				 struct tss_segment_16 *tss)
2100 {
2101 	int ret;
2102 
2103 	ctxt->_eip = tss->ip;
2104 	ctxt->eflags = tss->flag | 2;	/* bit 1 of EFLAGS is reserved, always set */
2105 	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
2106 	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
2107 	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
2108 	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
2109 	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
2110 	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
2111 	ctxt->regs[VCPU_REGS_RSI] = tss->si;
2112 	ctxt->regs[VCPU_REGS_RDI] = tss->di;
2113 
2114 	/*
2115 	 * SDM says that segment selectors are loaded before segment
2116 	 * descriptors
2117 	 */
2118 	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2119 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2120 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2121 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2122 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2123 
2124 	/*
2125 	 * Now load segment descriptors. If a fault happens at this stage,
2126 	 * it is handled in the context of the new task.
2127 	 */
2128 	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
2129 	if (ret != X86EMUL_CONTINUE)
2130 		return ret;
2131 	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2132 	if (ret != X86EMUL_CONTINUE)
2133 		return ret;
2134 	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2135 	if (ret != X86EMUL_CONTINUE)
2136 		return ret;
2137 	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2138 	if (ret != X86EMUL_CONTINUE)
2139 		return ret;
2140 	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2141 	if (ret != X86EMUL_CONTINUE)
2142 		return ret;
2143 
2144 	return X86EMUL_CONTINUE;
2145 }
2146 
2147 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2148 			  u16 tss_selector, u16 old_tss_sel,
2149 			  ulong old_tss_base, struct desc_struct *new_desc)
2150 {
2151 	struct x86_emulate_ops *ops = ctxt->ops;
2152 	struct tss_segment_16 tss_seg;
2153 	int ret;
2154 	u32 new_tss_base = get_desc_base(new_desc);
2155 
2156 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2157 			    &ctxt->exception);
2158 	if (ret != X86EMUL_CONTINUE)
2159 		/* FIXME: need to provide precise fault address */
2160 		return ret;
2161 
2162 	save_state_to_tss16(ctxt, &tss_seg);
2163 
2164 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2165 			     &ctxt->exception);
2166 	if (ret != X86EMUL_CONTINUE)
2167 		/* FIXME: need to provide precise fault address */
2168 		return ret;
2169 
2170 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2171 			    &ctxt->exception);
2172 	if (ret != X86EMUL_CONTINUE)
2173 		/* FIXME: need to provide precise fault address */
2174 		return ret;
2175 
2176 	if (old_tss_sel != 0xffff) {
2177 		tss_seg.prev_task_link = old_tss_sel;
2178 
2179 		ret = ops->write_std(ctxt, new_tss_base,
2180 				     &tss_seg.prev_task_link,
2181 				     sizeof tss_seg.prev_task_link,
2182 				     &ctxt->exception);
2183 		if (ret != X86EMUL_CONTINUE)
2184 			/* FIXME: need to provide precise fault address */
2185 			return ret;
2186 	}
2187 
2188 	return load_state_from_tss16(ctxt, &tss_seg);
2189 }
2190 
2191 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
2192 				struct tss_segment_32 *tss)
2193 {
2194 	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
2195 	tss->eip = ctxt->_eip;
2196 	tss->eflags = ctxt->eflags;
2197 	tss->eax = ctxt->regs[VCPU_REGS_RAX];
2198 	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
2199 	tss->edx = ctxt->regs[VCPU_REGS_RDX];
2200 	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
2201 	tss->esp = ctxt->regs[VCPU_REGS_RSP];
2202 	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
2203 	tss->esi = ctxt->regs[VCPU_REGS_RSI];
2204 	tss->edi = ctxt->regs[VCPU_REGS_RDI];
2205 
2206 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2207 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2208 	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2209 	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2210 	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
2211 	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
2212 	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2213 }
2214 
2215 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2216 				 struct tss_segment_32 *tss)
2217 {
2218 	int ret;
2219 
2220 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
2221 		return emulate_gp(ctxt, 0);
2222 	ctxt->_eip = tss->eip;
2223 	ctxt->eflags = tss->eflags | 2;	/* bit 1 of EFLAGS is reserved, always set */
2224 	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
2225 	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
2226 	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
2227 	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
2228 	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
2229 	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
2230 	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
2231 	ctxt->regs[VCPU_REGS_RDI] = tss->edi;
2232 
2233 	/*
2234 	 * SDM says that segment selectors are loaded before segment
2235 	 * descriptors
2236 	 */
2237 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2238 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2239 	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2240 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2241 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
2242 	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
2243 	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
2244 
2245 	/*
2246 	 * Now load segment descriptors. If a fault happens at this stage,
2247 	 * it is handled in the context of the new task.
2248 	 */
2249 	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
2250 	if (ret != X86EMUL_CONTINUE)
2251 		return ret;
2252 	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
2253 	if (ret != X86EMUL_CONTINUE)
2254 		return ret;
2255 	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
2256 	if (ret != X86EMUL_CONTINUE)
2257 		return ret;
2258 	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
2259 	if (ret != X86EMUL_CONTINUE)
2260 		return ret;
2261 	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
2262 	if (ret != X86EMUL_CONTINUE)
2263 		return ret;
2264 	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
2265 	if (ret != X86EMUL_CONTINUE)
2266 		return ret;
2267 	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
2268 	if (ret != X86EMUL_CONTINUE)
2269 		return ret;
2270 
2271 	return X86EMUL_CONTINUE;
2272 }
2273 
2274 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2275 			  u16 tss_selector, u16 old_tss_sel,
2276 			  ulong old_tss_base, struct desc_struct *new_desc)
2277 {
2278 	struct x86_emulate_ops *ops = ctxt->ops;
2279 	struct tss_segment_32 tss_seg;
2280 	int ret;
2281 	u32 new_tss_base = get_desc_base(new_desc);
2282 
2283 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2284 			    &ctxt->exception);
2285 	if (ret != X86EMUL_CONTINUE)
2286 		/* FIXME: need to provide precise fault address */
2287 		return ret;
2288 
2289 	save_state_to_tss32(ctxt, &tss_seg);
2290 
2291 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2292 			     &ctxt->exception);
2293 	if (ret != X86EMUL_CONTINUE)
2294 		/* FIXME: need to provide precise fault address */
2295 		return ret;
2296 
2297 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2298 			    &ctxt->exception);
2299 	if (ret != X86EMUL_CONTINUE)
2300 		/* FIXME: need to provide precise fault address */
2301 		return ret;
2302 
2303 	if (old_tss_sel != 0xffff) {
2304 		tss_seg.prev_task_link = old_tss_sel;
2305 
2306 		ret = ops->write_std(ctxt, new_tss_base,
2307 				     &tss_seg.prev_task_link,
2308 				     sizeof tss_seg.prev_task_link,
2309 				     &ctxt->exception);
2310 		if (ret != X86EMUL_CONTINUE)
2311 			/* FIXME: need to provide precise fault address */
2312 			return ret;
2313 	}
2314 
2315 	return load_state_from_tss32(ctxt, &tss_seg);
2316 }
2317 
2318 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
2319 				   u16 tss_selector, int reason,
2320 				   bool has_error_code, u32 error_code)
2321 {
2322 	struct x86_emulate_ops *ops = ctxt->ops;
2323 	struct desc_struct curr_tss_desc, next_tss_desc;
2324 	int ret;
2325 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
2326 	ulong old_tss_base =
2327 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
2328 	u32 desc_limit;
2329 
2330 	/* FIXME: old_tss_base == ~0 ? */
2331 
2332 	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2333 	if (ret != X86EMUL_CONTINUE)
2334 		return ret;
2335 	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2336 	if (ret != X86EMUL_CONTINUE)
2337 		return ret;
2338 
2339 	/* FIXME: check that next_tss_desc is tss */
2340 
2341 	if (reason != TASK_SWITCH_IRET) {
2342 		if ((tss_selector & 3) > next_tss_desc.dpl ||
2343 		    ops->cpl(ctxt) > next_tss_desc.dpl)
2344 			return emulate_gp(ctxt, 0);
2345 	}
2346 
2347 	desc_limit = desc_limit_scaled(&next_tss_desc);
2348 	if (!next_tss_desc.p ||
2349 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
2350 	     desc_limit < 0x2b)) {
2351 		emulate_ts(ctxt, tss_selector & 0xfffc);
2352 		return X86EMUL_PROPAGATE_FAULT;
2353 	}
2354 
2355 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
2356 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
2357 		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
2358 	}
2359 
2360 	if (reason == TASK_SWITCH_IRET)
2361 		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
2362 
2363 	/* Set back link to the previous task only if the NT bit is set in
2364 	   EFLAGS; note that old_tss_sel is not used after this point. */
2365 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
2366 		old_tss_sel = 0xffff;
2367 
2368 	if (next_tss_desc.type & 8)
2369 		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
2370 				     old_tss_base, &next_tss_desc);
2371 	else
2372 		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
2373 				     old_tss_base, &next_tss_desc);
2374 	if (ret != X86EMUL_CONTINUE)
2375 		return ret;
2376 
2377 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
2378 		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
2379 
2380 	if (reason != TASK_SWITCH_IRET) {
2381 		next_tss_desc.type |= (1 << 1); /* set busy flag */
2382 		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
2383 	}
2384 
2385 	ops->set_cr(ctxt, 0,  ops->get_cr(ctxt, 0) | X86_CR0_TS);
2386 	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
2387 
2388 	if (has_error_code) {
2389 		ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
2390 		ctxt->lock_prefix = 0;
2391 		ctxt->src.val = (unsigned long) error_code;
2392 		ret = em_push(ctxt);
2393 	}
2394 
2395 	return ret;
2396 }
2397 
2398 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
2399 			 u16 tss_selector, int reason,
2400 			 bool has_error_code, u32 error_code)
2401 {
2402 	int rc;
2403 
2404 	ctxt->_eip = ctxt->eip;
2405 	ctxt->dst.type = OP_NONE;
2406 
2407 	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
2408 				     has_error_code, error_code);
2409 
2410 	if (rc == X86EMUL_CONTINUE)
2411 		ctxt->eip = ctxt->_eip;
2412 
2413 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
2414 }
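/*
 * Editor's summary of the task-switch sequence emulated above
 * (condensed; the SDM task-management chapter has the full rules):
 *
 *	1. validate the incoming TSS descriptor (DPL, present, limit);
 *	2. save live register state into the outgoing TSS;
 *	3. load register and selector state from the incoming TSS;
 *	4. maintain busy bits, EFLAGS.NT and the back link depending on
 *	   whether the switch came from CALL/gate, JMP or IRET;
 *	5. set CR0.TS, load TR, and push the error code if the switch
 *	   was triggered by a faulting exception.
 */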
2415 
2416 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
2417 			    int reg, struct operand *op)
2418 {
2419 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
2420 
2421 	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
2422 	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
2423 	op->addr.mem.seg = seg;
2424 }
2425 
2426 static int em_das(struct x86_emulate_ctxt *ctxt)
2427 {
2428 	u8 al, old_al;
2429 	bool af, cf, old_cf;
2430 
2431 	cf = ctxt->eflags & X86_EFLAGS_CF;
2432 	al = ctxt->dst.val;
2433 
2434 	old_al = al;
2435 	old_cf = cf;
2436 	cf = false;
2437 	af = ctxt->eflags & X86_EFLAGS_AF;
2438 	if ((al & 0x0f) > 9 || af) {
2439 		al -= 6;
2440 		cf = old_cf | (al >= 250);	/* AL wrapped past 0xfa: the -6 borrowed */
2441 		af = true;
2442 	} else {
2443 		af = false;
2444 	}
2445 	if (old_al > 0x99 || old_cf) {
2446 		al -= 0x60;
2447 		cf = true;
2448 	}
2449 
2450 	ctxt->dst.val = al;
2451 	/* Set PF, ZF, SF */
2452 	ctxt->src.type = OP_IMM;
2453 	ctxt->src.val = 0;
2454 	ctxt->src.bytes = 1;
2455 	emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
2456 	ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
2457 	if (cf)
2458 		ctxt->eflags |= X86_EFLAGS_CF;
2459 	if (af)
2460 		ctxt->eflags |= X86_EFLAGS_AF;
2461 	return X86EMUL_CONTINUE;
2462 }
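/*
 * Editor's worked example for em_das() above (illustrative): after a
 * packed-BCD subtraction such as 0x52 - 0x07, AL holds the raw 0x4b:
 *
 *	(al & 0x0f) > 9			// low nibble 0xb needs fixing
 *	al -= 6;			// AL = 0x45, the decimal answer
 *
 * The "al >= 250" test catches a borrow out of that low-nibble fixup
 * (AL wrapping past 0xfa), which must set CF as hardware would.
 */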
2463 
2464 static int em_call_far(struct x86_emulate_ctxt *ctxt)
2465 {
2466 	u16 sel, old_cs;
2467 	ulong old_eip;
2468 	int rc;
2469 
2470 	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2471 	old_eip = ctxt->_eip;
2472 
2473 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2474 	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
2475 		return X86EMUL_CONTINUE;
2476 
2477 	ctxt->_eip = 0;
2478 	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
2479 
2480 	ctxt->src.val = old_cs;
2481 	rc = em_push(ctxt);
2482 	if (rc != X86EMUL_CONTINUE)
2483 		return rc;
2484 
2485 	ctxt->src.val = old_eip;
2486 	return em_push(ctxt);
2487 }
2488 
2489 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2490 {
2491 	int rc;
2492 
2493 	ctxt->dst.type = OP_REG;
2494 	ctxt->dst.addr.reg = &ctxt->_eip;
2495 	ctxt->dst.bytes = ctxt->op_bytes;
2496 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
2497 	if (rc != X86EMUL_CONTINUE)
2498 		return rc;
2499 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
2500 	return X86EMUL_CONTINUE;
2501 }
2502 
2503 static int em_add(struct x86_emulate_ctxt *ctxt)
2504 {
2505 	emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
2506 	return X86EMUL_CONTINUE;
2507 }
2508 
2509 static int em_or(struct x86_emulate_ctxt *ctxt)
2510 {
2511 	emulate_2op_SrcV("or", ctxt->src, ctxt->dst, ctxt->eflags);
2512 	return X86EMUL_CONTINUE;
2513 }
2514 
2515 static int em_adc(struct x86_emulate_ctxt *ctxt)
2516 {
2517 	emulate_2op_SrcV("adc", ctxt->src, ctxt->dst, ctxt->eflags);
2518 	return X86EMUL_CONTINUE;
2519 }
2520 
2521 static int em_sbb(struct x86_emulate_ctxt *ctxt)
2522 {
2523 	emulate_2op_SrcV("sbb", ctxt->src, ctxt->dst, ctxt->eflags);
2524 	return X86EMUL_CONTINUE;
2525 }
2526 
2527 static int em_and(struct x86_emulate_ctxt *ctxt)
2528 {
2529 	emulate_2op_SrcV("and", ctxt->src, ctxt->dst, ctxt->eflags);
2530 	return X86EMUL_CONTINUE;
2531 }
2532 
2533 static int em_sub(struct x86_emulate_ctxt *ctxt)
2534 {
2535 	emulate_2op_SrcV("sub", ctxt->src, ctxt->dst, ctxt->eflags);
2536 	return X86EMUL_CONTINUE;
2537 }
2538 
2539 static int em_xor(struct x86_emulate_ctxt *ctxt)
2540 {
2541 	emulate_2op_SrcV("xor", ctxt->src, ctxt->dst, ctxt->eflags);
2542 	return X86EMUL_CONTINUE;
2543 }
2544 
2545 static int em_cmp(struct x86_emulate_ctxt *ctxt)
2546 {
2547 	emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
2548 	/* Disable writeback. */
2549 	ctxt->dst.type = OP_NONE;
2550 	return X86EMUL_CONTINUE;
2551 }
2552 
2553 static int em_test(struct x86_emulate_ctxt *ctxt)
2554 {
2555 	emulate_2op_SrcV("test", ctxt->src, ctxt->dst, ctxt->eflags);
2556 	return X86EMUL_CONTINUE;
2557 }
2558 
2559 static int em_xchg(struct x86_emulate_ctxt *ctxt)
2560 {
2561 	/* Write back the register source. */
2562 	ctxt->src.val = ctxt->dst.val;
2563 	write_register_operand(&ctxt->src);
2564 
2565 	/* Write back the memory destination with implicit LOCK prefix. */
2566 	ctxt->dst.val = ctxt->src.orig_val;
2567 	ctxt->lock_prefix = 1;
2568 	return X86EMUL_CONTINUE;
2569 }
2570 
2571 static int em_imul(struct x86_emulate_ctxt *ctxt)
2572 {
2573 	emulate_2op_SrcV_nobyte("imul", ctxt->src, ctxt->dst, ctxt->eflags);
2574 	return X86EMUL_CONTINUE;
2575 }
2576 
2577 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
2578 {
2579 	ctxt->dst.val = ctxt->src2.val;
2580 	return em_imul(ctxt);
2581 }
2582 
2583 static int em_cwd(struct x86_emulate_ctxt *ctxt)
2584 {
2585 	ctxt->dst.type = OP_REG;
2586 	ctxt->dst.bytes = ctxt->src.bytes;
2587 	ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
2588 	ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2589 
2590 	return X86EMUL_CONTINUE;
2591 }
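/*
 * Editor's note on the branch-free sign fill in em_cwd() above: with
 * a 2-byte source, "src.val >> 15" is 1 for a negative AX, 0 otherwise:
 *
 *	~((1) - 1) == ~0   == 0xffff	// DX = all ones
 *	~((0) - 1) == ~(-1) == 0	// DX = all zeros
 *
 * so DX/EDX/RDX is filled with copies of the source sign bit, which is
 * exactly the CWD/CDQ/CQO behaviour.
 */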
2592 
2593 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2594 {
2595 	u64 tsc = 0;
2596 
2597 	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2598 	ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
2599 	ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
2600 	return X86EMUL_CONTINUE;
2601 }
2602 
2603 static int em_mov(struct x86_emulate_ctxt *ctxt)
2604 {
2605 	ctxt->dst.val = ctxt->src.val;
2606 	return X86EMUL_CONTINUE;
2607 }
2608 
2609 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
2610 {
2611 	if (ctxt->modrm_reg > VCPU_SREG_GS)
2612 		return emulate_ud(ctxt);
2613 
2614 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
2615 	return X86EMUL_CONTINUE;
2616 }
2617 
2618 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2619 {
2620 	u16 sel = ctxt->src.val;
2621 
2622 	if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
2623 		return emulate_ud(ctxt);
2624 
2625 	if (ctxt->modrm_reg == VCPU_SREG_SS)
2626 		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
2627 
2628 	/* Disable writeback. */
2629 	ctxt->dst.type = OP_NONE;
2630 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2631 }
2632 
2633 static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2634 {
2635 	memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2636 	return X86EMUL_CONTINUE;
2637 }
2638 
2639 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2640 {
2641 	int rc;
2642 	ulong linear;
2643 
2644 	rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
2645 	if (rc == X86EMUL_CONTINUE)
2646 		ctxt->ops->invlpg(ctxt, linear);
2647 	/* Disable writeback. */
2648 	ctxt->dst.type = OP_NONE;
2649 	return X86EMUL_CONTINUE;
2650 }
2651 
2652 static int em_clts(struct x86_emulate_ctxt *ctxt)
2653 {
2654 	ulong cr0;
2655 
2656 	cr0 = ctxt->ops->get_cr(ctxt, 0);
2657 	cr0 &= ~X86_CR0_TS;
2658 	ctxt->ops->set_cr(ctxt, 0, cr0);
2659 	return X86EMUL_CONTINUE;
2660 }
2661 
2662 static int em_vmcall(struct x86_emulate_ctxt *ctxt)
2663 {
2664 	int rc;
2665 
2666 	if (ctxt->modrm_mod != 3 || ctxt->modrm_rm != 1)
2667 		return X86EMUL_UNHANDLEABLE;
2668 
2669 	rc = ctxt->ops->fix_hypercall(ctxt);
2670 	if (rc != X86EMUL_CONTINUE)
2671 		return rc;
2672 
2673 	/* Let the processor re-execute the fixed hypercall */
2674 	ctxt->_eip = ctxt->eip;
2675 	/* Disable writeback. */
2676 	ctxt->dst.type = OP_NONE;
2677 	return X86EMUL_CONTINUE;
2678 }
2679 
2680 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
2681 {
2682 	struct desc_ptr desc_ptr;
2683 	int rc;
2684 
2685 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2686 			     &desc_ptr.size, &desc_ptr.address,
2687 			     ctxt->op_bytes);
2688 	if (rc != X86EMUL_CONTINUE)
2689 		return rc;
2690 	ctxt->ops->set_gdt(ctxt, &desc_ptr);
2691 	/* Disable writeback. */
2692 	ctxt->dst.type = OP_NONE;
2693 	return X86EMUL_CONTINUE;
2694 }
2695 
2696 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
2697 {
2698 	int rc;
2699 
2700 	rc = ctxt->ops->fix_hypercall(ctxt);
2701 
2702 	/* Disable writeback. */
2703 	ctxt->dst.type = OP_NONE;
2704 	return rc;
2705 }
2706 
2707 static int em_lidt(struct x86_emulate_ctxt *ctxt)
2708 {
2709 	struct desc_ptr desc_ptr;
2710 	int rc;
2711 
2712 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
2713 			     &desc_ptr.size, &desc_ptr.address,
2714 			     ctxt->op_bytes);
2715 	if (rc != X86EMUL_CONTINUE)
2716 		return rc;
2717 	ctxt->ops->set_idt(ctxt, &desc_ptr);
2718 	/* Disable writeback. */
2719 	ctxt->dst.type = OP_NONE;
2720 	return X86EMUL_CONTINUE;
2721 }
2722 
2723 static int em_smsw(struct x86_emulate_ctxt *ctxt)
2724 {
2725 	ctxt->dst.bytes = 2;
2726 	ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
2727 	return X86EMUL_CONTINUE;
2728 }
2729 
2730 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
2731 {
2732 	ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
2733 			  | (ctxt->src.val & 0x0f));
2734 	ctxt->dst.type = OP_NONE;
2735 	return X86EMUL_CONTINUE;
2736 }
2737 
2738 static int em_loop(struct x86_emulate_ctxt *ctxt)
2739 {
2740 	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
2741 	if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
2742 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
2743 		jmp_rel(ctxt, ctxt->src.val);
2744 
2745 	return X86EMUL_CONTINUE;
2746 }
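/*
 * Editor's note on "test_cc(ctxt->b ^ 0x5, ...)" above: the LOOPcc
 * opcodes are 0xe0 (loopne) and 0xe1 (loope), and test_cc() keys off
 * the low nibble as in the Jcc encodings, so:
 *
 *	0xe0 ^ 0x5  ->  nibble 0x5 = "not zero"  (loopne continues on ZF=0)
 *	0xe1 ^ 0x5  ->  nibble 0x4 = "zero"      (loope  continues on ZF=1)
 *
 * while plain loop (0xe2) is special-cased to ignore the flags.
 */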
2747 
2748 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
2749 {
2750 	if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
2751 		jmp_rel(ctxt, ctxt->src.val);
2752 
2753 	return X86EMUL_CONTINUE;
2754 }
2755 
2756 static int em_cli(struct x86_emulate_ctxt *ctxt)
2757 {
2758 	if (emulator_bad_iopl(ctxt))
2759 		return emulate_gp(ctxt, 0);
2760 
2761 	ctxt->eflags &= ~X86_EFLAGS_IF;
2762 	return X86EMUL_CONTINUE;
2763 }
2764 
2765 static int em_sti(struct x86_emulate_ctxt *ctxt)
2766 {
2767 	if (emulator_bad_iopl(ctxt))
2768 		return emulate_gp(ctxt, 0);
2769 
2770 	ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
2771 	ctxt->eflags |= X86_EFLAGS_IF;
2772 	return X86EMUL_CONTINUE;
2773 }
2774 
2775 static bool valid_cr(int nr)
2776 {
2777 	switch (nr) {
2778 	case 0:
2779 	case 2 ... 4:
2780 	case 8:
2781 		return true;
2782 	default:
2783 		return false;
2784 	}
2785 }
2786 
2787 static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2788 {
2789 	if (!valid_cr(ctxt->modrm_reg))
2790 		return emulate_ud(ctxt);
2791 
2792 	return X86EMUL_CONTINUE;
2793 }
2794 
2795 static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2796 {
2797 	u64 new_val = ctxt->src.val64;
2798 	int cr = ctxt->modrm_reg;
2799 	u64 efer = 0;
2800 
2801 	static u64 cr_reserved_bits[] = {
2802 		0xffffffff00000000ULL,
2803 		0, 0, 0, /* CR3 checked later */
2804 		CR4_RESERVED_BITS,
2805 		0, 0, 0,
2806 		CR8_RESERVED_BITS,
2807 	};
2808 
2809 	if (!valid_cr(cr))
2810 		return emulate_ud(ctxt);
2811 
2812 	if (new_val & cr_reserved_bits[cr])
2813 		return emulate_gp(ctxt, 0);
2814 
2815 	switch (cr) {
2816 	case 0: {
2817 		u64 cr4;
2818 		if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2819 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2820 			return emulate_gp(ctxt, 0);
2821 
2822 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2823 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2824 
2825 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2826 		    !(cr4 & X86_CR4_PAE))
2827 			return emulate_gp(ctxt, 0);
2828 
2829 		break;
2830 		}
2831 	case 3: {
2832 		u64 rsvd = 0;
2833 
2834 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2835 		if (efer & EFER_LMA)
2836 			rsvd = CR3_L_MODE_RESERVED_BITS;
2837 		else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE)
2838 			rsvd = CR3_PAE_RESERVED_BITS;
2839 		else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG)
2840 			rsvd = CR3_NONPAE_RESERVED_BITS;
2841 
2842 		if (new_val & rsvd)
2843 			return emulate_gp(ctxt, 0);
2844 
2845 		break;
2846 		}
2847 	case 4: {
2848 		u64 cr4;
2849 
2850 		cr4 = ctxt->ops->get_cr(ctxt, 4);
2851 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2852 
2853 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2854 			return emulate_gp(ctxt, 0);
2855 
2856 		break;
2857 		}
2858 	}
2859 
2860 	return X86EMUL_CONTINUE;
2861 }
2862 
2863 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2864 {
2865 	unsigned long dr7;
2866 
2867 	ctxt->ops->get_dr(ctxt, 7, &dr7);
2868 
2869 	/* Check if DR7.Global_Enable is set */
2870 	/* Check if DR7.GD (general detect enable, bit 13) is set */
2871 }
2872 
2873 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
2874 {
2875 	int dr = ctxt->modrm_reg;
2876 	u64 cr4;
2877 
2878 	if (dr > 7)
2879 		return emulate_ud(ctxt);
2880 
2881 	cr4 = ctxt->ops->get_cr(ctxt, 4);
2882 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
2883 		return emulate_ud(ctxt);
2884 
2885 	if (check_dr7_gd(ctxt))
2886 		return emulate_db(ctxt);
2887 
2888 	return X86EMUL_CONTINUE;
2889 }
2890 
2891 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
2892 {
2893 	u64 new_val = ctxt->src.val64;
2894 	int dr = ctxt->modrm_reg;
2895 
2896 	if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
2897 		return emulate_gp(ctxt, 0);
2898 
2899 	return check_dr_read(ctxt);
2900 }
2901 
2902 static int check_svme(struct x86_emulate_ctxt *ctxt)
2903 {
2904 	u64 efer;
2905 
2906 	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2907 
2908 	if (!(efer & EFER_SVME))
2909 		return emulate_ud(ctxt);
2910 
2911 	return X86EMUL_CONTINUE;
2912 }
2913 
2914 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
2915 {
2916 	u64 rax = ctxt->regs[VCPU_REGS_RAX];
2917 
2918 	/* Valid physical address? */
2919 	if (rax & 0xffff000000000000ULL)
2920 		return emulate_gp(ctxt, 0);
2921 
2922 	return check_svme(ctxt);
2923 }
2924 
2925 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
2926 {
2927 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2928 
2929 	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
2930 		return emulate_ud(ctxt);
2931 
2932 	return X86EMUL_CONTINUE;
2933 }
2934 
2935 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
2936 {
2937 	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
2938 	u64 rcx = ctxt->regs[VCPU_REGS_RCX];
2939 
2940 	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
2941 	    (rcx > 3))
2942 		return emulate_gp(ctxt, 0);
2943 
2944 	return X86EMUL_CONTINUE;
2945 }
2946 
2947 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
2948 {
2949 	ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
2950 	if (!emulator_io_permitted(ctxt, ctxt->src.val, ctxt->dst.bytes))
2951 		return emulate_gp(ctxt, 0);
2952 
2953 	return X86EMUL_CONTINUE;
2954 }
2955 
2956 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
2957 {
2958 	ctxt->src.bytes = min(ctxt->src.bytes, 4u);
2959 	if (!emulator_io_permitted(ctxt, ctxt->dst.val, ctxt->src.bytes))
2960 		return emulate_gp(ctxt, 0);
2961 
2962 	return X86EMUL_CONTINUE;
2963 }
2964 
2965 #define D(_y) { .flags = (_y) }
2966 #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
2967 #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
2968 		      .check_perm = (_p) }
2969 #define N    D(0)
2970 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
2971 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
2972 #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) }
2973 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
2974 #define II(_f, _e, _i) \
2975 	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
2976 #define IIP(_f, _e, _i, _p) \
2977 	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
2978 	  .check_perm = (_p) }
2979 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
2980 
2981 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
2982 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
2983 #define I2bv(_f, _e)  I((_f) | ByteOp, _e), I(_f, _e)
2984 
2985 #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e),		\
2986 		I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e),	\
2987 		I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
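/*
 * Editor's note: the 2bv/6ALU helpers above stamp out table entries in
 * byte and word/dword pairs; for instance, in opcode_table below,
 *
 *	I6ALU(Lock, em_add)
 *
 * expands to the six classic ALU encodings 0x00-0x05 (r/m,reg and
 * reg,r/m in byte and full-width forms, plus AL/eAX,imm), all routed
 * to em_add(), with Lock stripped from the forms where LOCK is illegal.
 */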
2988 
2989 static struct opcode group7_rm1[] = {
2990 	DI(SrcNone | ModRM | Priv, monitor),
2991 	DI(SrcNone | ModRM | Priv, mwait),
2992 	N, N, N, N, N, N,
2993 };
2994 
2995 static struct opcode group7_rm3[] = {
2996 	DIP(SrcNone | ModRM | Prot | Priv, vmrun,   check_svme_pa),
2997 	II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall),
2998 	DIP(SrcNone | ModRM | Prot | Priv, vmload,  check_svme_pa),
2999 	DIP(SrcNone | ModRM | Prot | Priv, vmsave,  check_svme_pa),
3000 	DIP(SrcNone | ModRM | Prot | Priv, stgi,    check_svme),
3001 	DIP(SrcNone | ModRM | Prot | Priv, clgi,    check_svme),
3002 	DIP(SrcNone | ModRM | Prot | Priv, skinit,  check_svme),
3003 	DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme),
3004 };
3005 
3006 static struct opcode group7_rm7[] = {
3007 	N,
3008 	DIP(SrcNone | ModRM, rdtscp, check_rdtsc),
3009 	N, N, N, N, N, N,
3010 };
3011 
3012 static struct opcode group1[] = {
3013 	I(Lock, em_add),
3014 	I(Lock, em_or),
3015 	I(Lock, em_adc),
3016 	I(Lock, em_sbb),
3017 	I(Lock, em_and),
3018 	I(Lock, em_sub),
3019 	I(Lock, em_xor),
3020 	I(0, em_cmp),
3021 };
3022 
3023 static struct opcode group1A[] = {
3024 	D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N,
3025 };
3026 
3027 static struct opcode group3[] = {
3028 	D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM),
3029 	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3030 	X4(D(SrcMem | ModRM)),
3031 };
3032 
3033 static struct opcode group4[] = {
3034 	D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock),
3035 	N, N, N, N, N, N,
3036 };
3037 
3038 static struct opcode group5[] = {
3039 	D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock),
3040 	D(SrcMem | ModRM | Stack),
3041 	I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far),
3042 	D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps),
3043 	D(SrcMem | ModRM | Stack), N,
3044 };
3045 
3046 static struct opcode group6[] = {
3047 	DI(ModRM | Prot,        sldt),
3048 	DI(ModRM | Prot,        str),
3049 	DI(ModRM | Prot | Priv, lldt),
3050 	DI(ModRM | Prot | Priv, ltr),
3051 	N, N, N, N,
3052 };
3053 
3054 static struct group_dual group7 = { {
3055 	DI(ModRM | Mov | DstMem | Priv, sgdt),
3056 	DI(ModRM | Mov | DstMem | Priv, sidt),
3057 	II(ModRM | SrcMem | Priv, em_lgdt, lgdt),
3058 	II(ModRM | SrcMem | Priv, em_lidt, lidt),
3059 	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3060 	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw),
3061 	II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3062 }, {
3063 	I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall),
3064 	EXT(0, group7_rm1),
3065 	N, EXT(0, group7_rm3),
3066 	II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N,
3067 	II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7),
3068 } };
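/*
 * Editor's note: a group_dual carries two 8-entry decode tables, and
 * the decode loop in x86_decode_insn() below selects between them on
 * the ModRM mod field, roughly:
 *
 *	if ((modrm >> 6) == 3)				// register form
 *		opcode = gdual->mod3[(modrm >> 3) & 7];
 *	else						// memory form
 *		opcode = gdual->mod012[(modrm >> 3) & 7];
 *
 * which is how 0f 01 (group7) yields sgdt/sidt/lgdt/... for memory
 * operands but vmcall/monitor/mwait/... for register encodings.
 */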
3069 
3070 static struct opcode group8[] = {
3071 	N, N, N, N,
3072 	D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock),
3073 	D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock),
3074 };
3075 
3076 static struct group_dual group9 = { {
3077 	N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N,
3078 }, {
3079 	N, N, N, N, N, N, N, N,
3080 } };
3081 
3082 static struct opcode group11[] = {
3083 	I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)),
3084 };
3085 
3086 static struct gprefix pfx_0f_6f_0f_7f = {
3087 	N, N, N, I(Sse, em_movdqu),
3088 };
3089 
3090 static struct opcode opcode_table[256] = {
3091 	/* 0x00 - 0x07 */
3092 	I6ALU(Lock, em_add),
3093 	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3094 	/* 0x08 - 0x0F */
3095 	I6ALU(Lock, em_or),
3096 	D(ImplicitOps | Stack | No64), N,
3097 	/* 0x10 - 0x17 */
3098 	I6ALU(Lock, em_adc),
3099 	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3100 	/* 0x18 - 0x1F */
3101 	I6ALU(Lock, em_sbb),
3102 	D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64),
3103 	/* 0x20 - 0x27 */
3104 	I6ALU(Lock, em_and), N, N,
3105 	/* 0x28 - 0x2F */
3106 	I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
3107 	/* 0x30 - 0x37 */
3108 	I6ALU(Lock, em_xor), N, N,
3109 	/* 0x38 - 0x3F */
3110 	I6ALU(0, em_cmp), N, N,
3111 	/* 0x40 - 0x4F */
3112 	X16(D(DstReg)),
3113 	/* 0x50 - 0x57 */
3114 	X8(I(SrcReg | Stack, em_push)),
3115 	/* 0x58 - 0x5F */
3116 	X8(I(DstReg | Stack, em_pop)),
3117 	/* 0x60 - 0x67 */
3118 	I(ImplicitOps | Stack | No64, em_pusha),
3119 	I(ImplicitOps | Stack | No64, em_popa),
3120 	N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86-64) */,
3121 	N, N, N, N,
3122 	/* 0x68 - 0x6F */
3123 	I(SrcImm | Mov | Stack, em_push),
3124 	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3125 	I(SrcImmByte | Mov | Stack, em_push),
3126 	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3127 	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3128 	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3129 	/* 0x70 - 0x7F */
3130 	X16(D(SrcImmByte)),
3131 	/* 0x80 - 0x87 */
3132 	G(ByteOp | DstMem | SrcImm | ModRM | Group, group1),
3133 	G(DstMem | SrcImm | ModRM | Group, group1),
3134 	G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1),
3135 	G(DstMem | SrcImmByte | ModRM | Group, group1),
3136 	I2bv(DstMem | SrcReg | ModRM, em_test),
3137 	I2bv(DstMem | SrcReg | ModRM | Lock, em_xchg),
3138 	/* 0x88 - 0x8F */
3139 	I2bv(DstMem | SrcReg | ModRM | Mov, em_mov),
3140 	I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
3141 	I(DstMem | SrcNone | ModRM | Mov, em_mov_rm_sreg),
3142 	D(ModRM | SrcMem | NoAccess | DstReg),
3143 	I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
3144 	G(0, group1A),
3145 	/* 0x90 - 0x97 */
3146 	DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
3147 	/* 0x98 - 0x9F */
3148 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
3149 	I(SrcImmFAddr | No64, em_call_far), N,
3150 	II(ImplicitOps | Stack, em_pushf, pushf),
3151 	II(ImplicitOps | Stack, em_popf, popf), N, N,
3152 	/* 0xA0 - 0xA7 */
3153 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
3154 	I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov),
3155 	I2bv(SrcSI | DstDI | Mov | String, em_mov),
3156 	I2bv(SrcSI | DstDI | String, em_cmp),
3157 	/* 0xA8 - 0xAF */
3158 	I2bv(DstAcc | SrcImm, em_test),
3159 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
3160 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
3161 	I2bv(SrcAcc | DstDI | String, em_cmp),
3162 	/* 0xB0 - 0xB7 */
3163 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
3164 	/* 0xB8 - 0xBF */
3165 	X8(I(DstReg | SrcImm | Mov, em_mov)),
3166 	/* 0xC0 - 0xC7 */
3167 	D2bv(DstMem | SrcImmByte | ModRM),
3168 	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
3169 	I(ImplicitOps | Stack, em_ret),
3170 	D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64),
3171 	G(ByteOp, group11), G(0, group11),
3172 	/* 0xC8 - 0xCF */
3173 	N, N, N, I(ImplicitOps | Stack, em_ret_far),
3174 	D(ImplicitOps), DI(SrcImmByte, intn),
3175 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
3176 	/* 0xD0 - 0xD7 */
3177 	D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM),
3178 	N, N, N, N,
3179 	/* 0xD8 - 0xDF */
3180 	N, N, N, N, N, N, N, N,
3181 	/* 0xE0 - 0xE7 */
3182 	X3(I(SrcImmByte, em_loop)),
3183 	I(SrcImmByte, em_jcxz),
3184 	D2bvIP(SrcImmUByte | DstAcc, in,  check_perm_in),
3185 	D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out),
3186 	/* 0xE8 - 0xEF */
3187 	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3188 	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
3189 	D2bvIP(SrcDX | DstAcc, in,  check_perm_in),
3190 	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3191 	/* 0xF0 - 0xF7 */
3192 	N, DI(ImplicitOps, icebp), N, N,
3193 	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
3194 	G(ByteOp, group3), G(0, group3),
3195 	/* 0xF8 - 0xFF */
3196 	D(ImplicitOps), D(ImplicitOps),
3197 	I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
3198 	D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3199 };
3200 
3201 static struct opcode twobyte_table[256] = {
3202 	/* 0x00 - 0x0F */
3203 	G(0, group6), GD(0, &group7), N, N,
3204 	N, I(ImplicitOps | VendorSpecific, em_syscall),
3205 	II(ImplicitOps | Priv, em_clts, clts), N,
3206 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3207 	N, D(ImplicitOps | ModRM), N, N,
3208 	/* 0x10 - 0x1F */
3209 	N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N,
3210 	/* 0x20 - 0x2F */
3211 	DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read),
3212 	DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read),
3213 	DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write),
3214 	DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write),
3215 	N, N, N, N,
3216 	N, N, N, N, N, N, N, N,
3217 	/* 0x30 - 0x3F */
3218 	DI(ImplicitOps | Priv, wrmsr),
3219 	IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
3220 	DI(ImplicitOps | Priv, rdmsr),
3221 	DIP(ImplicitOps | Priv, rdpmc, check_rdpmc),
3222 	I(ImplicitOps | VendorSpecific, em_sysenter),
3223 	I(ImplicitOps | Priv | VendorSpecific, em_sysexit),
3224 	N, N,
3225 	N, N, N, N, N, N, N, N,
3226 	/* 0x40 - 0x4F */
3227 	X16(D(DstReg | SrcMem | ModRM | Mov)),
3228 	/* 0x50 - 0x5F */
3229 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3230 	/* 0x60 - 0x6F */
3231 	N, N, N, N,
3232 	N, N, N, N,
3233 	N, N, N, N,
3234 	N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
3235 	/* 0x70 - 0x7F */
3236 	N, N, N, N,
3237 	N, N, N, N,
3238 	N, N, N, N,
3239 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
3240 	/* 0x80 - 0x8F */
3241 	X16(D(SrcImm)),
3242 	/* 0x90 - 0x9F */
3243 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)),
3244 	/* 0xA0 - 0xA7 */
3245 	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3246 	DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp),
3247 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3248 	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
3249 	/* 0xA8 - 0xAF */
3250 	D(ImplicitOps | Stack), D(ImplicitOps | Stack),
3251 	DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3252 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
3253 	D(DstMem | SrcReg | Src2CL | ModRM),
3254 	D(ModRM), I(DstReg | SrcMem | ModRM, em_imul),
3255 	/* 0xB0 - 0xB7 */
3256 	D2bv(DstMem | SrcReg | ModRM | Lock),
3257 	D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3258 	D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM),
3259 	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3260 	/* 0xB8 - 0xBF */
3261 	N, N,
3262 	G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock),
3263 	D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM),
3264 	D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
3265 	/* 0xC0 - 0xCF */
3266 	D2bv(DstMem | SrcReg | ModRM | Lock),
3267 	N, D(DstMem | SrcReg | ModRM | Mov),
3268 	N, N, N, GD(0, &group9),
3269 	N, N, N, N, N, N, N, N,
3270 	/* 0xD0 - 0xDF */
3271 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3272 	/* 0xE0 - 0xEF */
3273 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
3274 	/* 0xF0 - 0xFF */
3275 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
3276 };
3277 
3278 #undef D
3279 #undef N
3280 #undef G
3281 #undef GD
3282 #undef I
3283 #undef GP
3284 #undef EXT
3285 
3286 #undef D2bv
3287 #undef D2bvIP
3288 #undef I2bv
3289 #undef I6ALU
3290 
3291 static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
3292 {
3293 	unsigned size;
3294 
3295 	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3296 	if (size == 8)
3297 		size = 4;
3298 	return size;
3299 }
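/*
 * Editor's note: imm_size() clamps 8 down to 4 because instructions
 * with 64-bit operand size generally carry at most a 32-bit immediate
 * that the CPU sign-extends, e.g. (hand-assembled, illustrative):
 *
 *	48 05 78 56 34 12	add rax, 0x12345678	// imm32, not imm64
 */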
3300 
3301 static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
3302 		      unsigned size, bool sign_extension)
3303 {
3304 	int rc = X86EMUL_CONTINUE;
3305 
3306 	op->type = OP_IMM;
3307 	op->bytes = size;
3308 	op->addr.mem.ea = ctxt->_eip;
3309 	/* NB. Immediates are sign-extended as necessary. */
3310 	switch (op->bytes) {
3311 	case 1:
3312 		op->val = insn_fetch(s8, 1, ctxt->_eip);
3313 		break;
3314 	case 2:
3315 		op->val = insn_fetch(s16, 2, ctxt->_eip);
3316 		break;
3317 	case 4:
3318 		op->val = insn_fetch(s32, 4, ctxt->_eip);
3319 		break;
3320 	}
3321 	if (!sign_extension) {
3322 		switch (op->bytes) {
3323 		case 1:
3324 			op->val &= 0xff;
3325 			break;
3326 		case 2:
3327 			op->val &= 0xffff;
3328 			break;
3329 		case 4:
3330 			op->val &= 0xffffffff;
3331 			break;
3332 		}
3333 	}
3334 done:
3335 	return rc;
3336 }
3337 
3338 int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
3339 {
3340 	int rc = X86EMUL_CONTINUE;
3341 	int mode = ctxt->mode;
3342 	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
3343 	bool op_prefix = false;
3344 	struct opcode opcode;
3345 	struct operand memop = { .type = OP_NONE }, *memopp = NULL;
3346 
3347 	ctxt->_eip = ctxt->eip;
3348 	ctxt->fetch.start = ctxt->_eip;
3349 	ctxt->fetch.end = ctxt->fetch.start + insn_len;
3350 	if (insn_len > 0)
3351 		memcpy(ctxt->fetch.data, insn, insn_len);
3352 
3353 	switch (mode) {
3354 	case X86EMUL_MODE_REAL:
3355 	case X86EMUL_MODE_VM86:
3356 	case X86EMUL_MODE_PROT16:
3357 		def_op_bytes = def_ad_bytes = 2;
3358 		break;
3359 	case X86EMUL_MODE_PROT32:
3360 		def_op_bytes = def_ad_bytes = 4;
3361 		break;
3362 #ifdef CONFIG_X86_64
3363 	case X86EMUL_MODE_PROT64:
3364 		def_op_bytes = 4;
3365 		def_ad_bytes = 8;
3366 		break;
3367 #endif
3368 	default:
3369 		return -1;
3370 	}
3371 
3372 	ctxt->op_bytes = def_op_bytes;
3373 	ctxt->ad_bytes = def_ad_bytes;
3374 
3375 	/* Legacy prefixes. */
3376 	for (;;) {
3377 		switch (ctxt->b = insn_fetch(u8, 1, ctxt->_eip)) {
3378 		case 0x66:	/* operand-size override */
3379 			op_prefix = true;
3380 			/* switch between 2/4 bytes */
3381 			ctxt->op_bytes = def_op_bytes ^ 6;
3382 			break;
3383 		case 0x67:	/* address-size override */
3384 			if (mode == X86EMUL_MODE_PROT64)
3385 				/* switch between 4/8 bytes */
3386 				ctxt->ad_bytes = def_ad_bytes ^ 12;
3387 			else
3388 				/* switch between 2/4 bytes */
3389 				ctxt->ad_bytes = def_ad_bytes ^ 6;
3390 			break;
3391 		case 0x26:	/* ES override */
3392 		case 0x2e:	/* CS override */
3393 		case 0x36:	/* SS override */
3394 		case 0x3e:	/* DS override */
3395 			set_seg_override(ctxt, (ctxt->b >> 3) & 3);
3396 			break;
3397 		case 0x64:	/* FS override */
3398 		case 0x65:	/* GS override */
3399 			set_seg_override(ctxt, ctxt->b & 7);
3400 			break;
3401 		case 0x40 ... 0x4f: /* REX */
3402 			if (mode != X86EMUL_MODE_PROT64)
3403 				goto done_prefixes;
3404 			ctxt->rex_prefix = ctxt->b;
3405 			continue;
3406 		case 0xf0:	/* LOCK */
3407 			ctxt->lock_prefix = 1;
3408 			break;
3409 		case 0xf2:	/* REPNE/REPNZ */
3410 		case 0xf3:	/* REP/REPE/REPZ */
3411 			ctxt->rep_prefix = ctxt->b;
3412 			break;
3413 		default:
3414 			goto done_prefixes;
3415 		}
3416 
3417 		/* Any legacy prefix after a REX prefix nullifies its effect. */
3418 
3419 		ctxt->rex_prefix = 0;
3420 	}
3421 
3422 done_prefixes:
3423 
3424 	/* REX prefix. */
3425 	if (ctxt->rex_prefix & 8)
3426 		ctxt->op_bytes = 8;	/* REX.W */
3427 
3428 	/* Opcode byte(s). */
3429 	opcode = opcode_table[ctxt->b];
3430 	/* Two-byte opcode? */
3431 	if (ctxt->b == 0x0f) {
3432 		ctxt->twobyte = 1;
3433 		ctxt->b = insn_fetch(u8, 1, ctxt->_eip);
3434 		opcode = twobyte_table[ctxt->b];
3435 	}
3436 	ctxt->d = opcode.flags;
3437 
3438 	while (ctxt->d & GroupMask) {
3439 		switch (ctxt->d & GroupMask) {
3440 		case Group:
3441 			ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
3442 			--ctxt->_eip;
3443 			goffset = (ctxt->modrm >> 3) & 7;
3444 			opcode = opcode.u.group[goffset];
3445 			break;
3446 		case GroupDual:
3447 			ctxt->modrm = insn_fetch(u8, 1, ctxt->_eip);
3448 			--ctxt->_eip;
3449 			goffset = (ctxt->modrm >> 3) & 7;
3450 			if ((ctxt->modrm >> 6) == 3)
3451 				opcode = opcode.u.gdual->mod3[goffset];
3452 			else
3453 				opcode = opcode.u.gdual->mod012[goffset];
3454 			break;
3455 		case RMExt:
3456 			goffset = ctxt->modrm & 7;
3457 			opcode = opcode.u.group[goffset];
3458 			break;
3459 		case Prefix:
3460 			if (ctxt->rep_prefix && op_prefix)
3461 				return X86EMUL_UNHANDLEABLE;
3462 			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
3463 			switch (simd_prefix) {
3464 			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
3465 			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
3466 			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
3467 			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
3468 			}
3469 			break;
3470 		default:
3471 			return X86EMUL_UNHANDLEABLE;
3472 		}
3473 
3474 		ctxt->d &= ~GroupMask;
3475 		ctxt->d |= opcode.flags;
3476 	}
3477 
3478 	ctxt->execute = opcode.u.execute;
3479 	ctxt->check_perm = opcode.check_perm;
3480 	ctxt->intercept = opcode.intercept;
3481 
3482 	/* Unrecognised? */
3483 	if (ctxt->d == 0 || (ctxt->d & Undefined))
3484 		return -1;
3485 
3486 	if (!(ctxt->d & VendorSpecific) && ctxt->only_vendor_specific_insn)
3487 		return -1;
3488 
3489 	if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
3490 		ctxt->op_bytes = 8;
3491 
3492 	if (ctxt->d & Op3264) {
3493 		if (mode == X86EMUL_MODE_PROT64)
3494 			ctxt->op_bytes = 8;
3495 		else
3496 			ctxt->op_bytes = 4;
3497 	}
3498 
3499 	if (ctxt->d & Sse)
3500 		ctxt->op_bytes = 16;
3501 
3502 	/* ModRM and SIB bytes. */
3503 	if (ctxt->d & ModRM) {
3504 		rc = decode_modrm(ctxt, &memop);
3505 		if (!ctxt->has_seg_override)
3506 			set_seg_override(ctxt, ctxt->modrm_seg);
3507 	} else if (ctxt->d & MemAbs)
3508 		rc = decode_abs(ctxt, &memop);
3509 	if (rc != X86EMUL_CONTINUE)
3510 		goto done;
3511 
3512 	if (!ctxt->has_seg_override)
3513 		set_seg_override(ctxt, VCPU_SREG_DS);
3514 
3515 	memop.addr.mem.seg = seg_override(ctxt);
3516 
3517 	if (memop.type == OP_MEM && ctxt->ad_bytes != 8)
3518 		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
3519 
3520 	/*
3521 	 * Decode and fetch the source operand: register, memory
3522 	 * or immediate.
3523 	 */
3524 	switch (ctxt->d & SrcMask) {
3525 	case SrcNone:
3526 		break;
3527 	case SrcReg:
3528 		decode_register_operand(ctxt, &ctxt->src, 0);
3529 		break;
3530 	case SrcMem16:
3531 		memop.bytes = 2;
3532 		goto srcmem_common;
3533 	case SrcMem32:
3534 		memop.bytes = 4;
3535 		goto srcmem_common;
3536 	case SrcMem:
3537 		memop.bytes = (ctxt->d & ByteOp) ? 1 :
3538 							   ctxt->op_bytes;
3539 	srcmem_common:
3540 		ctxt->src = memop;
3541 		memopp = &ctxt->src;
3542 		break;
3543 	case SrcImmU16:
3544 		rc = decode_imm(ctxt, &ctxt->src, 2, false);
3545 		break;
3546 	case SrcImm:
3547 		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), true);
3548 		break;
3549 	case SrcImmU:
3550 		rc = decode_imm(ctxt, &ctxt->src, imm_size(ctxt), false);
3551 		break;
3552 	case SrcImmByte:
3553 		rc = decode_imm(ctxt, &ctxt->src, 1, true);
3554 		break;
3555 	case SrcImmUByte:
3556 		rc = decode_imm(ctxt, &ctxt->src, 1, false);
3557 		break;
3558 	case SrcAcc:
3559 		ctxt->src.type = OP_REG;
3560 		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3561 		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3562 		fetch_register_operand(&ctxt->src);
3563 		break;
3564 	case SrcOne:
3565 		ctxt->src.bytes = 1;
3566 		ctxt->src.val = 1;
3567 		break;
3568 	case SrcSI:
3569 		ctxt->src.type = OP_MEM;
3570 		ctxt->src.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3571 		ctxt->src.addr.mem.ea =
3572 			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
3573 		ctxt->src.addr.mem.seg = seg_override(ctxt);
3574 		ctxt->src.val = 0;
3575 		break;
3576 	case SrcImmFAddr:
3577 		ctxt->src.type = OP_IMM;
3578 		ctxt->src.addr.mem.ea = ctxt->_eip;
3579 		ctxt->src.bytes = ctxt->op_bytes + 2;
3580 		insn_fetch_arr(ctxt->src.valptr, ctxt->src.bytes, ctxt->_eip);
3581 		break;
3582 	case SrcMemFAddr:
3583 		memop.bytes = ctxt->op_bytes + 2;
3584 		goto srcmem_common;
3586 	case SrcDX:
3587 		ctxt->src.type = OP_REG;
3588 		ctxt->src.bytes = 2;
3589 		ctxt->src.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3590 		fetch_register_operand(&ctxt->src);
3591 		break;
3592 	}
3593 
3594 	if (rc != X86EMUL_CONTINUE)
3595 		goto done;
3596 
3597 	/*
3598 	 * Decode and fetch the second source operand: register, memory
3599 	 * or immediate.
3600 	 */
3601 	switch (ctxt->d & Src2Mask) {
3602 	case Src2None:
3603 		break;
3604 	case Src2CL:
3605 		ctxt->src2.bytes = 1;
3606 		ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff; /* CL = low byte of RCX */
3607 		break;
3608 	case Src2ImmByte:
3609 		rc = decode_imm(ctxt, &ctxt->src2, 1, true);
3610 		break;
3611 	case Src2One:
3612 		ctxt->src2.bytes = 1;
3613 		ctxt->src2.val = 1;
3614 		break;
3615 	case Src2Imm:
3616 		rc = decode_imm(ctxt, &ctxt->src2, imm_size(ctxt), true);
3617 		break;
3618 	}
3619 
3620 	if (rc != X86EMUL_CONTINUE)
3621 		goto done;
3622 
3623 	/* Decode and fetch the destination operand: register or memory. */
3624 	switch (ctxt->d & DstMask) {
3625 	case DstReg:
3626 		decode_register_operand(ctxt, &ctxt->dst,
3627 			 ctxt->twobyte && (ctxt->b == 0xb6 || ctxt->b == 0xb7));
3628 		break;
3629 	case DstImmUByte:
3630 		ctxt->dst.type = OP_IMM;
3631 		ctxt->dst.addr.mem.ea = ctxt->_eip;
3632 		ctxt->dst.bytes = 1;
3633 		ctxt->dst.val = insn_fetch(u8, 1, ctxt->_eip);
3634 		break;
3635 	case DstMem:
3636 	case DstMem64:
3637 		ctxt->dst = memop;
3638 		memopp = &ctxt->dst;
3639 		if ((ctxt->d & DstMask) == DstMem64)
3640 			ctxt->dst.bytes = 8;
3641 		else
3642 			ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3643 		if (ctxt->d & BitOp)
3644 			fetch_bit_operand(ctxt);
3645 		ctxt->dst.orig_val = ctxt->dst.val;
3646 		break;
3647 	case DstAcc:
3648 		ctxt->dst.type = OP_REG;
3649 		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3650 		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RAX];
3651 		fetch_register_operand(&ctxt->dst);
3652 		ctxt->dst.orig_val = ctxt->dst.val;
3653 		break;
3654 	case DstDI:
3655 		ctxt->dst.type = OP_MEM;
3656 		ctxt->dst.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
3657 		ctxt->dst.addr.mem.ea =
3658 			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
3659 		ctxt->dst.addr.mem.seg = VCPU_SREG_ES;
3660 		ctxt->dst.val = 0;
3661 		break;
3662 	case DstDX:
3663 		ctxt->dst.type = OP_REG;
3664 		ctxt->dst.bytes = 2;
3665 		ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
3666 		fetch_register_operand(&ctxt->dst);
3667 		break;
3668 	case ImplicitOps:
3669 		/* Special instructions do their own operand decoding. */
3670 	default:
3671 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
3672 		break;
3673 	}
3674 
3675 done:
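	/*
	 * RIP-relative operands are encoded relative to the end of the
	 * instruction, so fix up the effective address with the final
	 * _eip now that decode is complete.
	 */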
3676 	if (memopp && memopp->type == OP_MEM && ctxt->rip_relative)
3677 		memopp->addr.mem.ea += ctxt->_eip;
3678 
3679 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3680 }
3681 
3682 static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
3683 {
3684 	/* The second termination condition applies only to REPE
3685 	 * and REPNE. If the repeat string prefix is REPE/REPZ or
3686 	 * REPNE/REPNZ, test the corresponding termination
3687 	 * condition:
3688 	 * 	- if REPE/REPZ and ZF = 0 then done
3689 	 * 	- if REPNE/REPNZ and ZF = 1 then done
3690 	 */
3691 	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
3692 	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
3693 	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
3694 		 ((ctxt->eflags & EFLG_ZF) == 0))
3695 		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
3696 		    ((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
3697 		return true;
3698 
3699 	return false;
3700 }
3701 
3702 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
3703 {
3704 	struct x86_emulate_ops *ops = ctxt->ops;
3705 	u64 msr_data;
3706 	int rc = X86EMUL_CONTINUE;
3707 	int saved_dst_type = ctxt->dst.type;
3708 
3709 	ctxt->mem_read.pos = 0;
3710 
3711 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) {
3712 		rc = emulate_ud(ctxt);
3713 		goto done;
3714 	}
3715 
3716 	/* LOCK is valid only on lockable instructions with a memory destination */
3717 	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
3718 		rc = emulate_ud(ctxt);
3719 		goto done;
3720 	}
3721 
3722 	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
3723 		rc = emulate_ud(ctxt);
3724 		goto done;
3725 	}
3726 
3727 	if ((ctxt->d & Sse)
3728 	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
3729 		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
3730 		rc = emulate_ud(ctxt);
3731 		goto done;
3732 	}
3733 
3734 	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
3735 		rc = emulate_nm(ctxt);
3736 		goto done;
3737 	}
3738 
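	/*
	 * In nested guest mode the instruction may be intercepted by the
	 * L1 hypervisor; intercepts are checked at three points (pre-
	 * exception, post-exception, post-memory-access) to approximate
	 * the priority ordering of hardware intercept checks.
	 */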
3739 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3740 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3741 					      X86_ICPT_PRE_EXCEPT);
3742 		if (rc != X86EMUL_CONTINUE)
3743 			goto done;
3744 	}
3745 
3746 	/* Privileged instructions can be executed only at CPL 0 */
3747 	if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
3748 		rc = emulate_gp(ctxt, 0);
3749 		goto done;
3750 	}
3751 
3752 	/* Instruction can only be executed in protected mode */
3753 	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
3754 		rc = emulate_ud(ctxt);
3755 		goto done;
3756 	}
3757 
3758 	/* Do instruction specific permission checks */
3759 	if (ctxt->check_perm) {
3760 		rc = ctxt->check_perm(ctxt);
3761 		if (rc != X86EMUL_CONTINUE)
3762 			goto done;
3763 	}
3764 
3765 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3766 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3767 					      X86_ICPT_POST_EXCEPT);
3768 		if (rc != X86EMUL_CONTINUE)
3769 			goto done;
3770 	}
3771 
3772 	if (ctxt->rep_prefix && (ctxt->d & String)) {
3773 		/* All REP prefixes have the same first termination condition */
3774 		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
3775 			ctxt->eip = ctxt->_eip;
3776 			goto done;
3777 		}
3778 	}
3779 
3780 	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
3781 		rc = segmented_read(ctxt, ctxt->src.addr.mem,
3782 				    ctxt->src.valptr, ctxt->src.bytes);
3783 		if (rc != X86EMUL_CONTINUE)
3784 			goto done;
3785 		ctxt->src.orig_val64 = ctxt->src.val64;
3786 	}
3787 
3788 	if (ctxt->src2.type == OP_MEM) {
3789 		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
3790 				    &ctxt->src2.val, ctxt->src2.bytes);
3791 		if (rc != X86EMUL_CONTINUE)
3792 			goto done;
3793 	}
3794 
3795 	if ((ctxt->d & DstMask) == ImplicitOps)
3796 		goto special_insn;
3797 
3799 	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
3800 		/* optimisation - avoid slow emulated read if Mov */
3801 		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
3802 				   &ctxt->dst.val, ctxt->dst.bytes);
3803 		if (rc != X86EMUL_CONTINUE)
3804 			goto done;
3805 	}
3806 	ctxt->dst.orig_val = ctxt->dst.val;
3807 
3808 special_insn:
3809 
3810 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
3811 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
3812 					      X86_ICPT_POST_MEMACCESS);
3813 		if (rc != X86EMUL_CONTINUE)
3814 			goto done;
3815 	}
3816 
3817 	if (ctxt->execute) {
3818 		rc = ctxt->execute(ctxt);
3819 		if (rc != X86EMUL_CONTINUE)
3820 			goto done;
3821 		goto writeback;
3822 	}
3823 
3824 	if (ctxt->twobyte)
3825 		goto twobyte_insn;
3826 
3827 	switch (ctxt->b) {
3828 	case 0x06:		/* push es */
3829 		rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
3830 		break;
3831 	case 0x07:		/* pop es */
3832 		rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
3833 		break;
3834 	case 0x0e:		/* push cs */
3835 		rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
3836 		break;
3837 	case 0x16:		/* push ss */
3838 		rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
3839 		break;
3840 	case 0x17:		/* pop ss */
3841 		rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
3842 		break;
3843 	case 0x1e:		/* push ds */
3844 		rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
3845 		break;
3846 	case 0x1f:		/* pop ds */
3847 		rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
3848 		break;
3849 	case 0x40 ... 0x47: /* inc r16/r32 */
3850 		emulate_1op("inc", ctxt->dst, ctxt->eflags);
3851 		break;
3852 	case 0x48 ... 0x4f: /* dec r16/r32 */
3853 		emulate_1op("dec", ctxt->dst, ctxt->eflags);
3854 		break;
3855 	case 0x63:		/* movsxd */
3856 		if (ctxt->mode != X86EMUL_MODE_PROT64)
3857 			goto cannot_emulate;
3858 		ctxt->dst.val = (s32) ctxt->src.val;
3859 		break;
3860 	case 0x6c:		/* insb */
3861 	case 0x6d:		/* insw/insd */
3862 		ctxt->src.val = ctxt->regs[VCPU_REGS_RDX];
3863 		goto do_io_in;
3864 	case 0x6e:		/* outsb */
3865 	case 0x6f:		/* outsw/outsd */
3866 		ctxt->dst.val = ctxt->regs[VCPU_REGS_RDX];
3867 		goto do_io_out;
3869 	case 0x70 ... 0x7f: /* jcc (short) */
3870 		if (test_cc(ctxt->b, ctxt->eflags))
3871 			jmp_rel(ctxt, ctxt->src.val);
3872 		break;
3873 	case 0x8d: /* lea r16/r32, m */
3874 		ctxt->dst.val = ctxt->src.addr.mem.ea;
3875 		break;
3876 	case 0x8f:		/* pop (sole member of Grp1a) */
3877 		rc = em_grp1a(ctxt);
3878 		break;
3879 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
3880 		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
3881 			break;
3882 		rc = em_xchg(ctxt);
3883 		break;
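	/* cbw/cwde/cdqe: sign-extend the lower half of the accumulator. */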
3884 	case 0x98: /* cbw/cwde/cdqe */
3885 		switch (ctxt->op_bytes) {
3886 		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
3887 		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
3888 		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
3889 		}
3890 		break;
3891 	case 0xc0 ... 0xc1:
3892 		rc = em_grp2(ctxt);
3893 		break;
3894 	case 0xc4:		/* les */
3895 		rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
3896 		break;
3897 	case 0xc5:		/* lds */
3898 		rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
3899 		break;
3900 	case 0xcc:		/* int3 */
3901 		rc = emulate_int(ctxt, 3);
3902 		break;
3903 	case 0xcd:		/* int n */
3904 		rc = emulate_int(ctxt, ctxt->src.val);
3905 		break;
3906 	case 0xce:		/* into */
3907 		if (ctxt->eflags & EFLG_OF)
3908 			rc = emulate_int(ctxt, 4);
3909 		break;
3910 	case 0xd0 ... 0xd1:	/* Grp2 */
3911 		rc = em_grp2(ctxt);
3912 		break;
3913 	case 0xd2 ... 0xd3:	/* Grp2 */
3914 		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
3915 		rc = em_grp2(ctxt);
3916 		break;
3917 	case 0xe4: 	/* inb */
3918 	case 0xe5: 	/* in */
3919 		goto do_io_in;
3920 	case 0xe6: /* outb */
3921 	case 0xe7: /* out */
3922 		goto do_io_out;
3923 	case 0xe8: /* call (near) */ {
3924 		long int rel = ctxt->src.val;
3925 		ctxt->src.val = (unsigned long) ctxt->_eip;
3926 		jmp_rel(ctxt, rel);
3927 		rc = em_push(ctxt);
3928 		break;
3929 	}
3930 	case 0xe9: /* jmp rel */
3931 	case 0xeb: /* jmp rel short */
3932 		jmp_rel(ctxt, ctxt->src.val);
3933 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
3934 		break;
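	/*
	 * For IN, pio_in_emulated() returning false means the data is
	 * not yet available and a userspace exit is needed; skipping the
	 * EIP writeback below causes the instruction to restart.
	 */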
3935 	case 0xec: /* in al,dx */
3936 	case 0xed: /* in (e/r)ax,dx */
3937 	do_io_in:
3938 		if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3939 				     &ctxt->dst.val))
3940 			goto done; /* IO is needed */
3941 		break;
3942 	case 0xee: /* out dx,al */
3943 	case 0xef: /* out dx,(e/r)ax */
3944 	do_io_out:
3945 		ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3946 				      &ctxt->src.val, 1);
3947 		ctxt->dst.type = OP_NONE;	/* Disable writeback. */
3948 		break;
3949 	case 0xf4:              /* hlt */
3950 		ctxt->ops->halt(ctxt);
3951 		break;
3952 	case 0xf5:	/* cmc */
3953 		/* complement carry flag from eflags reg */
3954 		ctxt->eflags ^= EFLG_CF;
3955 		break;
3956 	case 0xf6 ... 0xf7:	/* Grp3 */
3957 		rc = em_grp3(ctxt);
3958 		break;
3959 	case 0xf8: /* clc */
3960 		ctxt->eflags &= ~EFLG_CF;
3961 		break;
3962 	case 0xf9: /* stc */
3963 		ctxt->eflags |= EFLG_CF;
3964 		break;
3965 	case 0xfc: /* cld */
3966 		ctxt->eflags &= ~EFLG_DF;
3967 		break;
3968 	case 0xfd: /* std */
3969 		ctxt->eflags |= EFLG_DF;
3970 		break;
3971 	case 0xfe ... 0xff: /* Grp4/Grp5: both dispatch via em_grp45() */
3972 		rc = em_grp45(ctxt);
3973 		break;
3977 	default:
3978 		goto cannot_emulate;
3979 	}
3980 
3981 	if (rc != X86EMUL_CONTINUE)
3982 		goto done;
3983 
3984 writeback:
3985 	rc = writeback(ctxt);
3986 	if (rc != X86EMUL_CONTINUE)
3987 		goto done;
3988 
3989 	/*
3990 	 * Restore dst type in case the decoding is reused
3991 	 * (happens for string instructions).
3992 	 */
3993 	ctxt->dst.type = saved_dst_type;
3994 
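	/*
	 * Advance (E)SI/(E)DI for string instructions; string_addr_inc()
	 * honours the direction flag.
	 */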
3995 	if ((ctxt->d & SrcMask) == SrcSI)
3996 		string_addr_inc(ctxt, seg_override(ctxt),
3997 				VCPU_REGS_RSI, &ctxt->src);
3998 
3999 	if ((ctxt->d & DstMask) == DstDI)
4000 		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
4001 				&ctxt->dst);
4002 
4003 	if (ctxt->rep_prefix && (ctxt->d & String)) {
4004 		struct read_cache *r = &ctxt->io_read;
4005 		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
4006 
4007 		if (!string_insn_completed(ctxt)) {
4008 			/*
4009 			 * Re-enter the guest when the PIO read-ahead buffer is
4010 			 * empty or, if it is not used, after every 1024 iterations.
4011 			 */
4012 			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
4013 			    (r->end == 0 || r->end != r->pos)) {
4014 				/*
4015 				 * Reset read cache. Usually happens before
4016 				 * decode, but since instruction is restarted
4017 				 * we have to do it here.
4018 				 */
4019 				ctxt->mem_read.end = 0;
4020 				return EMULATION_RESTART;
4021 			}
4022 			goto done; /* skip rip writeback */
4023 		}
4024 	}
4025 
4026 	ctxt->eip = ctxt->_eip;
4027 
4028 done:
4029 	if (rc == X86EMUL_PROPAGATE_FAULT)
4030 		ctxt->have_exception = true;
4031 	if (rc == X86EMUL_INTERCEPTED)
4032 		return EMULATION_INTERCEPTED;
4033 
4034 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
4035 
4036 twobyte_insn:
4037 	switch (ctxt->b) {
4038 	case 0x09:		/* wbinvd */
4039 		ctxt->ops->wbinvd(ctxt);
4040 		break;
4041 	case 0x08:		/* invd */
4042 	case 0x0d:		/* GrpP (prefetch) */
4043 	case 0x18:		/* Grp16 (prefetch/nop) */
4044 		break;
4045 	case 0x20: /* mov cr, reg */
4046 		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
4047 		break;
4048 	case 0x21: /* mov from dr to reg */
4049 		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
4050 		break;
4051 	case 0x22: /* mov reg, cr */
4052 		if (ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val)) {
4053 			emulate_gp(ctxt, 0);
4054 			rc = X86EMUL_PROPAGATE_FAULT;
4055 			goto done;
4056 		}
4057 		ctxt->dst.type = OP_NONE;
4058 		break;
4059 	case 0x23: /* mov from reg to dr */
4060 		if (ops->set_dr(ctxt, ctxt->modrm_reg, ctxt->src.val &
4061 				((ctxt->mode == X86EMUL_MODE_PROT64) ?
4062 				 ~0ULL : ~0U)) < 0) {
4063 			/* #UD condition is already handled by the code above */
4064 			emulate_gp(ctxt, 0);
4065 			rc = X86EMUL_PROPAGATE_FAULT;
4066 			goto done;
4067 		}
4068 
4069 		ctxt->dst.type = OP_NONE;	/* no writeback */
4070 		break;
4071 	case 0x30:
4072 		/* wrmsr */
4073 		msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
4074 			| ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
4075 		if (ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) {
4076 			emulate_gp(ctxt, 0);
4077 			rc = X86EMUL_PROPAGATE_FAULT;
4078 			goto done;
4079 		}
4080 		rc = X86EMUL_CONTINUE;
4081 		break;
4082 	case 0x32:
4083 		/* rdmsr */
4084 		if (ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) {
4085 			emulate_gp(ctxt, 0);
4086 			rc = X86EMUL_PROPAGATE_FAULT;
4087 			goto done;
4088 		} else {
4089 			ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
4090 			ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
4091 		}
4092 		rc = X86EMUL_CONTINUE;
4093 		break;
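	/*
	 * CMOV: perform the move unconditionally, then suppress the
	 * writeback if the condition is false.
	 */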
4094 	case 0x40 ... 0x4f:	/* cmov */
4095 		ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
4096 		if (!test_cc(ctxt->b, ctxt->eflags))
4097 			ctxt->dst.type = OP_NONE; /* no writeback */
4098 		break;
4099 	case 0x80 ... 0x8f: /* jcc rel (near) */
4100 		if (test_cc(ctxt->b, ctxt->eflags))
4101 			jmp_rel(ctxt, ctxt->src.val);
4102 		break;
4103 	case 0x90 ... 0x9f:     /* setcc r/m8 */
4104 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4105 		break;
4106 	case 0xa0:	  /* push fs */
4107 		rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
4108 		break;
4109 	case 0xa1:	 /* pop fs */
4110 		rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
4111 		break;
4112 	case 0xa3:
4113 	      bt:		/* bt */
4114 		ctxt->dst.type = OP_NONE;
4115 		/* only subword offset */
4116 		ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
4117 		emulate_2op_SrcV_nobyte("bt", ctxt->src, ctxt->dst, ctxt->eflags);
4118 		break;
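	/*
	 * shld/shrd shift ctxt->dst by src2 bits (imm8 or CL), shifting
	 * in bits from ctxt->src.
	 */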
4119 	case 0xa4: /* shld imm8, r, r/m */
4120 	case 0xa5: /* shld cl, r, r/m */
4121 		emulate_2op_cl("shld", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
4122 		break;
4123 	case 0xa8:	/* push gs */
4124 		rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
4125 		break;
4126 	case 0xa9:	/* pop gs */
4127 		rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
4128 		break;
4129 	case 0xab:
4130 	      bts:		/* bts */
4131 		emulate_2op_SrcV_nobyte("bts", ctxt->src, ctxt->dst, ctxt->eflags);
4132 		break;
4133 	case 0xac: /* shrd imm8, r, r/m */
4134 	case 0xad: /* shrd cl, r, r/m */
4135 		emulate_2op_cl("shrd", ctxt->src2, ctxt->src, ctxt->dst, ctxt->eflags);
4136 		break;
4137 	case 0xae:              /* clflush */
4138 		break;
4139 	case 0xb0 ... 0xb1:	/* cmpxchg */
4140 		/*
4141 		 * Save real source value, then compare EAX against
4142 		 * destination.
4143 		 */
4144 		ctxt->src.orig_val = ctxt->src.val;
4145 		ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
4146 		emulate_2op_SrcV("cmp", ctxt->src, ctxt->dst, ctxt->eflags);
4147 		if (ctxt->eflags & EFLG_ZF) {
4148 			/* Success: write back to memory. */
4149 			ctxt->dst.val = ctxt->src.orig_val;
4150 		} else {
4151 			/* Failure: write the value we saw to EAX. */
4152 			ctxt->dst.type = OP_REG;
4153 			ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
4154 		}
4155 		break;
4156 	case 0xb2:		/* lss */
4157 		rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
4158 		break;
4159 	case 0xb3:
4160 	      btr:		/* btr */
4161 		emulate_2op_SrcV_nobyte("btr", ctxt->src, ctxt->dst, ctxt->eflags);
4162 		break;
4163 	case 0xb4:		/* lfs */
4164 		rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
4165 		break;
4166 	case 0xb5:		/* lgs */
4167 		rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
4168 		break;
4169 	case 0xb6 ... 0xb7:	/* movzx */
4170 		ctxt->dst.bytes = ctxt->op_bytes;
4171 		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
4172 						       : (u16) ctxt->src.val;
4173 		break;
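	/*
	 * Grp8: bit tests with an immediate bit offset. Valid encodings
	 * have ModRM reg = 4-7, so the low two bits select bt/bts/btr/btc.
	 */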
4174 	case 0xba:		/* Grp8 */
4175 		switch (ctxt->modrm_reg & 3) {
4176 		case 0:
4177 			goto bt;
4178 		case 1:
4179 			goto bts;
4180 		case 2:
4181 			goto btr;
4182 		case 3:
4183 			goto btc;
4184 		}
4185 		break;
4186 	case 0xbb:
4187 	      btc:		/* btc */
4188 		emulate_2op_SrcV_nobyte("btc", ctxt->src, ctxt->dst, ctxt->eflags);
4189 		break;
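	/*
	 * bsf/bsr are delegated to the host instruction; as on hardware,
	 * the destination is left unmodified (writeback disabled) when
	 * the source is zero, with ZF flagging that case.
	 */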
4190 	case 0xbc: {		/* bsf */
4191 		u8 zf;
4192 		__asm__ ("bsf %2, %0; setz %1"
4193 			 : "=r"(ctxt->dst.val), "=q"(zf)
4194 			 : "r"(ctxt->src.val));
4195 		ctxt->eflags &= ~X86_EFLAGS_ZF;
4196 		if (zf) {
4197 			ctxt->eflags |= X86_EFLAGS_ZF;
4198 			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4199 		}
4200 		break;
4201 	}
4202 	case 0xbd: {		/* bsr */
4203 		u8 zf;
4204 		__asm__ ("bsr %2, %0; setz %1"
4205 			 : "=r"(ctxt->dst.val), "=q"(zf)
4206 			 : "r"(ctxt->src.val));
4207 		ctxt->eflags &= ~X86_EFLAGS_ZF;
4208 		if (zf) {
4209 			ctxt->eflags |= X86_EFLAGS_ZF;
4210 			ctxt->dst.type = OP_NONE;	/* Disable writeback. */
4211 		}
4212 		break;
4213 	}
4214 	case 0xbe ... 0xbf:	/* movsx */
4215 		ctxt->dst.bytes = ctxt->op_bytes;
4216 		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
4217 							(s16) ctxt->src.val;
4218 		break;
4219 	case 0xc0 ... 0xc1:	/* xadd */
4220 		emulate_2op_SrcV("add", ctxt->src, ctxt->dst, ctxt->eflags);
4221 		/* Write back the register source. */
4222 		ctxt->src.val = ctxt->dst.orig_val;
4223 		write_register_operand(&ctxt->src);
4224 		break;
4225 	case 0xc3:		/* movnti */
4226 		ctxt->dst.bytes = ctxt->op_bytes;
4227 		ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
4228 							(u64) ctxt->src.val;
4229 		break;
4230 	case 0xc7:		/* Grp9 (cmpxchg8b) */
4231 		rc = em_grp9(ctxt);
4232 		break;
4233 	default:
4234 		goto cannot_emulate;
4235 	}
4236 
4237 	if (rc != X86EMUL_CONTINUE)
4238 		goto done;
4239 
4240 	goto writeback;
4241 
4242 cannot_emulate:
4243 	return EMULATION_FAILED;
4244 }
4245