#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
	),

	TP_printk("vcpu %u", __entry->vcpu_id)
);

/*
 * Tracepoint for hypercall.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field(	unsigned long,	nr		)
		__field(	unsigned long,	a0		)
		__field(	unsigned long,	a1		)
		__field(	unsigned long,	a2		)
		__field(	unsigned long,	a3		)
	),

	TP_fast_assign(
		__entry->nr		= nr;
		__entry->a0		= a0;
		__entry->a1		= a1;
		__entry->a2		= a2;
		__entry->a3		= a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		 __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		 __entry->a3)
);

/*
 * Tracepoint for Hyper-V hypercall.
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field(	__u16,		rep_cnt		)
		__field(	__u16,		rep_idx		)
		__field(	__u64,		ingpa		)
		__field(	__u64,		outgpa		)
		__field(	__u16,		code		)
		__field(	bool,		fast		)
	),

	TP_fast_assign(
		__entry->rep_cnt	= rep_cnt;
		__entry->rep_idx	= rep_idx;
		__entry->ingpa		= ingpa;
		__entry->outgpa		= outgpa;
		__entry->code		= code;
		__entry->fast		= fast;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
		  __entry->outgpa)
);

/*
 * Tracepoint for PIO.
 */
TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count),
	TP_ARGS(rw, port, size, count),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	port		)
		__field(	unsigned int,	size		)
		__field(	unsigned int,	count		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->port		= port;
		__entry->size		= size;
		__entry->count		= count;
	),

	TP_printk("pio_%s at 0x%x size %d count %d",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count)
);

/*
 * Tracepoint for cpuid.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		 unsigned long rcx, unsigned long rdx),
	TP_ARGS(function, rax, rbx, rcx, rdx),

	TP_STRUCT__entry(
		__field(	unsigned int,	function	)
		__field(	unsigned long,	rax		)
		__field(	unsigned long,	rbx		)
		__field(	unsigned long,	rcx		)
		__field(	unsigned long,	rdx		)
	),

	TP_fast_assign(
		__entry->function	= function;
		__entry->rax		= rax;
		__entry->rbx		= rbx;
		__entry->rcx		= rcx;
		__entry->rdx		= rdx;
	),

	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
		  __entry->function, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx)
);

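/*
 * Symbolic APIC register names for __print_symbolic() in the kvm_apic
 * tracepoint below.
 */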
#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						    \
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI),   \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT),\
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR),  \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
	AREG(ECTRL)

/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	reg		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->reg		= reg;
		__entry->val		= val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)

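/*
 * ISA identifiers passed as the "isa" argument of the kvm_exit tracepoint.
 */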
#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2

/*
 * Tracepoint for kvm guest exit:
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_reason	)
		__field(	unsigned long,	guest_rip	)
		__field(	u32,		isa		)
		__field(	u64,		info1		)
		__field(	u64,		info2		)
	),

	TP_fast_assign(
		__entry->exit_reason	= exit_reason;
		__entry->guest_rip	= kvm_rip_read(vcpu);
		__entry->isa		= isa;
		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
					   &__entry->info2);
	),

	TP_printk("reason %s rip 0x%lx info %llx %llx",
		 ftrace_print_symbols_seq(p, __entry->exit_reason,
					  kvm_x86_ops->exit_reasons_str),
		 __entry->guest_rip, __entry->info1, __entry->info2)
);

/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
	),

	TP_fast_assign(
		__entry->irq		= irq;
	),

	TP_printk("irq %u", __entry->irq)
);

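/*
 * Symbolic exception vector names for __print_symbolic() in
 * kvm_inj_exception.
 */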
#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(MC)

/*
 * Tracepoint for kvm exception injection:
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field(	u8,	exception	)
		__field(	u8,	has_error	)
		__field(	u32,	error_code	)
	),

	TP_fast_assign(
		__entry->exception	= exception;
		__entry->has_error	= has_error;
		__entry->error_code	= error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);

/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field(	unsigned long,	fault_address	)
		__field(	unsigned int,	error_code	)
	),

	TP_fast_assign(
		__entry->fault_address	= fault_address;
		__entry->error_code	= error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);

/*
 * Tracepoint for guest MSR access.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field(	unsigned,	write		)
		__field(	u32,		ecx		)
		__field(	u64,		data		)
		__field(	u8,		exception	)
	),

	TP_fast_assign(
		__entry->write		= write;
		__entry->ecx		= ecx;
		__entry->data		= data;
		__entry->exception	= exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)

/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	cr		)
		__field(	unsigned long,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->cr		= cr;
		__entry->val		= val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)

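/*
 * Tracepoint for setting an IRQ on the emulated i8259 PIC.
 */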
TRACE_EVENT(kvm_pic_set_irq,
	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	    TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field(	__u8,		chip		)
		__field(	__u8,		pin		)
		__field(	__u8,		elcr		)
		__field(	__u8,		imr		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->chip		= chip;
		__entry->pin		= pin;
		__entry->elcr		= elcr;
		__entry->imr		= imr;
		__entry->coalesced	= coalesced;
	),

	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level" : "edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);

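/*
 * ICR destination-shorthand values, decoded by the kvm_apic_ipi tracepoint.
 */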
#define kvm_apic_dst_shorthand		\
	{0x0, "dst"},			\
	{0x1, "self"},			\
	{0x2, "all"},			\
	{0x3, "all-but-self"}

TRACE_EVENT(kvm_apic_ipi,
	    TP_PROTO(__u32 icr_low, __u32 dest_id),
	    TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field(	__u32,		icr_low		)
		__field(	__u32,		dest_id		)
	),

	TP_fast_assign(
		__entry->icr_low	= icr_low;
		__entry->dest_id	= dest_id;
	),

	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);

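/*
 * Tracepoint for an interrupt being accepted by the local APIC.
 */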
TRACE_EVENT(kvm_apic_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
	    TP_ARGS(apicid, dm, tm, vec, coalesced),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u8,		tm		)
		__field(	__u8,		vec		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
		__entry->coalesced	= coalesced;
	),

	TP_printk("apicid %x vec %u (%s|%s)%s",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge",
		  __entry->coalesced ? " (coalesced)" : "")
);

/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		     __u32 event_inj, bool npt),
	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field(	__u64,		rip		)
		__field(	__u64,		vmcb		)
		__field(	__u64,		nested_rip	)
		__field(	__u32,		int_ctl		)
		__field(	__u32,		event_inj	)
		__field(	bool,		npt		)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->vmcb		= vmcb;
		__entry->nested_rip	= nested_rip;
		__entry->int_ctl	= int_ctl;
		__entry->event_inj	= event_inj;
		__entry->npt		= npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		__entry->rip, __entry->vmcb, __entry->nested_rip,
		__entry->int_ctl, __entry->event_inj,
		__entry->npt ? "on" : "off")
);

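/*
 * Tracepoint for the intercept masks of a nested (SVM) guest.
 */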
TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u64,		intercept	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept	= intercept;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
		__entry->cr_read, __entry->cr_write, __entry->exceptions,
		__entry->intercept)
);

/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT(kvm_nested_vmexit,
	    TP_PROTO(__u64 rip, __u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err),
	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err),

	TP_STRUCT__entry(
		__field(	__u64,		rip			)
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
	),

	TP_fast_assign(
		__entry->rip			= rip;
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
	),

	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  __entry->rip,
		  ftrace_print_symbols_seq(p, __entry->exit_code,
					   kvm_x86_ops->exit_reasons_str),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	    TP_PROTO(__u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err),
	    TP_ARGS(exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err),

	TP_STRUCT__entry(
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
	),

	TP_fast_assign(
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
	),

	TP_printk("reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  ftrace_print_symbols_seq(p, __entry->exit_code,
					   kvm_x86_ops->exit_reasons_str),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);

/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	    TP_PROTO(__u64 rip),
	    TP_ARGS(rip),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
	),

	TP_fast_assign(
		__entry->rip	= rip;
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);

/*
 * Tracepoint for the INVLPGA instruction.
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	int,	asid	)
		__field(	__u64,	address	)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->asid		= asid;
		__entry->address	= address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);

/*
 * Tracepoint for the SKINIT instruction.
 */
TRACE_EVENT(kvm_skinit,
	    TP_PROTO(__u64 rip, __u32 slb),
	    TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	__u32,	slb	)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->slb		= slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);

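/*
 * Dump the instruction bytes into the trace seq buffer as hex and return a
 * pointer to the resulting string, for use in TP_printk().
 */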
#define __print_insn(insn, ilen) ({				 \
	int i;							 \
	const char *ret = p->buffer + p->len;			 \
								 \
	for (i = 0; i < ilen; ++i)				 \
		trace_seq_printf(p, " %02x", insn[i]);		 \
	trace_seq_printf(p, "%c", 0);				 \
	ret;							 \
	})

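/*
 * CPU-mode flags for the emulated instruction, printed symbolically via
 * kvm_trace_symbol_emul_flags below.
 */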
#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D   (1 << 2)
#define KVM_EMUL_INSN_F_CS_L   (1 << 3)

#define kvm_trace_symbol_emul_flags			  \
	{ 0,			    "real" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },		  \
	{ KVM_EMUL_INSN_F_CR0_PE			  \
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

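/*
 * Map an x86 emulator mode (X86EMUL_MODE_*) to the KVM_EMUL_INSN_F_* flag
 * encoding above.
 */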
#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})

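/*
 * Tracepoint for x86 instruction emulation.
 */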
TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field(    __u64, rip                       )
		__field(    __u32, csbase                    )
		__field(    __u8,  len                       )
		__array(    __u8,  insn,    15               )
		__field(    __u8,  flags                     )
		__field(    __u8,  failed                    )
		),

	TP_fast_assign(
		__entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
		__entry->len = vcpu->arch.emulate_ctxt._eip
			       - vcpu->arch.emulate_ctxt.fetch.start;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt.fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
		__entry->failed = failed;
		),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_insn(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		)
	);

#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)

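/*
 * Tracepoint for matching a guest address against the vcpu's cached MMIO
 * translation.
 */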
TRACE_EVENT(
	vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_STRUCT__entry(
		__field(gva_t, gva)
		__field(gpa_t, gpa)
		__field(bool, write)
		__field(bool, gpa_match)
		),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gpa = gpa;
		__entry->write = write;
		__entry->gpa_match = gpa_match;
		),

	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
		  __entry->write ? "Write" : "Read",
		  __entry->gpa_match ? "GPA" : "GVA")
);
#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/x86/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>