/*
 * KVM x86 tracepoint definitions (arch/x86/kvm/trace.h).
 * Note: the "<file> <size>" banner and the fused line-number gutter from the
 * original page extraction were removed here; they were not part of the code.
 */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
  3. #define _TRACE_KVM_H
  4. #include <linux/tracepoint.h>
  5. #include <asm/vmx.h>
  6. #include <asm/svm.h>
  7. #include <asm/clocksource.h>
  8. #include <asm/pvclock-abi.h>
  9. #undef TRACE_SYSTEM
  10. #define TRACE_SYSTEM kvm
/*
 * Tracepoint for guest mode entry.
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id		)
	),

	TP_fast_assign(
		__entry->vcpu_id	= vcpu_id;
	),

	TP_printk("vcpu %u", __entry->vcpu_id)
);
/*
 * Tracepoint for hypercall: records the hypercall number and up to
 * four guest-supplied arguments.
 */
TRACE_EVENT(kvm_hypercall,
	TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
		 unsigned long a2, unsigned long a3),
	TP_ARGS(nr, a0, a1, a2, a3),

	TP_STRUCT__entry(
		__field(	unsigned long,	nr		)
		__field(	unsigned long,	a0		)
		__field(	unsigned long,	a1		)
		__field(	unsigned long,	a2		)
		__field(	unsigned long,	a3		)
	),

	TP_fast_assign(
		__entry->nr		= nr;
		__entry->a0		= a0;
		__entry->a1		= a1;
		__entry->a2		= a2;
		__entry->a3		= a3;
	),

	TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
		  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
		  __entry->a3)
);
/*
 * Tracepoint for Hyper-V hypercall (the original comment said plain
 * "hypercall", a copy-paste of the tracepoint above).
 */
TRACE_EVENT(kvm_hv_hypercall,
	TP_PROTO(__u16 code, bool fast, __u16 rep_cnt, __u16 rep_idx,
		 __u64 ingpa, __u64 outgpa),
	TP_ARGS(code, fast, rep_cnt, rep_idx, ingpa, outgpa),

	TP_STRUCT__entry(
		__field(	__u16,		rep_cnt		)
		__field(	__u16,		rep_idx		)
		__field(	__u64,		ingpa		)
		__field(	__u64,		outgpa		)
		__field(	__u16,		code		)
		__field(	bool,		fast		)
	),

	TP_fast_assign(
		__entry->rep_cnt	= rep_cnt;
		__entry->rep_idx	= rep_idx;
		__entry->ingpa		= ingpa;
		__entry->outgpa		= outgpa;
		__entry->code		= code;
		__entry->fast		= fast;
	),

	TP_printk("code 0x%x %s cnt 0x%x idx 0x%x in 0x%llx out 0x%llx",
		  __entry->code, __entry->fast ? "fast" : "slow",
		  __entry->rep_cnt, __entry->rep_idx, __entry->ingpa,
		  __entry->outgpa)
);
/*
 * Tracepoint for PIO.
 */
#define KVM_PIO_IN   0
#define KVM_PIO_OUT  1

TRACE_EVENT(kvm_pio,
	TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
		 unsigned int count, void *data),
	TP_ARGS(rw, port, size, count, data),

	TP_STRUCT__entry(
		__field(	unsigned int, 	rw		)
		__field(	unsigned int, 	port		)
		__field(	unsigned int, 	size		)
		__field(	unsigned int,	count		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->port		= port;
		__entry->size		= size;
		__entry->count		= count;
		/* Only the first element is captured; 1/2-byte accesses are
		 * read through the matching narrower type. */
		if (size == 1)
			__entry->val	= *(unsigned char *)data;
		else if (size == 2)
			__entry->val	= *(unsigned short *)data;
		else
			__entry->val	= *(unsigned int *)data;
	),

	/* "(...)" marks that only the first of count>1 values was logged. */
	TP_printk("pio_%s at 0x%x size %d count %d val 0x%x %s",
		  __entry->rw ? "write" : "read",
		  __entry->port, __entry->size, __entry->count, __entry->val,
		  __entry->count > 1 ? "(...)" : "")
);
/*
 * Tracepoint for fast mmio.
 */
TRACE_EVENT(kvm_fast_mmio,
	TP_PROTO(u64 gpa),
	TP_ARGS(gpa),

	TP_STRUCT__entry(
		__field(u64,	gpa)
	),

	TP_fast_assign(
		__entry->gpa = gpa;
	),

	TP_printk("fast mmio at gpa 0x%llx", __entry->gpa)
);
/*
 * Tracepoint for cpuid: records the requested function, the register
 * values returned to the guest, and whether a matching cpuid entry
 * was found in the vcpu's cpuid table.
 */
TRACE_EVENT(kvm_cpuid,
	TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
		 unsigned long rcx, unsigned long rdx, bool found),
	TP_ARGS(function, rax, rbx, rcx, rdx, found),

	TP_STRUCT__entry(
		__field(	unsigned int,	function	)
		__field(	unsigned long,	rax		)
		__field(	unsigned long,	rbx		)
		__field(	unsigned long,	rcx		)
		__field(	unsigned long,	rdx		)
		__field(	bool,		found		)
	),

	TP_fast_assign(
		__entry->function	= function;
		__entry->rax		= rax;
		__entry->rbx		= rbx;
		__entry->rcx		= rcx;
		__entry->rdx		= rdx;
		__entry->found		= found;
	),

	TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s",
		  __entry->function, __entry->rax,
		  __entry->rbx, __entry->rcx, __entry->rdx,
		  __entry->found ? "found" : "not found")
);
/* Map an APIC register offset to its printable name for __print_symbolic(). */
#define AREG(x) { APIC_##x, "APIC_" #x }

#define kvm_trace_symbol_apic						\
	AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \
	AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
	AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
	AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \
	AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
	AREG(ECTRL)
/*
 * Tracepoint for apic access.
 */
TRACE_EVENT(kvm_apic,
	TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
	TP_ARGS(rw, reg, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	reg		)
		__field(	unsigned int,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->reg		= reg;
		__entry->val		= val;
	),

	TP_printk("apic_%s %s = 0x%x",
		  __entry->rw ? "write" : "read",
		  __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
		  __entry->val)
);

/* Convenience wrappers: rw == 0 is a read, rw == 1 is a write. */
#define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
#define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)

/* ISA discriminator stored in exit tracepoints below. */
#define KVM_ISA_VMX   1
#define KVM_ISA_SVM   2
/*
 * Tracepoint for kvm guest exit: captures the raw exit reason, the guest
 * rip at exit, and the two ISA-specific exit info words supplied by the
 * vendor module via kvm_x86_ops->get_exit_info().
 */
TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
	TP_ARGS(exit_reason, vcpu, isa),

	TP_STRUCT__entry(
		__field(	unsigned int,	exit_reason	)
		__field(	unsigned long,	guest_rip	)
		__field(	u32,	        isa             )
		__field(	u64,	        info1           )
		__field(	u64,	        info2           )
	),

	TP_fast_assign(
		__entry->exit_reason	= exit_reason;
		__entry->guest_rip	= kvm_rip_read(vcpu);
		__entry->isa            = isa;
		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
					   &__entry->info2);
	),

	/* Decode the exit reason with the symbol table matching the ISA. */
	TP_printk("reason %s rip 0x%lx info %llx %llx",
		 (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_reason, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_reason, SVM_EXIT_REASONS),
		 __entry->guest_rip, __entry->info1, __entry->info2)
);
/*
 * Tracepoint for kvm interrupt injection:
 */
TRACE_EVENT(kvm_inj_virq,
	TP_PROTO(unsigned int irq),
	TP_ARGS(irq),

	TP_STRUCT__entry(
		__field(	unsigned int,	irq		)
	),

	TP_fast_assign(
		__entry->irq		= irq;
	),

	TP_printk("irq %u", __entry->irq)
);
/* Map an exception vector number to its "#XX" mnemonic. */
#define EXS(x) { x##_VECTOR, "#" #x }

#define kvm_trace_sym_exc						\
	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
	EXS(MF), EXS(AC), EXS(MC)

/*
 * Tracepoint for kvm exception injection (the original comment said
 * "interrupt injection", copied from the tracepoint above).
 */
TRACE_EVENT(kvm_inj_exception,
	TP_PROTO(unsigned exception, bool has_error, unsigned error_code),
	TP_ARGS(exception, has_error, error_code),

	TP_STRUCT__entry(
		__field(	u8,	exception	)
		__field(	u8,	has_error	)
		__field(	u32,	error_code	)
	),

	TP_fast_assign(
		__entry->exception	= exception;
		__entry->has_error	= has_error;
		__entry->error_code	= error_code;
	),

	TP_printk("%s (0x%x)",
		  __print_symbolic(__entry->exception, kvm_trace_sym_exc),
		  /* FIXME: don't print error_code if not present */
		  __entry->has_error ? __entry->error_code : 0)
);
/*
 * Tracepoint for page fault.
 */
TRACE_EVENT(kvm_page_fault,
	TP_PROTO(unsigned long fault_address, unsigned int error_code),
	TP_ARGS(fault_address, error_code),

	TP_STRUCT__entry(
		__field(	unsigned long,	fault_address	)
		__field(	unsigned int,	error_code	)
	),

	TP_fast_assign(
		__entry->fault_address	= fault_address;
		__entry->error_code	= error_code;
	),

	TP_printk("address %lx error_code %x",
		  __entry->fault_address, __entry->error_code)
);
/*
 * Tracepoint for guest MSR access; "exception" marks an access that
 * raised #GP in the guest.
 */
TRACE_EVENT(kvm_msr,
	TP_PROTO(unsigned write, u32 ecx, u64 data, bool exception),
	TP_ARGS(write, ecx, data, exception),

	TP_STRUCT__entry(
		__field(	unsigned,	write		)
		__field(	u32,		ecx		)
		__field(	u64,		data		)
		__field(	u8,		exception	)
	),

	TP_fast_assign(
		__entry->write		= write;
		__entry->ecx		= ecx;
		__entry->data		= data;
		__entry->exception	= exception;
	),

	TP_printk("msr_%s %x = 0x%llx%s",
		  __entry->write ? "write" : "read",
		  __entry->ecx, __entry->data,
		  __entry->exception ? " (#GP)" : "")
);

/* Wrappers: first argument selects read (0) / write (1);
 * the _ex variants log a faulting access. */
#define trace_kvm_msr_read(ecx, data)      trace_kvm_msr(0, ecx, data, false)
#define trace_kvm_msr_write(ecx, data)     trace_kvm_msr(1, ecx, data, false)
#define trace_kvm_msr_read_ex(ecx)         trace_kvm_msr(0, ecx, 0, true)
#define trace_kvm_msr_write_ex(ecx, data)  trace_kvm_msr(1, ecx, data, true)
/*
 * Tracepoint for guest CR access.
 */
TRACE_EVENT(kvm_cr,
	TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
	TP_ARGS(rw, cr, val),

	TP_STRUCT__entry(
		__field(	unsigned int,	rw		)
		__field(	unsigned int,	cr		)
		__field(	unsigned long,	val		)
	),

	TP_fast_assign(
		__entry->rw		= rw;
		__entry->cr		= cr;
		__entry->val		= val;
	),

	TP_printk("cr_%s %x = 0x%lx",
		  __entry->rw ? "write" : "read",
		  __entry->cr, __entry->val)
);

/* Convenience wrappers: rw == 0 is a read, rw == 1 is a write. */
#define trace_kvm_cr_read(cr, val)		trace_kvm_cr(0, cr, val)
#define trace_kvm_cr_write(cr, val)		trace_kvm_cr(1, cr, val)
/*
 * Tracepoint for a PIC (i8259) irq being set; elcr/imr are snapshots of
 * the trigger-mode and mask registers used to decode the pin's state.
 */
TRACE_EVENT(kvm_pic_set_irq,
	    TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
	    TP_ARGS(chip, pin, elcr, imr, coalesced),

	TP_STRUCT__entry(
		__field(	__u8,		chip		)
		__field(	__u8,		pin		)
		__field(	__u8,		elcr		)
		__field(	__u8,		imr		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->chip		= chip;
		__entry->pin		= pin;
		__entry->elcr		= elcr;
		__entry->imr		= imr;
		__entry->coalesced	= coalesced;
	),

	TP_printk("chip %u pin %u (%s%s)%s",
		  __entry->chip, __entry->pin,
		  (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
		  (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
		  __entry->coalesced ? " (coalesced)" : "")
);
/* Symbol table for ICR destination-shorthand bits (ICR[19:18]). */
#define kvm_apic_dst_shorthand		\
	{0x0, "dst"},			\
	{0x1, "self"},			\
	{0x2, "all"},			\
	{0x3, "all-but-self"}

/*
 * Tracepoint for an APIC IPI; decodes the low ICR word (vector,
 * delivery mode, destination mode, level, trigger mode, shorthand).
 */
TRACE_EVENT(kvm_apic_ipi,
	    TP_PROTO(__u32 icr_low, __u32 dest_id),
	    TP_ARGS(icr_low, dest_id),

	TP_STRUCT__entry(
		__field(	__u32,		icr_low		)
		__field(	__u32,		dest_id		)
	),

	TP_fast_assign(
		__entry->icr_low	= icr_low;
		__entry->dest_id	= dest_id;
	),

	TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
		  __entry->dest_id, (u8)__entry->icr_low,
		  __print_symbolic((__entry->icr_low >> 8 & 0x7),
				   kvm_deliver_mode),
		  (__entry->icr_low & (1<<11)) ? "logical" : "physical",
		  (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
		  (__entry->icr_low & (1<<15)) ? "level" : "edge",
		  __print_symbolic((__entry->icr_low >> 18 & 0x3),
				   kvm_apic_dst_shorthand))
);
/*
 * Tracepoint for a local APIC accepting an irq: delivery mode (dm),
 * trigger mode (tm) and vector for the target apicid.
 */
TRACE_EVENT(kvm_apic_accept_irq,
	    TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
	    TP_ARGS(apicid, dm, tm, vec),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	__u16,		dm		)
		__field(	__u16,		tm		)
		__field(	__u8,		vec		)
	),

	TP_fast_assign(
		__entry->apicid		= apicid;
		__entry->dm		= dm;
		__entry->tm		= tm;
		__entry->vec		= vec;
	),

	TP_printk("apicid %x vec %u (%s|%s)",
		  __entry->apicid, __entry->vec,
		  __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
		  __entry->tm ? "level" : "edge")
);
/* Tracepoint for an EOI on the given vcpu's local APIC. */
TRACE_EVENT(kvm_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		/* apicid field actually records the vcpu id of the APIC's vcpu. */
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);
/* Tracepoint for a paravirtual EOI on the given vcpu's local APIC. */
TRACE_EVENT(kvm_pv_eoi,
	    TP_PROTO(struct kvm_lapic *apic, int vector),
	    TP_ARGS(apic, vector),

	TP_STRUCT__entry(
		__field(	__u32,		apicid		)
		__field(	int,		vector		)
	),

	TP_fast_assign(
		/* apicid field actually records the vcpu id of the APIC's vcpu. */
		__entry->apicid		= apic->vcpu->vcpu_id;
		__entry->vector		= vector;
	),

	TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
);
/*
 * Tracepoint for nested VMRUN
 */
TRACE_EVENT(kvm_nested_vmrun,
	    TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
		     __u32 event_inj, bool npt),
	    TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),

	TP_STRUCT__entry(
		__field(	__u64,		rip		)
		__field(	__u64,		vmcb		)
		__field(	__u64,		nested_rip	)
		__field(	__u32,		int_ctl		)
		__field(	__u32,		event_inj	)
		__field(	bool,		npt		)
	),

	TP_fast_assign(
		__entry->rip		= rip;
		__entry->vmcb		= vmcb;
		__entry->nested_rip	= nested_rip;
		__entry->int_ctl	= int_ctl;
		__entry->event_inj	= event_inj;
		__entry->npt		= npt;
	),

	TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
		  "event_inj: 0x%08x npt: %s",
		__entry->rip, __entry->vmcb, __entry->nested_rip,
		__entry->int_ctl, __entry->event_inj,
		__entry->npt ? "on" : "off")
);
/*
 * Tracepoint for the nested guest's intercept bitmaps: CR read/write
 * masks, intercepted exception vector mask, and the generic intercept
 * vector from the nested VMCB.
 */
TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u64,		intercept	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept	= intercept;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
		__entry->cr_read, __entry->cr_write, __entry->exceptions,
		__entry->intercept)
);
/*
 * Tracepoint for #VMEXIT while nested
 */
TRACE_EVENT(kvm_nested_vmexit,
	    TP_PROTO(__u64 rip, __u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(rip, exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u64,		rip			)
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->rip			= rip;
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),

	/* Decode exit_code with the symbol table matching the ISA. */
	TP_printk("rip: 0x%016llx reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		  __entry->rip,
		 (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		  __entry->exit_info1, __entry->exit_info2,
		  __entry->exit_int_info, __entry->exit_int_info_err)
);
/*
 * Tracepoint for #VMEXIT reinjected to the guest
 */
TRACE_EVENT(kvm_nested_vmexit_inject,
	    TP_PROTO(__u32 exit_code,
		     __u64 exit_info1, __u64 exit_info2,
		     __u32 exit_int_info, __u32 exit_int_info_err, __u32 isa),
	    TP_ARGS(exit_code, exit_info1, exit_info2,
		    exit_int_info, exit_int_info_err, isa),

	TP_STRUCT__entry(
		__field(	__u32,		exit_code		)
		__field(	__u64,		exit_info1		)
		__field(	__u64,		exit_info2		)
		__field(	__u32,		exit_int_info		)
		__field(	__u32,		exit_int_info_err	)
		__field(	__u32,		isa			)
	),

	TP_fast_assign(
		__entry->exit_code		= exit_code;
		__entry->exit_info1		= exit_info1;
		__entry->exit_info2		= exit_info2;
		__entry->exit_int_info		= exit_int_info;
		__entry->exit_int_info_err	= exit_int_info_err;
		__entry->isa			= isa;
	),

	/* Decode exit_code with the symbol table matching the ISA. */
	TP_printk("reason: %s ext_inf1: 0x%016llx "
		  "ext_inf2: 0x%016llx ext_int: 0x%08x ext_int_err: 0x%08x",
		 (__entry->isa == KVM_ISA_VMX) ?
		  __print_symbolic(__entry->exit_code, VMX_EXIT_REASONS) :
		  __print_symbolic(__entry->exit_code, SVM_EXIT_REASONS),
		__entry->exit_info1, __entry->exit_info2,
		__entry->exit_int_info, __entry->exit_int_info_err)
);
/*
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_nested_intr_vmexit,
	    TP_PROTO(__u64 rip),
	    TP_ARGS(rip),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
	),

	TP_fast_assign(
		/* No trailing ';' needed: the TP_fast_assign framework
		 * terminates the statement list. */
		__entry->rip	=	rip
	),

	TP_printk("rip: 0x%016llx", __entry->rip)
);
/*
 * Tracepoint for the INVLPGA instruction (the original comment said
 * "nested #vmexit because of interrupt pending", a copy-paste error).
 */
TRACE_EVENT(kvm_invlpga,
	    TP_PROTO(__u64 rip, int asid, u64 address),
	    TP_ARGS(rip, asid, address),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	int,	asid	)
		__field(	__u64,	address	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->asid		=	asid;
		__entry->address	=	address;
	),

	TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
		  __entry->rip, __entry->asid, __entry->address)
);
/*
 * Tracepoint for the SKINIT instruction (the original comment said
 * "nested #vmexit because of interrupt pending", a copy-paste error).
 */
TRACE_EVENT(kvm_skinit,
	    TP_PROTO(__u64 rip, __u32 slb),
	    TP_ARGS(rip, slb),

	TP_STRUCT__entry(
		__field(	__u64,	rip	)
		__field(	__u32,	slb	)
	),

	TP_fast_assign(
		__entry->rip		=	rip;
		__entry->slb		=	slb;
	),

	TP_printk("rip: 0x%016llx slb: 0x%08x",
		  __entry->rip, __entry->slb)
);
/* Flag bits describing the CPU mode an emulated instruction ran in. */
#define KVM_EMUL_INSN_F_CR0_PE	(1 << 0)	/* protected mode (CR0.PE) */
#define KVM_EMUL_INSN_F_EFL_VM	(1 << 1)	/* virtual-8086 (EFLAGS.VM) */
#define KVM_EMUL_INSN_F_CS_D	(1 << 2)	/* 32-bit code segment */
#define KVM_EMUL_INSN_F_CS_L	(1 << 3)	/* 64-bit (long) code segment */

/* Symbol table mapping the flag combinations above to mode names. */
#define kvm_trace_symbol_emul_flags	        \
	{ 0,			    "real" },	\
	{ KVM_EMUL_INSN_F_CR0_PE		\
	  | KVM_EMUL_INSN_F_EFL_VM, "vm16" },	\
	{ KVM_EMUL_INSN_F_CR0_PE,   "prot16" },	\
	{ KVM_EMUL_INSN_F_CR0_PE		\
	  | KVM_EMUL_INSN_F_CS_D,   "prot32" },	\
	{ KVM_EMUL_INSN_F_CR0_PE		\
	  | KVM_EMUL_INSN_F_CS_L,   "prot64" }

/* Translate an X86EMUL_MODE_* value to the flag encoding above;
 * unknown modes yield 0xff (no symbol matches). */
#define kei_decode_mode(mode) ({			\
	u8 flags = 0xff;				\
	switch (mode) {					\
	case X86EMUL_MODE_REAL:				\
		flags = 0;				\
		break;					\
	case X86EMUL_MODE_VM86:				\
		flags = KVM_EMUL_INSN_F_EFL_VM;		\
		break;					\
	case X86EMUL_MODE_PROT16:			\
		flags = KVM_EMUL_INSN_F_CR0_PE;		\
		break;					\
	case X86EMUL_MODE_PROT32:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_D;		\
		break;					\
	case X86EMUL_MODE_PROT64:			\
		flags = KVM_EMUL_INSN_F_CR0_PE		\
			| KVM_EMUL_INSN_F_CS_L;		\
		break;					\
	}						\
	flags;						\
	})
/*
 * Tracepoint for instruction emulation: captures the CS base, the rip of
 * the emulated instruction, its raw bytes, the CPU mode flags, and
 * whether emulation failed.
 */
TRACE_EVENT(kvm_emulate_insn,
	TP_PROTO(struct kvm_vcpu *vcpu, __u8 failed),
	TP_ARGS(vcpu, failed),

	TP_STRUCT__entry(
		__field(    __u64, rip                       )
		__field(    __u32, csbase                    )
		__field(    __u8,  len                       )
		__array(    __u8,  insn,    15               )
		__field(    __u8,  flags                     )
		__field(    __u8,  failed                    )
	),

	TP_fast_assign(
		__entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
		/* Length of the bytes actually fetched into the decode cache. */
		__entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
			       - vcpu->arch.emulate_ctxt.fetch.data;
		/* _eip points past the instruction; back up by the fetch length. */
		__entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
		memcpy(__entry->insn,
		       vcpu->arch.emulate_ctxt.fetch.data,
		       15);
		__entry->flags = kei_decode_mode(vcpu->arch.emulate_ctxt.mode);
		__entry->failed = failed;
	),

	TP_printk("%x:%llx:%s (%s)%s",
		  __entry->csbase, __entry->rip,
		  __print_hex(__entry->insn, __entry->len),
		  __print_symbolic(__entry->flags,
				   kvm_trace_symbol_emul_flags),
		  __entry->failed ? " failed" : ""
		)
	);

/* Wrappers distinguishing emulation start (failed == 0) from failure. */
#define trace_kvm_emulate_insn_start(vcpu) trace_kvm_emulate_insn(vcpu, 0)
#define trace_kvm_emulate_insn_failed(vcpu) trace_kvm_emulate_insn(vcpu, 1)
/*
 * Tracepoint for an MMIO-cache lookup: gpa_match says whether the hit
 * was on the cached GPA (vs. the cached GVA).
 */
TRACE_EVENT(
	vcpu_match_mmio,
	TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
	TP_ARGS(gva, gpa, write, gpa_match),

	TP_STRUCT__entry(
		__field(gva_t, gva)
		__field(gpa_t, gpa)
		__field(bool, write)
		__field(bool, gpa_match)
		),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gpa = gpa;
		__entry->write = write;
		/* No trailing ';' needed: the TP_fast_assign framework
		 * terminates the statement list. */
		__entry->gpa_match = gpa_match
		),

	TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
		  __entry->write ? "Write" : "Read",
		  __entry->gpa_match ? "GPA" : "GVA")
);
/* Tracepoint for a change of the vcpu's TSC offset. */
TRACE_EVENT(kvm_write_tsc_offset,
	TP_PROTO(unsigned int vcpu_id, __u64 previous_tsc_offset,
		 __u64 next_tsc_offset),
	TP_ARGS(vcpu_id, previous_tsc_offset, next_tsc_offset),

	TP_STRUCT__entry(
		__field( unsigned int,	vcpu_id				)
		__field(	__u64,	previous_tsc_offset		)
		__field(	__u64,	next_tsc_offset			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->previous_tsc_offset	= previous_tsc_offset;
		__entry->next_tsc_offset	= next_tsc_offset;
	),

	TP_printk("vcpu=%u prev=%llu next=%llu", __entry->vcpu_id,
		  __entry->previous_tsc_offset, __entry->next_tsc_offset)
);
  685. #ifdef CONFIG_X86_64
  686. #define host_clocks \
  687. {VCLOCK_NONE, "none"}, \
  688. {VCLOCK_TSC, "tsc"} \
  689. TRACE_EVENT(kvm_update_master_clock,
  690. TP_PROTO(bool use_master_clock, unsigned int host_clock, bool offset_matched),
  691. TP_ARGS(use_master_clock, host_clock, offset_matched),
  692. TP_STRUCT__entry(
  693. __field( bool, use_master_clock )
  694. __field( unsigned int, host_clock )
  695. __field( bool, offset_matched )
  696. ),
  697. TP_fast_assign(
  698. __entry->use_master_clock = use_master_clock;
  699. __entry->host_clock = host_clock;
  700. __entry->offset_matched = offset_matched;
  701. ),
  702. TP_printk("masterclock %d hostclock %s offsetmatched %u",
  703. __entry->use_master_clock,
  704. __print_symbolic(__entry->host_clock, host_clocks),
  705. __entry->offset_matched)
  706. );
/*
 * Tracepoint for TSC matching tracking: how many vcpus' TSCs matched
 * out of the online set, and the masterclock / host clocksource state.
 */
TRACE_EVENT(kvm_track_tsc,
	TP_PROTO(unsigned int vcpu_id, unsigned int nr_matched,
		 unsigned int online_vcpus, bool use_master_clock,
		 unsigned int host_clock),
	TP_ARGS(vcpu_id, nr_matched, online_vcpus, use_master_clock,
		host_clock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	unsigned int,	nr_vcpus_matched_tsc	)
		__field(	unsigned int,	online_vcpus		)
		__field(	bool,		use_master_clock	)
		__field(	unsigned int,	host_clock		)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
		__entry->nr_vcpus_matched_tsc	= nr_matched;
		__entry->online_vcpus		= online_vcpus;
		__entry->use_master_clock	= use_master_clock;
		__entry->host_clock		= host_clock;
	),

	TP_printk("vcpu_id %u masterclock %u offsetmatched %u nr_online %u"
		  " hostclock %s",
		  __entry->vcpu_id, __entry->use_master_clock,
		  __entry->nr_vcpus_matched_tsc, __entry->online_vcpus,
		  __print_symbolic(__entry->host_clock, host_clocks))
);
  733. #endif /* CONFIG_X86_64 */
/*
 * Tracepoint for PML full VMEXIT.
 */
TRACE_EVENT(kvm_pml_full,
	TP_PROTO(unsigned int vcpu_id),
	TP_ARGS(vcpu_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
	),

	TP_fast_assign(
		__entry->vcpu_id		= vcpu_id;
	),

	TP_printk("vcpu %d: PML full", __entry->vcpu_id)
);
/*
 * Tracepoint for a change of the PLE (pause-loop-exiting) window:
 * records old and new window sizes and whether it grew or shrank.
 */
TRACE_EVENT(kvm_ple_window,
	TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(                bool,      grow         )
		__field(        unsigned int,   vcpu_id         )
		__field(                 int,       new         )
		__field(                 int,       old         )
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: ple_window %d (%s %d)",
	          __entry->vcpu_id,
	          __entry->new,
	          __entry->grow ? "grow" : "shrink",
	          __entry->old)
);

/* Direction-specific wrappers over the single tracepoint above. */
#define trace_kvm_ple_window_grow(vcpu_id, new, old) \
	trace_kvm_ple_window(true, vcpu_id, new, old)
#define trace_kvm_ple_window_shrink(vcpu_id, new, old) \
	trace_kvm_ple_window(false, vcpu_id, new, old)
/*
 * Tracepoint for a pvclock update: snapshots the fields of the
 * pvclock_vcpu_time_info structure published to the guest.
 */
TRACE_EVENT(kvm_pvclock_update,
	TP_PROTO(unsigned int vcpu_id, struct pvclock_vcpu_time_info *pvclock),
	TP_ARGS(vcpu_id, pvclock),

	TP_STRUCT__entry(
		__field(	unsigned int,	vcpu_id			)
		__field(	__u32,		version			)
		__field(	__u64,		tsc_timestamp		)
		__field(	__u64,		system_time		)
		__field(	__u32,		tsc_to_system_mul	)
		__field(	__s8,		tsc_shift		)
		__field(	__u8,		flags			)
	),

	TP_fast_assign(
		__entry->vcpu_id	   = vcpu_id;
		__entry->version	   = pvclock->version;
		__entry->tsc_timestamp	   = pvclock->tsc_timestamp;
		__entry->system_time	   = pvclock->system_time;
		__entry->tsc_to_system_mul = pvclock->tsc_to_system_mul;
		__entry->tsc_shift	   = pvclock->tsc_shift;
		__entry->flags		   = pvclock->flags;
	),

	TP_printk("vcpu_id %u, pvclock { version %u, tsc_timestamp 0x%llx, "
		  "system_time 0x%llx, tsc_to_system_mul 0x%x, tsc_shift %d, "
		  "flags 0x%x }",
		  __entry->vcpu_id,
		  __entry->version,
		  __entry->tsc_timestamp,
		  __entry->system_time,
		  __entry->tsc_to_system_mul,
		  __entry->tsc_shift,
		  __entry->flags)
);
/*
 * Tracepoint for waiting on LAPIC timer expiration before vcpu entry;
 * delta is the signed offset from the programmed deadline.
 */
TRACE_EVENT(kvm_wait_lapic_expire,
	TP_PROTO(unsigned int vcpu_id, s64 delta),
	TP_ARGS(vcpu_id, delta),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( s64, delta )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->delta = delta;
	),

	/* delta < 0 means we arrived before the deadline ("early"). */
	TP_printk("vcpu %u: delta %lld (%s)",
		  __entry->vcpu_id,
		  __entry->delta,
		  __entry->delta < 0 ? "early" : "late")
);
/*
 * Tracepoint for SMM transitions: fires for both entering and leaving
 * System Management Mode (the 'entering' flag selects the direction).
 */
TRACE_EVENT(kvm_enter_smm,
	TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
	TP_ARGS(vcpu_id, smbase, entering),

	TP_STRUCT__entry(
		__field( unsigned int, vcpu_id )
		__field( u64, smbase )
		__field( bool, entering )
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->smbase = smbase;
		__entry->entering = entering;
	),

	TP_printk("vcpu %u: %s SMM, smbase 0x%llx",
		  __entry->vcpu_id,
		  __entry->entering ? "entering" : "leaving",
		  __entry->smbase)
);
/*
 * Tracepoint for VT-d posted-interrupts: logs an IRTE (interrupt remap
 * table entry) update for a given host irq / guest vector pair.
 */
TRACE_EVENT(kvm_pi_irte_update,
	TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
		 unsigned int gsi, unsigned int gvec,
		 u64 pi_desc_addr, bool set),
	TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),

	TP_STRUCT__entry(
		__field( unsigned int, host_irq )
		__field( unsigned int, vcpu_id )
		__field( unsigned int, gsi )
		__field( unsigned int, gvec )
		__field( u64, pi_desc_addr )
		__field( bool, set )
	),

	TP_fast_assign(
		__entry->host_irq = host_irq;
		__entry->vcpu_id = vcpu_id;
		__entry->gsi = gsi;
		__entry->gvec = gvec;
		__entry->pi_desc_addr = pi_desc_addr;
		__entry->set = set;
	),

	TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
		  "gvec: 0x%x, pi_desc_addr: 0x%llx",
		  __entry->set ? "enabled and being updated" : "disabled",
		  __entry->host_irq,
		  __entry->vcpu_id,
		  __entry->gsi,
		  __entry->gvec,
		  __entry->pi_desc_addr)
);
/*
 * Tracepoint for kvm_hv_notify_acked_sint: a Hyper-V synthetic
 * interrupt source (SINT) has been acknowledged by the guest.
 */
TRACE_EVENT(kvm_hv_notify_acked_sint,
	TP_PROTO(int vcpu_id, u32 sint),
	TP_ARGS(vcpu_id, sint),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
	),

	TP_printk("vcpu_id %d sint %u", __entry->vcpu_id, __entry->sint)
);
/*
 * Tracepoint for synic_set_irq: delivery of an interrupt vector through
 * a Hyper-V SynIC SINT; 'ret' is the delivery result reported by the
 * caller.
 */
TRACE_EVENT(kvm_hv_synic_set_irq,
	TP_PROTO(int vcpu_id, u32 sint, int vector, int ret),
	TP_ARGS(vcpu_id, sint, vector, ret),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(u32, sint)
		__field(int, vector)
		__field(int, ret)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->sint = sint;
		__entry->vector = vector;
		__entry->ret = ret;
	),

	TP_printk("vcpu_id %d sint %u vector %d ret %d",
		  __entry->vcpu_id, __entry->sint, __entry->vector,
		  __entry->ret)
);
  910. /*
  911. * Tracepoint for kvm_hv_synic_send_eoi.
  912. */
  913. TRACE_EVENT(kvm_hv_synic_send_eoi,
  914. TP_PROTO(int vcpu_id, int vector),
  915. TP_ARGS(vcpu_id, vector),
  916. TP_STRUCT__entry(
  917. __field(int, vcpu_id)
  918. __field(u32, sint)
  919. __field(int, vector)
  920. __field(int, ret)
  921. ),
  922. TP_fast_assign(
  923. __entry->vcpu_id = vcpu_id;
  924. __entry->vector = vector;
  925. ),
  926. TP_printk("vcpu_id %d vector %d", __entry->vcpu_id, __entry->vector)
  927. );
  928. /*
  929. * Tracepoint for synic_set_msr.
  930. */
  931. TRACE_EVENT(kvm_hv_synic_set_msr,
  932. TP_PROTO(int vcpu_id, u32 msr, u64 data, bool host),
  933. TP_ARGS(vcpu_id, msr, data, host),
  934. TP_STRUCT__entry(
  935. __field(int, vcpu_id)
  936. __field(u32, msr)
  937. __field(u64, data)
  938. __field(bool, host)
  939. ),
  940. TP_fast_assign(
  941. __entry->vcpu_id = vcpu_id;
  942. __entry->msr = msr;
  943. __entry->data = data;
  944. __entry->host = host
  945. ),
  946. TP_printk("vcpu_id %d msr 0x%x data 0x%llx host %d",
  947. __entry->vcpu_id, __entry->msr, __entry->data, __entry->host)
  948. );
/*
 * Tracepoint for stimer_set_config: the config MSR of a Hyper-V
 * synthetic timer was written.
 */
TRACE_EVENT(kvm_hv_stimer_set_config,
	TP_PROTO(int vcpu_id, int timer_index, u64 config, bool host),
	TP_ARGS(vcpu_id, timer_index, config, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, config)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->config = config;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d config 0x%llx host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->config,
		  __entry->host)
);
/*
 * Tracepoint for stimer_set_count: the count MSR of a Hyper-V synthetic
 * timer was written.
 */
TRACE_EVENT(kvm_hv_stimer_set_count,
	TP_PROTO(int vcpu_id, int timer_index, u64 count, bool host),
	TP_ARGS(vcpu_id, timer_index, count, host),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, count)
		__field(bool, host)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->count = count;
		__entry->host = host;
	),

	TP_printk("vcpu_id %d timer %d count %llu host %d",
		  __entry->vcpu_id, __entry->timer_index, __entry->count,
		  __entry->host)
);
/*
 * Tracepoint for stimer_start (periodic timer case): records current
 * time and computed expiration time of the periodic synthetic timer.
 */
TRACE_EVENT(kvm_hv_stimer_start_periodic,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 exp_time),
	TP_ARGS(vcpu_id, timer_index, time_now, exp_time),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, exp_time)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->exp_time = exp_time;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu exp_time %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->exp_time)
);
/*
 * Tracepoint for stimer_start (one-shot timer case): records current
 * time and the programmed absolute count of the one-shot timer.
 */
TRACE_EVENT(kvm_hv_stimer_start_one_shot,
	TP_PROTO(int vcpu_id, int timer_index, u64 time_now, u64 count),
	TP_ARGS(vcpu_id, timer_index, time_now, count),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(u64, time_now)
		__field(u64, count)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->time_now = time_now;
		__entry->count = count;
	),

	TP_printk("vcpu_id %d timer %d time_now %llu count %llu",
		  __entry->vcpu_id, __entry->timer_index, __entry->time_now,
		  __entry->count)
);
/*
 * Tracepoint for stimer_timer_callback: the hrtimer backing a Hyper-V
 * synthetic timer fired.
 */
TRACE_EVENT(kvm_hv_stimer_callback,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);
/*
 * Tracepoint for stimer_expiration: a synthetic timer expired and its
 * expiration message was (attempted to be) sent; msg_send_result holds
 * the send status.
 */
TRACE_EVENT(kvm_hv_stimer_expiration,
	TP_PROTO(int vcpu_id, int timer_index, int msg_send_result),
	TP_ARGS(vcpu_id, timer_index, msg_send_result),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
		__field(int, msg_send_result)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
		__entry->msg_send_result = msg_send_result;
	),

	TP_printk("vcpu_id %d timer %d msg send result %d",
		  __entry->vcpu_id, __entry->timer_index,
		  __entry->msg_send_result)
);
/*
 * Tracepoint for stimer_cleanup: a Hyper-V synthetic timer is being
 * stopped/cleaned up.
 */
TRACE_EVENT(kvm_hv_stimer_cleanup,
	TP_PROTO(int vcpu_id, int timer_index),
	TP_ARGS(vcpu_id, timer_index),

	TP_STRUCT__entry(
		__field(int, vcpu_id)
		__field(int, timer_index)
	),

	TP_fast_assign(
		__entry->vcpu_id = vcpu_id;
		__entry->timer_index = timer_index;
	),

	TP_printk("vcpu_id %d timer %d",
		  __entry->vcpu_id, __entry->timer_index)
);
  1091. /*
  1092. * Tracepoint for AMD AVIC
  1093. */
  1094. TRACE_EVENT(kvm_avic_incomplete_ipi,
  1095. TP_PROTO(u32 vcpu, u32 icrh, u32 icrl, u32 id, u32 index),
  1096. TP_ARGS(vcpu, icrh, icrl, id, index),
  1097. TP_STRUCT__entry(
  1098. __field(u32, vcpu)
  1099. __field(u32, icrh)
  1100. __field(u32, icrl)
  1101. __field(u32, id)
  1102. __field(u32, index)
  1103. ),
  1104. TP_fast_assign(
  1105. __entry->vcpu = vcpu;
  1106. __entry->icrh = icrh;
  1107. __entry->icrl = icrl;
  1108. __entry->id = id;
  1109. __entry->index = index;
  1110. ),
  1111. TP_printk("vcpu=%u, icrh:icrl=%#010x:%08x, id=%u, index=%u\n",
  1112. __entry->vcpu, __entry->icrh, __entry->icrl,
  1113. __entry->id, __entry->index)
  1114. );
  1115. TRACE_EVENT(kvm_avic_unaccelerated_access,
  1116. TP_PROTO(u32 vcpu, u32 offset, bool ft, bool rw, u32 vec),
  1117. TP_ARGS(vcpu, offset, ft, rw, vec),
  1118. TP_STRUCT__entry(
  1119. __field(u32, vcpu)
  1120. __field(u32, offset)
  1121. __field(bool, ft)
  1122. __field(bool, rw)
  1123. __field(u32, vec)
  1124. ),
  1125. TP_fast_assign(
  1126. __entry->vcpu = vcpu;
  1127. __entry->offset = offset;
  1128. __entry->ft = ft;
  1129. __entry->rw = rw;
  1130. __entry->vec = vec;
  1131. ),
  1132. TP_printk("vcpu=%u, offset=%#x(%s), %s, %s, vec=%#x\n",
  1133. __entry->vcpu,
  1134. __entry->offset,
  1135. __print_symbolic(__entry->offset, kvm_trace_symbol_apic),
  1136. __entry->ft ? "trap" : "fault",
  1137. __entry->rw ? "write" : "read",
  1138. __entry->vec)
  1139. );
  1140. TRACE_EVENT(kvm_hv_timer_state,
  1141. TP_PROTO(unsigned int vcpu_id, unsigned int hv_timer_in_use),
  1142. TP_ARGS(vcpu_id, hv_timer_in_use),
  1143. TP_STRUCT__entry(
  1144. __field(unsigned int, vcpu_id)
  1145. __field(unsigned int, hv_timer_in_use)
  1146. ),
  1147. TP_fast_assign(
  1148. __entry->vcpu_id = vcpu_id;
  1149. __entry->hv_timer_in_use = hv_timer_in_use;
  1150. ),
  1151. TP_printk("vcpu_id %x hv_timer %x\n",
  1152. __entry->vcpu_id,
  1153. __entry->hv_timer_in_use)
  1154. );
/*
 * Tracepoint for kvm_hv_flush_tlb: a Hyper-V TLB-flush hypercall with a
 * plain processor mask.
 */
TRACE_EVENT(kvm_hv_flush_tlb,
	TP_PROTO(u64 processor_mask, u64 address_space, u64 flags),
	TP_ARGS(processor_mask, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, processor_mask)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->processor_mask = processor_mask;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("processor_mask 0x%llx address_space 0x%llx flags 0x%llx",
		  __entry->processor_mask, __entry->address_space,
		  __entry->flags)
);
/*
 * Tracepoint for kvm_hv_flush_tlb_ex: the extended (sparse bank format)
 * variant of the Hyper-V TLB-flush hypercall.
 */
TRACE_EVENT(kvm_hv_flush_tlb_ex,
	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
	TP_ARGS(valid_bank_mask, format, address_space, flags),

	TP_STRUCT__entry(
		__field(u64, valid_bank_mask)
		__field(u64, format)
		__field(u64, address_space)
		__field(u64, flags)
	),

	TP_fast_assign(
		__entry->valid_bank_mask = valid_bank_mask;
		__entry->format = format;
		__entry->address_space = address_space;
		__entry->flags = flags;
	),

	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
		  "address_space 0x%llx flags 0x%llx",
		  __entry->valid_bank_mask, __entry->format,
		  __entry->address_space, __entry->flags)
);
  1198. #endif /* _TRACE_KVM_H */
  1199. #undef TRACE_INCLUDE_PATH
  1200. #define TRACE_INCLUDE_PATH arch/x86/kvm
  1201. #undef TRACE_INCLUDE_FILE
  1202. #define TRACE_INCLUDE_FILE trace
  1203. /* This part must be outside protection */
  1204. #include <trace/define_trace.h>