  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  7. * Authors: Sanjay Lal <sanjayl@kymasys.com>
  8. */
/*
 * The guard deliberately admits re-reads when TRACE_HEADER_MULTI_READ is
 * set: define_trace.h (included at the bottom) re-includes this file to
 * expand the event macros a second time.
 */
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

/* All events below live under the "kvm" trace system in tracefs. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
/* Tell define_trace.h where to find this header ("./trace.h") on re-read. */
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
  16. /*
  17. * Tracepoints for VM enters
  18. */
/*
 * Event class shared by the VM enter/re-enter/exit tracepoints defined
 * below: each records only the guest program counter at the transition.
 */
DECLARE_EVENT_CLASS(kvm_transition,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),
	TP_STRUCT__entry(
		/* Guest PC at the time the tracepoint fires */
		__field(unsigned long, pc)
	),

	TP_fast_assign(
		__entry->pc = vcpu->arch.pc;
	),

	TP_printk("PC: 0x%08lx",
		  __entry->pc)
);
/* Entry into guest execution. */
DEFINE_EVENT(kvm_transition, kvm_enter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

/*
 * Re-entry into the guest — presumably after an exit was handled without
 * returning to userspace (name-based reading; confirm against callers).
 */
DEFINE_EVENT(kvm_transition, kvm_reenter,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));

/* Exit from the guest back to the host. */
DEFINE_EVENT(kvm_transition, kvm_out,
	     TP_PROTO(struct kvm_vcpu *vcpu),
	     TP_ARGS(vcpu));
/*
 * Exit reason codes for the kvm_exit tracepoint.  The first 32 values
 * (0..31) correspond directly to the CP0 Cause.ExcCode exception codes,
 * which is why the numbering below is sparse (unused ExcCode values are
 * simply skipped).
 */
#define KVM_TRACE_EXIT_INT		 0
#define KVM_TRACE_EXIT_TLBMOD		 1
#define KVM_TRACE_EXIT_TLBMISS_LD	 2
#define KVM_TRACE_EXIT_TLBMISS_ST	 3
#define KVM_TRACE_EXIT_ADDRERR_LD	 4
#define KVM_TRACE_EXIT_ADDRERR_ST	 5
#define KVM_TRACE_EXIT_SYSCALL		 8
#define KVM_TRACE_EXIT_BREAK_INST	 9
#define KVM_TRACE_EXIT_RESVD_INST	10
#define KVM_TRACE_EXIT_COP_UNUSABLE	11
#define KVM_TRACE_EXIT_TRAP_INST	13
#define KVM_TRACE_EXIT_MSA_FPE		14
#define KVM_TRACE_EXIT_FPE		15
#define KVM_TRACE_EXIT_MSA_DISABLED	21
/* Further exit reasons — software-defined, outside the ExcCode range */
#define KVM_TRACE_EXIT_WAIT		32
#define KVM_TRACE_EXIT_CACHE		33
#define KVM_TRACE_EXIT_SIGNAL		34
/* Tracepoints for VM exits */

/*
 * Symbolic names for the exit reason codes above, consumed by
 * __print_symbolic() in the kvm_exit tracepoint.  These strings become
 * part of the event's "print fmt" exposed to userspace, so treat them
 * as ABI: do not rename casually.
 *
 * NOTE(review): "Address Err (ST)" is worded inconsistently with
 * "Address Error (LD)"; left as-is because trace-parsing tools may
 * match the existing string.
 */
#define kvm_trace_symbol_exit_types				\
	{ KVM_TRACE_EXIT_INT,		"Interrupt" },		\
	{ KVM_TRACE_EXIT_TLBMOD,	"TLB Mod" },		\
	{ KVM_TRACE_EXIT_TLBMISS_LD,	"TLB Miss (LD)" },	\
	{ KVM_TRACE_EXIT_TLBMISS_ST,	"TLB Miss (ST)" },	\
	{ KVM_TRACE_EXIT_ADDRERR_LD,	"Address Error (LD)" },	\
	{ KVM_TRACE_EXIT_ADDRERR_ST,	"Address Err (ST)" },	\
	{ KVM_TRACE_EXIT_SYSCALL,	"System Call" },	\
	{ KVM_TRACE_EXIT_BREAK_INST,	"Break Inst" },		\
	{ KVM_TRACE_EXIT_RESVD_INST,	"Reserved Inst" },	\
	{ KVM_TRACE_EXIT_COP_UNUSABLE,	"COP0/1 Unusable" },	\
	{ KVM_TRACE_EXIT_TRAP_INST,	"Trap Inst" },		\
	{ KVM_TRACE_EXIT_MSA_FPE,	"MSA FPE" },		\
	{ KVM_TRACE_EXIT_FPE,		"FPE" },		\
	{ KVM_TRACE_EXIT_MSA_DISABLED,	"MSA Disabled" },	\
	{ KVM_TRACE_EXIT_WAIT,		"WAIT" },		\
	{ KVM_TRACE_EXIT_CACHE,		"CACHE" },		\
	{ KVM_TRACE_EXIT_SIGNAL,	"Signal" }
/*
 * Fired on a VM exit; records the guest PC and the exit reason
 * (one of the KVM_TRACE_EXIT_* codes, printed symbolically).
 */
TRACE_EVENT(kvm_exit,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
	    TP_ARGS(vcpu, reason),
	    TP_STRUCT__entry(
			/* Guest PC at the point of exit */
			__field(unsigned long, pc)
			/* KVM_TRACE_EXIT_* code */
			__field(unsigned int, reason)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->reason = reason;
	    ),

	    TP_printk("[%s]PC: 0x%08lx",
		      __print_symbolic(__entry->reason,
				       kvm_trace_symbol_exit_types),
		      __entry->pc)
);
/* Operation kinds traced by the kvm_hwr event. */
#define KVM_TRACE_MFC0		0
#define KVM_TRACE_MTC0		1
#define KVM_TRACE_DMFC0		2
#define KVM_TRACE_DMTC0		3
#define KVM_TRACE_RDHWR		4

/* Register namespace selectors (bit 8 of the encoded register value). */
#define KVM_TRACE_HWR_COP0	0
#define KVM_TRACE_HWR_HWR	1

/*
 * Encode a register reference into a single value:
 *   bit 8     — namespace (COP0 register vs hardware register),
 *   bits 3..7 — register number,
 *   bits 0..2 — select field.
 * The kvm_hwr TP_printk below decodes it with the matching shifts/masks.
 */
#define KVM_TRACE_COP0(REG, SEL)	((KVM_TRACE_HWR_COP0 << 8) |	\
					 ((REG) << 3) | (SEL))
#define KVM_TRACE_HWR(REG, SEL)		((KVM_TRACE_HWR_HWR  << 8) |	\
					 ((REG) << 3) | (SEL))

/* Symbolic names for the operation kinds, for __print_symbolic(). */
#define kvm_trace_symbol_hwr_ops	\
	{ KVM_TRACE_MFC0,	"MFC0" },	\
	{ KVM_TRACE_MTC0,	"MTC0" },	\
	{ KVM_TRACE_DMFC0,	"DMFC0" },	\
	{ KVM_TRACE_DMTC0,	"DMTC0" },	\
	{ KVM_TRACE_RDHWR,	"RDHWR" }

/* Symbolic names for the namespace bit, for __print_symbolic(). */
#define kvm_trace_symbol_hwr_cop	\
	{ KVM_TRACE_HWR_COP0,	"COP0" },	\
	{ KVM_TRACE_HWR_HWR,	"HWR" }
/*
 * Symbolic names for the encoded (register, select) pairs used by the
 * kvm_hwr tracepoint.  Registers not listed here are printed by
 * __print_symbolic() as a raw hex value instead of a name.
 */
#define kvm_trace_symbol_hwr_regs	\
	{ KVM_TRACE_COP0( 0, 0),	"Index" },	\
	{ KVM_TRACE_COP0( 2, 0),	"EntryLo0" },	\
	{ KVM_TRACE_COP0( 3, 0),	"EntryLo1" },	\
	{ KVM_TRACE_COP0( 4, 0),	"Context" },	\
	{ KVM_TRACE_COP0( 4, 2),	"UserLocal" },	\
	{ KVM_TRACE_COP0( 5, 0),	"PageMask" },	\
	{ KVM_TRACE_COP0( 6, 0),	"Wired" },	\
	{ KVM_TRACE_COP0( 7, 0),	"HWREna" },	\
	{ KVM_TRACE_COP0( 8, 0),	"BadVAddr" },	\
	{ KVM_TRACE_COP0( 9, 0),	"Count" },	\
	{ KVM_TRACE_COP0(10, 0),	"EntryHi" },	\
	{ KVM_TRACE_COP0(11, 0),	"Compare" },	\
	{ KVM_TRACE_COP0(12, 0),	"Status" },	\
	{ KVM_TRACE_COP0(12, 1),	"IntCtl" },	\
	{ KVM_TRACE_COP0(12, 2),	"SRSCtl" },	\
	{ KVM_TRACE_COP0(13, 0),	"Cause" },	\
	{ KVM_TRACE_COP0(14, 0),	"EPC" },	\
	{ KVM_TRACE_COP0(15, 0),	"PRId" },	\
	{ KVM_TRACE_COP0(15, 1),	"EBase" },	\
	{ KVM_TRACE_COP0(16, 0),	"Config" },	\
	{ KVM_TRACE_COP0(16, 1),	"Config1" },	\
	{ KVM_TRACE_COP0(16, 2),	"Config2" },	\
	{ KVM_TRACE_COP0(16, 3),	"Config3" },	\
	{ KVM_TRACE_COP0(16, 4),	"Config4" },	\
	{ KVM_TRACE_COP0(16, 5),	"Config5" },	\
	{ KVM_TRACE_COP0(16, 7),	"Config7" },	\
	{ KVM_TRACE_COP0(26, 0),	"ECC" },	\
	{ KVM_TRACE_COP0(30, 0),	"ErrorEPC" },	\
	{ KVM_TRACE_COP0(31, 2),	"KScratch1" },	\
	{ KVM_TRACE_COP0(31, 3),	"KScratch2" },	\
	{ KVM_TRACE_COP0(31, 4),	"KScratch3" },	\
	{ KVM_TRACE_COP0(31, 5),	"KScratch4" },	\
	{ KVM_TRACE_COP0(31, 6),	"KScratch5" },	\
	{ KVM_TRACE_COP0(31, 7),	"KScratch6" },	\
	{ KVM_TRACE_HWR( 0, 0),		"CPUNum" },	\
	{ KVM_TRACE_HWR( 1, 0),		"SYNCI_Step" },	\
	{ KVM_TRACE_HWR( 2, 0),		"CC" },		\
	{ KVM_TRACE_HWR( 3, 0),		"CCRes" },	\
	{ KVM_TRACE_HWR(29, 0),		"ULR" }
/*
 * Traces an emulated COP0 / hardware-register access.
 * @op:  KVM_TRACE_{MFC0,MTC0,DMFC0,DMTC0,RDHWR} — access kind.
 * @reg: register encoded with KVM_TRACE_COP0()/KVM_TRACE_HWR()
 *       (namespace in bit 8, register number in bits 3..7, sel in 0..2).
 * @val: value read from or written to the register.
 */
TRACE_EVENT(kvm_hwr,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
		     unsigned long val),
	    TP_ARGS(vcpu, op, reg, val),
	    TP_STRUCT__entry(
			__field(unsigned long, val)
			/* reg and op are narrowed to u16/u8 to keep the
			 * trace record small; the encodings fit. */
			__field(u16, reg)
			__field(u8, op)
	    ),

	    TP_fast_assign(
			__entry->val = val;
			__entry->reg = reg;
			__entry->op = op;
	    ),

	    /* e.g. "MTC0 Status (COP0:12:0) 0x..." — the three fields after
	     * the register name decode namespace, register number and sel. */
	    TP_printk("%s %s (%s:%u:%u) 0x%08lx",
		      __print_symbolic(__entry->op,
				       kvm_trace_symbol_hwr_ops),
		      __print_symbolic(__entry->reg,
				       kvm_trace_symbol_hwr_regs),
		      __print_symbolic(__entry->reg >> 8,
				       kvm_trace_symbol_hwr_cop),
		      (__entry->reg >> 3) & 0x1f,
		      __entry->reg & 0x7,
		      __entry->val)
);
/* Operations on auxiliary (FPU/MSA) vcpu context, for the kvm_aux event. */
#define KVM_TRACE_AUX_RESTORE		0
#define KVM_TRACE_AUX_SAVE		1
#define KVM_TRACE_AUX_ENABLE		2
#define KVM_TRACE_AUX_DISABLE		3
#define KVM_TRACE_AUX_DISCARD		4

/* Which auxiliary state is affected; FPU_MSA == FPU | MSA. */
#define KVM_TRACE_AUX_FPU		1
#define KVM_TRACE_AUX_MSA		2
#define KVM_TRACE_AUX_FPU_MSA		3

/* Symbolic names for the operations, for __print_symbolic(). */
#define kvm_trace_symbol_aux_op		\
	{ KVM_TRACE_AUX_RESTORE,	"restore" },	\
	{ KVM_TRACE_AUX_SAVE,		"save" },	\
	{ KVM_TRACE_AUX_ENABLE,		"enable" },	\
	{ KVM_TRACE_AUX_DISABLE,	"disable" },	\
	{ KVM_TRACE_AUX_DISCARD,	"discard" }

/* Symbolic names for the affected state, for __print_symbolic(). */
#define kvm_trace_symbol_aux_state	\
	{ KVM_TRACE_AUX_FPU,		"FPU" },	\
	{ KVM_TRACE_AUX_MSA,		"MSA" },	\
	{ KVM_TRACE_AUX_FPU_MSA,	"FPU & MSA" }
/*
 * Traces save/restore/enable/disable/discard operations on auxiliary
 * guest context (FPU and/or MSA), together with the guest PC.
 * @op:    KVM_TRACE_AUX_* operation code.
 * @state: KVM_TRACE_AUX_{FPU,MSA,FPU_MSA} — which unit(s) are affected.
 */
TRACE_EVENT(kvm_aux,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
		     unsigned int state),
	    TP_ARGS(vcpu, op, state),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(u8, op)
			__field(u8, state)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->op = op;
			__entry->state = state;
	    ),

	    TP_printk("%s %s PC: 0x%08lx",
		      __print_symbolic(__entry->op,
				       kvm_trace_symbol_aux_op),
		      __print_symbolic(__entry->state,
				       kvm_trace_symbol_aux_state),
		      __entry->pc)
);
/*
 * Traces a change of the guest's ASID, recording the guest PC and the
 * old/new ASID values.  ASIDs are stored as u8 — presumably because the
 * MIPS ASID field is 8 bits wide; wider inputs would be truncated here
 * (confirm against the architecture in use).
 */
TRACE_EVENT(kvm_asid_change,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
		     unsigned int new_asid),
	    TP_ARGS(vcpu, old_asid, new_asid),
	    TP_STRUCT__entry(
			__field(unsigned long, pc)
			__field(u8, old_asid)
			__field(u8, new_asid)
	    ),

	    TP_fast_assign(
			__entry->pc = vcpu->arch.pc;
			__entry->old_asid = old_asid;
			__entry->new_asid = new_asid;
	    ),

	    TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
		      __entry->pc,
		      __entry->old_asid,
		      __entry->new_asid)
);
  237. #endif /* _TRACE_KVM_H */
  238. /* This part must be outside protection */
  239. #include <trace/define_trace.h>