kvm_cache_regs.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS				  \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)

/*
 * Read a general-purpose register from the per-vCPU cache, asking the
 * vendor module (VMX/SVM) to fill the cache first if the value is stale.
 */
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
					      enum kvm_reg reg)
{
	if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, reg);

	return vcpu->arch.regs[reg];
}

/* Write a register to the cache and mark it both available and dirty. */
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
				      enum kvm_reg reg,
				      unsigned long val)
{
	vcpu->arch.regs[reg] = val;
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */
	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		kvm_x86_ops->cache_reg(vcpu, (enum kvm_reg)VCPU_EXREG_PDPTR);

	return vcpu->arch.walk_mmu->pdptrs[index];
}

/*
 * Guest-owned CR0/CR4 bits live in hardware; decache them from the
 * vendor module before reading if any requested bit is guest-owned.
 */
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if (tmask & vcpu->arch.cr0_guest_owned_bits)
		kvm_x86_ops->decache_cr0_guest_bits(vcpu);
	return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if (tmask & vcpu->arch.cr4_guest_owned_bits)
		kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	return vcpu->arch.cr4 & mask;
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
		kvm_x86_ops->decache_cr3(vcpu);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

/* Combine EDX:EAX into one 64-bit value, e.g. for RDMSR emulation. */
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

/* Nested virtualization: track whether the vCPU is running L2. */
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

static inline bool is_smm(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_SMM_MASK;
}

#endif
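
For context, a minimal sketch of how an exit handler would typically use these accessors. The handler name, the returned value, and the fixed 2-byte instruction length are illustrative assumptions, not from this file; the pattern simply shows why writes must go through the cache so regs_dirty is updated before the vendor module resumes the guest.

/*
 * Hypothetical exit handler: return a 64-bit result in EDX:EAX and
 * skip the emulated instruction. Illustrative only; real KVM handlers
 * obtain the instruction length from the vendor module, not a constant.
 */
static int demo_handle_exit(struct kvm_vcpu *vcpu)
{
	u64 data = 0x1234;	/* assumed result value */

	/* Split the result across EAX/EDX via the register cache, which
	 * sets regs_dirty so the hardware state is flushed on vmentry. */
	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
	kvm_register_write(vcpu, VCPU_REGS_RDX, (u32)(data >> 32));

	/* Advance past the emulated instruction (length assumed here). */
	kvm_rip_write(vcpu, kvm_rip_read(vcpu) + 2);
	return 1;	/* resume the guest */
}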