msa.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * MIPS SIMD Architecture (MSA) context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>

	.set	noreorder
	.set	noat

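/*
 * __kvm_save_msa(): save the full MSA vector context. a0 carries the vcpu
 * register context pointer; each st_d stores one complete 128-bit vector
 * register (as doubleword elements) at its VCPU_FPRn slot.
 */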
LEAF(__kvm_save_msa)
	st_d	0,  VCPU_FPR0,  a0
	st_d	1,  VCPU_FPR1,  a0
	st_d	2,  VCPU_FPR2,  a0
	st_d	3,  VCPU_FPR3,  a0
	st_d	4,  VCPU_FPR4,  a0
	st_d	5,  VCPU_FPR5,  a0
	st_d	6,  VCPU_FPR6,  a0
	st_d	7,  VCPU_FPR7,  a0
	st_d	8,  VCPU_FPR8,  a0
	st_d	9,  VCPU_FPR9,  a0
	st_d	10, VCPU_FPR10, a0
	st_d	11, VCPU_FPR11, a0
	st_d	12, VCPU_FPR12, a0
	st_d	13, VCPU_FPR13, a0
	st_d	14, VCPU_FPR14, a0
	st_d	15, VCPU_FPR15, a0
	st_d	16, VCPU_FPR16, a0
	st_d	17, VCPU_FPR17, a0
	st_d	18, VCPU_FPR18, a0
	st_d	19, VCPU_FPR19, a0
	st_d	20, VCPU_FPR20, a0
	st_d	21, VCPU_FPR21, a0
	st_d	22, VCPU_FPR22, a0
	st_d	23, VCPU_FPR23, a0
	st_d	24, VCPU_FPR24, a0
	st_d	25, VCPU_FPR25, a0
	st_d	26, VCPU_FPR26, a0
	st_d	27, VCPU_FPR27, a0
	st_d	28, VCPU_FPR28, a0
	st_d	29, VCPU_FPR29, a0
	st_d	30, VCPU_FPR30, a0
	st_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_save_msa)

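/*
 * __kvm_restore_msa(): the inverse of __kvm_save_msa; reload all 32 MSA
 * vector registers from the context pointed to by a0.
 */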
LEAF(__kvm_restore_msa)
	ld_d	0,  VCPU_FPR0,  a0
	ld_d	1,  VCPU_FPR1,  a0
	ld_d	2,  VCPU_FPR2,  a0
	ld_d	3,  VCPU_FPR3,  a0
	ld_d	4,  VCPU_FPR4,  a0
	ld_d	5,  VCPU_FPR5,  a0
	ld_d	6,  VCPU_FPR6,  a0
	ld_d	7,  VCPU_FPR7,  a0
	ld_d	8,  VCPU_FPR8,  a0
	ld_d	9,  VCPU_FPR9,  a0
	ld_d	10, VCPU_FPR10, a0
	ld_d	11, VCPU_FPR11, a0
	ld_d	12, VCPU_FPR12, a0
	ld_d	13, VCPU_FPR13, a0
	ld_d	14, VCPU_FPR14, a0
	ld_d	15, VCPU_FPR15, a0
	ld_d	16, VCPU_FPR16, a0
	ld_d	17, VCPU_FPR17, a0
	ld_d	18, VCPU_FPR18, a0
	ld_d	19, VCPU_FPR19, a0
	ld_d	20, VCPU_FPR20, a0
	ld_d	21, VCPU_FPR21, a0
	ld_d	22, VCPU_FPR22, a0
	ld_d	23, VCPU_FPR23, a0
	ld_d	24, VCPU_FPR24, a0
	ld_d	25, VCPU_FPR25, a0
	ld_d	26, VCPU_FPR26, a0
	ld_d	27, VCPU_FPR27, a0
	ld_d	28, VCPU_FPR28, a0
	ld_d	29, VCPU_FPR29, a0
	ld_d	30, VCPU_FPR30, a0
	ld_d	31, VCPU_FPR31, a0
	jr	ra
	 nop
	END(__kvm_restore_msa)

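/*
 * kvm_restore_msa_upper: reload only the upper 64 bits of vector register
 * \wr from \off(\base), leaving the lower half (which aliases the scalar
 * FP register) untouched. A 64-bit kernel can do this with a single
 * doubleword insert; a 32-bit kernel inserts two words, ordered to match
 * the endian-dependent layout of the saved image.
 */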
	.macro	kvm_restore_msa_upper	wr, off, base
	.set	push
	.set	noat
#ifdef CONFIG_64BIT
	ld	$1, \off(\base)
	insert_d \wr, 1
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
	lw	$1, \off(\base)
	insert_w \wr, 2
	lw	$1, (\off+4)(\base)
	insert_w \wr, 3
#else /* CONFIG_CPU_BIG_ENDIAN */
	lw	$1, (\off+4)(\base)
	insert_w \wr, 2
	lw	$1, \off(\base)
	insert_w \wr, 3
#endif
	.set	pop
	.endm

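/*
 * __kvm_restore_msa_upper(): restore the upper halves of all 32 vector
 * registers, for when the lower 64 bits are already live in the scalar FPU
 * state. Each value is read from VCPU_FPRn+8, the high doubleword of the
 * 128-bit save slot.
 */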
LEAF(__kvm_restore_msa_upper)
	kvm_restore_msa_upper	0,  VCPU_FPR0 +8, a0
	kvm_restore_msa_upper	1,  VCPU_FPR1 +8, a0
	kvm_restore_msa_upper	2,  VCPU_FPR2 +8, a0
	kvm_restore_msa_upper	3,  VCPU_FPR3 +8, a0
	kvm_restore_msa_upper	4,  VCPU_FPR4 +8, a0
	kvm_restore_msa_upper	5,  VCPU_FPR5 +8, a0
	kvm_restore_msa_upper	6,  VCPU_FPR6 +8, a0
	kvm_restore_msa_upper	7,  VCPU_FPR7 +8, a0
	kvm_restore_msa_upper	8,  VCPU_FPR8 +8, a0
	kvm_restore_msa_upper	9,  VCPU_FPR9 +8, a0
	kvm_restore_msa_upper	10, VCPU_FPR10+8, a0
	kvm_restore_msa_upper	11, VCPU_FPR11+8, a0
	kvm_restore_msa_upper	12, VCPU_FPR12+8, a0
	kvm_restore_msa_upper	13, VCPU_FPR13+8, a0
	kvm_restore_msa_upper	14, VCPU_FPR14+8, a0
	kvm_restore_msa_upper	15, VCPU_FPR15+8, a0
	kvm_restore_msa_upper	16, VCPU_FPR16+8, a0
	kvm_restore_msa_upper	17, VCPU_FPR17+8, a0
	kvm_restore_msa_upper	18, VCPU_FPR18+8, a0
	kvm_restore_msa_upper	19, VCPU_FPR19+8, a0
	kvm_restore_msa_upper	20, VCPU_FPR20+8, a0
	kvm_restore_msa_upper	21, VCPU_FPR21+8, a0
	kvm_restore_msa_upper	22, VCPU_FPR22+8, a0
	kvm_restore_msa_upper	23, VCPU_FPR23+8, a0
	kvm_restore_msa_upper	24, VCPU_FPR24+8, a0
	kvm_restore_msa_upper	25, VCPU_FPR25+8, a0
	kvm_restore_msa_upper	26, VCPU_FPR26+8, a0
	kvm_restore_msa_upper	27, VCPU_FPR27+8, a0
	kvm_restore_msa_upper	28, VCPU_FPR28+8, a0
	kvm_restore_msa_upper	29, VCPU_FPR29+8, a0
	kvm_restore_msa_upper	30, VCPU_FPR30+8, a0
	kvm_restore_msa_upper	31, VCPU_FPR31+8, a0
	jr	ra
	 nop
	END(__kvm_restore_msa_upper)

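/*
 * __kvm_restore_msacsr(): write the saved guest MSACSR value back to the
 * MSA control/status register. Restoring set cause bits can itself raise
 * an MSA FP exception, hence the placement constraint documented below.
 */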
LEAF(__kvm_restore_msacsr)
	lw	t0, VCPU_MSA_CSR(a0)
	/*
	 * The ctcmsa must stay at this offset in __kvm_restore_msacsr.
	 * See kvm_mips_csr_die_notify() which handles t0 containing a value
	 * which triggers an MSA FP Exception, which must be stepped over and
	 * ignored since the set cause bits must remain there for the guest.
	 */
	_ctcmsa	MSA_CSR, t0
	jr	ra
	 nop
	END(__kvm_restore_msacsr)