/* tlb.c */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
  7. * TLB handlers run from KSEG0
  8. *
  9. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  10. * Authors: Sanjay Lal <sanjayl@kymasys.com>
  11. */
  12. #include <linux/sched.h>
  13. #include <linux/smp.h>
  14. #include <linux/mm.h>
  15. #include <linux/delay.h>
  16. #include <linux/export.h>
  17. #include <linux/kvm_host.h>
  18. #include <linux/srcu.h>
  19. #include <asm/cpu.h>
  20. #include <asm/bootinfo.h>
  21. #include <asm/mmu_context.h>
  22. #include <asm/pgtable.h>
  23. #include <asm/cacheflush.h>
  24. #include <asm/tlb.h>
  25. #include <asm/tlbdebug.h>
  26. #undef CONFIG_MIPS_MT
  27. #include <asm/r4kcache.h>
  28. #define CONFIG_MIPS_MT
  29. #define KVM_GUEST_PC_TLB 0
  30. #define KVM_GUEST_SP_TLB 1
  31. atomic_t kvm_mips_instance;
  32. EXPORT_SYMBOL_GPL(kvm_mips_instance);
  33. static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
  34. {
  35. int cpu = smp_processor_id();
  36. return vcpu->arch.guest_kernel_asid[cpu] &
  37. cpu_asid_mask(&cpu_data[cpu]);
  38. }
  39. static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
  40. {
  41. int cpu = smp_processor_id();
  42. return vcpu->arch.guest_user_asid[cpu] &
  43. cpu_asid_mask(&cpu_data[cpu]);
  44. }
/*
 * Despite the name, this returns the host TLB *index* reserved for the
 * guest commpage mapping (kvm->arch.commpage_tlb), not an ASID; the
 * caller writes it into CP0_Index before tlb_write_indexed() (see
 * kvm_mips_handle_commpage_tlb_fault()).
 */
inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
  49. /* Structure defining an tlb entry data set. */
/*
 * Dump the host (hardware) TLB registers and all TLB entries via the
 * tlbdebug helpers.  Interrupts are disabled for the duration so the
 * TLB state cannot change underneath the dump.
 */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
/*
 * Dump the guest's software-maintained TLB (vcpu->arch.guest_tlb).
 * Entries where neither EntryLo half has the valid bit set are marked
 * with a '*' after "TLB"; D/G flag characters and the cache attribute
 * are decoded for each EntryLo half.
 */
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
  88. /* XXXKYMA: Must be called with interrupts disabled */
  89. /* set flush_dcache_mask == 0 if no dcache flush required */
  90. int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
  91. unsigned long entrylo0, unsigned long entrylo1,
  92. int flush_dcache_mask)
  93. {
  94. unsigned long flags;
  95. unsigned long old_entryhi;
  96. int idx;
  97. local_irq_save(flags);
  98. old_entryhi = read_c0_entryhi();
  99. write_c0_entryhi(entryhi);
  100. mtc0_tlbw_hazard();
  101. tlb_probe();
  102. tlb_probe_hazard();
  103. idx = read_c0_index();
  104. if (idx > current_cpu_data.tlbsize) {
  105. kvm_err("%s: Invalid Index: %d\n", __func__, idx);
  106. kvm_mips_dump_host_tlbs();
  107. local_irq_restore(flags);
  108. return -1;
  109. }
  110. write_c0_entrylo0(entrylo0);
  111. write_c0_entrylo1(entrylo1);
  112. mtc0_tlbw_hazard();
  113. if (idx < 0)
  114. tlb_write_random();
  115. else
  116. tlb_write_indexed();
  117. tlbw_use_hazard();
  118. kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
  119. vcpu->arch.pc, idx, read_c0_entryhi(),
  120. read_c0_entrylo0(), read_c0_entrylo1());
  121. /* Flush D-cache */
  122. if (flush_dcache_mask) {
  123. if (entrylo0 & ENTRYLO_V) {
  124. ++vcpu->stat.flush_dcache_exits;
  125. flush_data_cache_page((entryhi & VPN2_MASK) &
  126. ~flush_dcache_mask);
  127. }
  128. if (entrylo1 & ENTRYLO_V) {
  129. ++vcpu->stat.flush_dcache_exits;
  130. flush_data_cache_page(((entryhi & VPN2_MASK) &
  131. ~flush_dcache_mask) |
  132. (0x1 << PAGE_SHIFT));
  133. }
  134. }
  135. /* Restore old ASID */
  136. write_c0_entryhi(old_entryhi);
  137. mtc0_tlbw_hazard();
  138. local_irq_restore(flags);
  139. return 0;
  140. }
  141. EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
/*
 * Install the host TLB mapping for the guest commpage.
 *
 * Builds an EntryLo pair in which only the half matching @badvaddr's
 * page (even/odd within the VPN2 pair) maps the kseg0 commpage, marked
 * dirty + valid with the default cacheability; the other half stays 0
 * (invalid).  The pair is written at the reserved commpage TLB index
 * (kvm_mips_get_commpage_asid() — an index, despite the name) using the
 * guest kernel ASID.  Interrupts are disabled around the CP0 sequence
 * and EntryHi is restored afterwards.
 *
 * Returns 0.
 */
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo[2] = { 0, 0 };
	unsigned int pair_idx;

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	/* Even or odd page of the VPN2 pair? */
	pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
	entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	/* Align down to the double-page (VPN2) boundary */
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	write_c0_entrylo0(entrylo[0]);
	write_c0_entrylo1(entrylo[1]);
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		  vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		  read_c0_entrylo0(), read_c0_entrylo1());

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
  174. int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
  175. {
  176. int i;
  177. int index = -1;
  178. struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
  179. for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
  180. if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
  181. TLB_HI_ASID_HIT(tlb[i], entryhi)) {
  182. index = i;
  183. break;
  184. }
  185. }
  186. kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
  187. __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
  188. return index;
  189. }
  190. EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
  191. int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
  192. {
  193. unsigned long old_entryhi, flags;
  194. int idx;
  195. local_irq_save(flags);
  196. old_entryhi = read_c0_entryhi();
  197. if (KVM_GUEST_KERNEL_MODE(vcpu))
  198. write_c0_entryhi((vaddr & VPN2_MASK) |
  199. kvm_mips_get_kernel_asid(vcpu));
  200. else {
  201. write_c0_entryhi((vaddr & VPN2_MASK) |
  202. kvm_mips_get_user_asid(vcpu));
  203. }
  204. mtc0_tlbw_hazard();
  205. tlb_probe();
  206. tlb_probe_hazard();
  207. idx = read_c0_index();
  208. /* Restore old ASID */
  209. write_c0_entryhi(old_entryhi);
  210. mtc0_tlbw_hazard();
  211. local_irq_restore(flags);
  212. kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
  213. return idx;
  214. }
  215. EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
  216. int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
  217. {
  218. int idx;
  219. unsigned long flags, old_entryhi;
  220. local_irq_save(flags);
  221. old_entryhi = read_c0_entryhi();
  222. write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
  223. mtc0_tlbw_hazard();
  224. tlb_probe();
  225. tlb_probe_hazard();
  226. idx = read_c0_index();
  227. if (idx >= current_cpu_data.tlbsize)
  228. BUG();
  229. if (idx > 0) {
  230. write_c0_entryhi(UNIQUE_ENTRYHI(idx));
  231. write_c0_entrylo0(0);
  232. write_c0_entrylo1(0);
  233. mtc0_tlbw_hazard();
  234. tlb_write_indexed();
  235. tlbw_use_hazard();
  236. }
  237. write_c0_entryhi(old_entryhi);
  238. mtc0_tlbw_hazard();
  239. local_irq_restore(flags);
  240. if (idx > 0)
  241. kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
  242. (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
  243. return 0;
  244. }
  245. EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
/*
 * Invalidate all host TLB entries, optionally preserving guest KSEG0
 * mappings.
 *
 * @skip_kseg0: when non-zero, each entry is read back first and entries
 * whose EntryHi falls in the guest KSEG0 segment are left untouched.
 *
 * Each remaining entry is overwritten with a unique non-matching
 * EntryHi and zeroed EntryLo values.  Interrupts are disabled for the
 * duration; EntryHi and PageMask are restored on exit.
 */
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {
		write_c0_index(entry);

		if (skip_kseg0) {
			mtc0_tlbr_hazard();
			tlb_read();
			tlb_read_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
				continue;

			/* tlb_read() loaded the entry's mask; put ours back */
			write_c0_pagemask(old_pagemask);
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
  283. void kvm_local_flush_tlb_all(void)
  284. {
  285. unsigned long flags;
  286. unsigned long old_ctx;
  287. int entry = 0;
  288. local_irq_save(flags);
  289. /* Save old context and create impossible VPN2 value */
  290. old_ctx = read_c0_entryhi();
  291. write_c0_entrylo0(0);
  292. write_c0_entrylo1(0);
  293. /* Blast 'em all away. */
  294. while (entry < current_cpu_data.tlbsize) {
  295. /* Make sure all entries differ. */
  296. write_c0_entryhi(UNIQUE_ENTRYHI(entry));
  297. write_c0_index(entry);
  298. mtc0_tlbw_hazard();
  299. tlb_write_indexed();
  300. tlbw_use_hazard();
  301. entry++;
  302. }
  303. write_c0_entryhi(old_ctx);
  304. mtc0_tlbw_hazard();
  305. local_irq_restore(flags);
  306. }
  307. EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);