dump_tlb.c

/*
 * Dump R4x00 TLB for debugging purposes.
 *
 * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
 * Copyright (C) 1999 by Silicon Graphics, Inc.
 */
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/hazards.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>

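/* Translate a c0_pagemask register value into a human-readable page size. */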
static inline const char *msk2str(unsigned int mask)
{
	switch (mask) {
	case PM_4K:	return "4kb";
	case PM_16K:	return "16kb";
	case PM_64K:	return "64kb";
	case PM_256K:	return "256kb";
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case PM_8K:	return "8kb";
	case PM_32K:	return "32kb";
	case PM_128K:	return "128kb";
	case PM_512K:	return "512kb";
	case PM_2M:	return "2Mb";
	case PM_8M:	return "8Mb";
	case PM_32M:	return "32Mb";
#endif
#ifndef CONFIG_CPU_VR41XX
	case PM_1M:	return "1Mb";
	case PM_4M:	return "4Mb";
	case PM_16M:	return "16Mb";
	case PM_64M:	return "64Mb";
	case PM_256M:	return "256Mb";
	case PM_1G:	return "1Gb";
#endif
	}
	return "";
}

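/*
 * Read TLB entries 'first' through 'last' straight out of the CP0
 * registers and print the ones that are in use.  The CP0 state clobbered
 * by the probe loop (index, entryhi, pagemask) is saved on entry and
 * restored before returning.
 */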
static void dump_tlb(int first, int last)
{
	unsigned long s_entryhi, entryhi, asid;
	unsigned long long entrylo0, entrylo1, pa;
	unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
#ifdef CONFIG_32BIT
	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
	int pwidth = xpa ? 11 : 8;
	int vwidth = 8;
#else
	bool xpa = false;
	int pwidth = 11;
	int vwidth = 11;
#endif

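	/*
	 * Save the CP0 registers that the loop below will overwrite.  The
	 * low bits of c0_entryhi hold the current ASID, which is used
	 * further down to decide whether a non-global entry belongs to the
	 * currently running address space.
	 */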
	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();
	asid = s_entryhi & 0xff;

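	/*
	 * For each index: select the entry, issue TLBR (tlb_read) to load
	 * it into c0_pagemask/c0_entryhi/c0_entrylo{0,1}, then read those
	 * registers back.  Hazard barriers separate the index write from
	 * the TLBR, and the TLBR from the subsequent reads.
	 */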
	for (i = first; i <= last; i++) {
		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		pagemask = read_c0_pagemask();
		entryhi = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();

		/* EHINV bit marks entire entry as invalid */
		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
			continue;
		/*
		 * Prior to tlbinv, unused entries have a virtual address of
		 * CKSEG0.
		 */
		if ((entryhi & ~0x1ffffUL) == CKSEG0)
			continue;
		/*
		 * ASID takes effect in absence of G (global) bit.
		 * We check both G bits, even though architecturally they
		 * should match one another, because some revisions of the
		 * SB1 core may leave only a single G bit set after a machine
		 * check exception due to duplicate TLB entry.
		 */
		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
		    (entryhi & 0xff) != asid)
			continue;

		/*
		 * Only print entries in use
		 */
  90. printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
  91. c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
  92. c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
  93. printk("va=%0*lx asid=%02lx\n",
  94. vwidth, (entryhi & ~0x1fffUL),
  95. entryhi & 0xff);
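		/*
		 * Reconstruct the physical address of the even page: the PFN
		 * field starts at bit 6 of EntryLo and encodes PA >> 12, so
		 * shifting left by 6 and masking with PAGE_MASK yields the
		 * page's base address.  With XPA, the extra high address
		 * bits are read from the extended EntryLo register and
		 * merged in before the shift.
		 */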
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
		pa = (pa << 6) & PAGE_MASK;
		printk("\t[");
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
		       pwidth, pa, c0,
		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);

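		/*
		 * Each TLB entry maps a pair of virtual pages; EntryLo1
		 * describes the odd page of the pair, so repeat the same
		 * decoding for it.
		 */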
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
		pa = (pa << 6) & PAGE_MASK;
		if (cpu_has_rixi)
			printk("ri=%d xi=%d ",
			       (entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
		       pwidth, pa, c1,
		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
	}
	printk("\n");

	write_c0_entryhi(s_entryhi);
	write_c0_index(s_index);
	write_c0_pagemask(s_pagemask);
}

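/* Dump every entry in the current CPU's TLB. */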
void dump_tlb_all(void)
{
	dump_tlb(0, current_cpu_data.tlbsize - 1);
}