clear_page.S

/* SPDX-License-Identifier: GPL-2.0 */
/* clear_page.S: UltraSparc optimized clear page.
 *
 * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
 */

#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/export.h>
	/* What we used to do was lock a TLB entry into a specific
	 * TLB slot, clear the page with interrupts disabled, then
	 * restore the original TLB entry.  This was great for
	 * disturbing the TLB as little as possible, but it meant
	 * we had to keep interrupts disabled for a long time.
	 *
	 * Now, we simply use the normal TLB loading mechanism,
	 * and this makes the cpu choose a slot all by itself.
	 * Then we do a normal TLB flush on exit.  We need only
	 * disable preemption during the clear.
	 */
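
	/* Both entry points funnel into clear_page_common below.
	 * %o4 acts as a flag: _clear_page enters with %o4 clear (no
	 * temporary TLB entry to tear down), while clear_user_page
	 * sets %o4 to 1 after loading its TLBTEMP mapping, so the
	 * common exit path knows to demap it and restore the saved
	 * preempt count.
	 */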
	.text

	.globl		_clear_page
EXPORT_SYMBOL(_clear_page)
_clear_page:		/* %o0=dest */
	ba,pt		%xcc, clear_page_common
	 clr		%o4

	/* This thing is pretty important, it shows up
	 * on the profiles via do_anonymous_page().
	 */
	.align		32
	.globl		clear_user_page
EXPORT_SYMBOL(clear_user_page)
clear_user_page:	/* %o0=dest, %o1=vaddr */
	lduw		[%g6 + TI_PRE_COUNT], %o2
	sethi		%hi(PAGE_OFFSET), %g2
	sethi		%hi(PAGE_SIZE), %o4
	ldx		[%g2 + %lo(PAGE_OFFSET)], %g2
	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
	sub		%o0, %g2, %g1		! paddr
	and		%o1, %o4, %o0		! vaddr D-cache alias bit
	or		%g1, %g3, %g1		! TTE data
	sethi		%hi(TLBTEMP_BASE), %o3
	add		%o2, 1, %o4
	add		%o0, %o3, %o0		! TTE vaddr
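
	/* The temporary mapping sits at TLBTEMP_BASE plus the user
	 * vaddr's D-cache alias bit, so the kernel's view of the page
	 * shares a D-cache color with the user's mapping on the
	 * virtually-indexed D-cache.
	 */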
	/* Disable preemption. */
	mov		TLB_TAG_ACCESS, %g3
	stw		%o4, [%g6 + TI_PRE_COUNT]

	/* Load TLB entry. */
	rdpr		%pstate, %o4
	wrpr		%o4, PSTATE_IE, %pstate
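
	/* Interrupts stay off (PSTATE_IE cleared above) while the tag
	 * access register and DTLB data-in register are written, so the
	 * two stxa's install the entry as one unit; the flush of a
	 * mapped kernel address below serves to synchronize the ASI
	 * stores before %pstate is restored.
	 */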
	stxa		%o0, [%g3] ASI_DMMU
	stxa		%g1, [%g0] ASI_DTLB_DATA_IN
	sethi		%hi(KERNBASE), %g1
	flush		%g1
	wrpr		%o4, 0x0, %pstate

	mov		1, %o4

clear_page_common:
	VISEntryHalf
	membar		#StoreLoad | #StoreStore | #LoadStore
	fzero		%f0
	sethi		%hi(PAGE_SIZE/64), %o1
	mov		%o0, %g1		! remember vaddr for tlbflush
	fzero		%f2
	or		%o1, %lo(PAGE_SIZE/64), %o1
	faddd		%f0, %f2, %f4
	fmuld		%f0, %f2, %f6
	faddd		%f0, %f2, %f8
	fmuld		%f0, %f2, %f10
	faddd		%f0, %f2, %f12
	fmuld		%f0, %f2, %f14
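
	/* %f0-%f14 are now all zero and together form the 64-byte
	 * source block for the ASI_BLK_P stores below; the loop runs
	 * PAGE_SIZE/64 times, clearing 64 bytes per iteration.  (The
	 * faddd/fmuld pairs above simply propagate the zero in %f0/%f2
	 * into %f4-%f14.)
	 */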
1:	stda		%f0, [%o0 + %g0] ASI_BLK_P
	subcc		%o1, 1, %o1
	bne,pt		%icc, 1b
	 add		%o0, 0x40, %o0
	membar		#Sync
	VISExitHalf

	brz,pn		%o4, out
	 nop
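
	/* clear_user_page path only: demap the temporary DTLB entry we
	 * loaded above (%g1 still holds its vaddr) and restore the
	 * preempt count saved in %o2.
	 */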
	stxa		%g0, [%g1] ASI_DMMU_DEMAP
	membar		#Sync
	stw		%o2, [%g6 + TI_PRE_COUNT]

out:	retl
	 nop