/* coherency_ll.S */
/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why this
 * function has to be callable from assembly. It can also be called
 * by a primary CPU from C code during its boot.
 */
#include <linux/linkage.h>

/* Offsets of the control and configuration registers from the coherency fabric base */
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4

#include <asm/assembler.h>
#include <asm/cp15.h>

	.text
/*
 * ll_get_coherency_base()
 *
 * Returns the coherency fabric base address in r1 (r0 is untouched),
 * or 0 if the coherency fabric is not enabled.
 *
 * Clobbers: r1, r3, flags.
 */
ENTRY(ll_get_coherency_base)
	mrc	p15, 0, r1, c1, c0, 0
	tst	r1, #CR_M		@ Check MMU bit enabled
	bne	1f

	/*
	 * MMU is disabled, use the physical address of the coherency
	 * base address. However, if the coherency fabric isn't mapped
	 * (i.e its virtual address is zero), it means coherency is
	 * not enabled, so we return 0.
	 *
	 * NOTE(review): "ldr r1, =coherency_base" loads the link-time
	 * address of the coherency_base variable, not its contents, so
	 * the "cmp r1, #0" below checks a constant that is normally
	 * never zero — verify this bail-out can actually trigger.
	 */
	ldr	r1, =coherency_base
	cmp	r1, #0
	beq	2f

	/*
	 * Position-independent physical-address computation: the "3:"
	 * anchor at the end of this file stores the link-time offset
	 * of coherency_phys_base relative to the anchor itself. adr
	 * yields the anchor's runtime (physical) address, so adding
	 * the stored offset gives the runtime address of
	 * coherency_phys_base, from which the base is loaded.
	 */
	adr	r1, 3f			@ r1 = runtime address of anchor
	ldr	r3, [r1]		@ r3 = coherency_phys_base - anchor
	ldr	r1, [r1, r3]		@ r1 = *coherency_phys_base
	b	2f
1:
	/*
	 * MMU is enabled, use the virtual address of the coherency
	 * base address.
	 */
	ldr	r1, =coherency_base
	ldr	r1, [r1]
2:
	ret	lr
ENDPROC(ll_get_coherency_base)
  55. /*
  56. * Returns the coherency CPU mask in r3 (r0 is untouched). This
  57. * coherency CPU mask can be used with the coherency fabric
  58. * configuration and control registers. Note that the mask is already
  59. * endian-swapped as appropriate so that the calling functions do not
  60. * have to care about endianness issues while accessing the coherency
  61. * fabric registers
  62. */
  63. ENTRY(ll_get_coherency_cpumask)
  64. mrc 15, 0, r3, cr0, cr0, 5
  65. and r3, r3, #15
  66. mov r2, #(1 << 24)
  67. lsl r3, r2, r3
  68. ARM_BE8(rev r3, r3)
  69. ret lr
  70. ENDPROC(ll_get_coherency_cpumask)
/*
 * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
 * ll_disable_coherency() use the strex/ldrex instructions while the
 * MMU can be disabled. The Armada XP SoC has an exclusive monitor
 * that tracks transactions to Device and/or SO memory and thanks to
 * that, exclusive transactions are functional even when the MMU is
 * disabled.
 */

/*
 * ll_add_cpu_to_smp_group()
 *
 * Atomically sets this CPU's bit in the coherency fabric
 * configuration register, adding the CPU to the SMP group.
 * Does nothing if the coherency fabric is not enabled.
 *
 * Clobbers: r0, r1, r2, r3, flags.
 */
ENTRY(ll_add_cpu_to_smp_group)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0			@ return via the lr saved in r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0			@ restore the real return address
	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
1:
	ldrex	r2, [r0]		@ r2 = current CFG register value
	orr	r2, r2, r3		@ set this CPU's bit (mask from r3)
	strex	r1, r2, [r0]		@ r1 = 0 on successful exclusive store
	cmp	r1, #0
	bne	1b			@ lost exclusivity, retry
	ret	lr
ENDPROC(ll_add_cpu_to_smp_group)
/*
 * ll_enable_coherency()
 *
 * Atomically sets this CPU's bit in the coherency fabric control
 * register, enabling coherency for this CPU. Returns 0 in r0.
 * Does nothing if the coherency fabric is not enabled.
 *
 * Clobbers: r0, r1, r2, r3, flags.
 */
ENTRY(ll_enable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0			@ return via the lr saved in r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0			@ restore the real return address
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]		@ r2 = current CTL register value
	orr	r2, r2, r3		@ set this CPU's bit (mask from r3)
	strex	r1, r2, [r0]		@ r1 = 0 on successful exclusive store
	cmp	r1, #0
	bne	1b			@ lost exclusivity, retry
	dsb				@ ensure the write completed before returning
	mov	r0, #0			@ return 0 (success)
	ret	lr
ENDPROC(ll_enable_coherency)
/*
 * ll_disable_coherency()
 *
 * Atomically clears this CPU's bit in the coherency fabric control
 * register, disabling coherency for this CPU.
 * Does nothing if the coherency fabric is not enabled.
 *
 * Clobbers: r0, r1, r2, r3, flags.
 */
ENTRY(ll_disable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0			@ return via the lr saved in r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0			@ restore the real return address
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]		@ r2 = current CTL register value
	bic	r2, r2, r3		@ clear this CPU's bit (mask from r3)
	strex	r1, r2, [r0]		@ r1 = 0 on successful exclusive store
	cmp	r1, #0
	bne	1b			@ lost exclusivity, retry
	dsb				@ ensure the write completed before returning
	ret	lr
ENDPROC(ll_disable_coherency)
	/*
	 * Position-independent anchor used by ll_get_coherency_base()
	 * when the MMU is off: stores the link-time offset of
	 * coherency_phys_base relative to this location, so adding the
	 * anchor's runtime address (obtained with adr) yields the
	 * runtime address of coherency_phys_base.
	 */
	.align 2
3:
	.long	coherency_phys_base - .