/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address. This breaks with tradition
 *    set by the R4000. To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code. To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
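
/*
 * For example, on a 64-bit kernel CKSEG0 evaluates to the sign-extended
 * constant 0xffffffff80000000, so an address formed as INDEX_BASE plus an
 * index offset is directly usable by the indexed cache operations below,
 * in both 32-bit and 64-bit builds.
 */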

#define cache_op(op,addr) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set "MIPS_ISA_ARCH_LEVEL" \n" \
	"	cache %0, %1 \n" \
	"	.set pop \n" \
	: \
	: "i" (op), "R" (*(unsigned char *)(addr)))

#ifdef CONFIG_MIPS_MT

#define __iflush_prologue \
	unsigned long redundance; \
	extern int mt_n_iflushes; \
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue \
	}

#define __dflush_prologue \
	unsigned long redundance; \
	extern int mt_n_dflushes; \
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr) \
({ \
	int __err = 0; \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set "MIPS_ISA_ARCH_LEVEL" \n" \
	"1:	cache %1, (%2) \n" \
	"2:	.insn \n" \
	"	.set pop \n" \
	"	.section .fixup,\"ax\" \n" \
	"3:	li %0, %3 \n" \
	"	j 2b \n" \
	"	.previous \n" \
	"	.section __ex_table,\"a\" \n" \
	"	"STR(PTR)" 1b, 3b \n" \
	"	.previous" \
	: "+r" (__err) \
	: "i" (op), "r" (addr), "i" (-EFAULT)); \
	__err; \
})
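
/*
 * Fault handling in protected_cache_op(): the cache instruction at label 1
 * has an __ex_table entry pointing at the fixup code at label 3. If the
 * access faults, the exception handler branches to 3, which loads -EFAULT
 * into __err and resumes at label 2, so the macro evaluates to 0 on success
 * and -EFAULT when the address is not mapped.
 */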

#define protected_cachee_op(op,addr) \
({ \
	int __err = 0; \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips0 \n" \
	"	.set eva \n" \
	"1:	cachee %1, (%2) \n" \
	"2:	.insn \n" \
	"	.set pop \n" \
	"	.section .fixup,\"ax\" \n" \
	"3:	li %0, %3 \n" \
	"	j 2b \n" \
	"	.previous \n" \
	"	.section __ex_table,\"a\" \n" \
	"	"STR(PTR)" 1b, 3b \n" \
	"	.previous" \
	: "+r" (__err) \
	: "i" (op), "r" (addr), "i" (-EFAULT)); \
	__err; \
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}
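
/*
 * Callers such as the signal-trampoline flush can use the return value to
 * detect that the user address was simply not mapped instead of taking an
 * unhandled fault, e.g. (illustrative only):
 *
 *	if (protected_flush_icache_line(addr))
 *		return -EFAULT;
 */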

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches. At worst a single cache line gets invalidated unnecessarily, so the
 * penalty is small.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#ifndef CONFIG_CPU_MIPSR6
#define cache16_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
	"	cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
	"	cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
	"	cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
	"	cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
	"	cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
	"	cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
	"	cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
	"	cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
	"	.set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
	"	cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
	"	cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
	"	cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
	"	cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
	"	cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
	"	cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
	"	.set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
	"	cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
	"	cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
	"	cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
	"	cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
	"	cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
	"	cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
	"	.set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache128_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
	"	cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
	"	cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
	"	cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
	"	cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
	"	cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
	"	cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
	"	cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
	"	cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
	"	.set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#else
/*
 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
 * This means we now need to increment the base register before we flush
 * more cache lines. The variants below use register $1 ($at) as a scratch
 * base pointer, which is why they are assembled with ".set noat".
 */
#define cache16_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push\n" \
	"	.set noreorder\n" \
	"	.set mips64r6\n" \
	"	.set noat\n" \
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x010($1)\n" \
	"	cache %1, 0x020($1); cache %1, 0x030($1)\n" \
	"	cache %1, 0x040($1); cache %1, 0x050($1)\n" \
	"	cache %1, 0x060($1); cache %1, 0x070($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x090($1)\n" \
	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
	"	.set pop\n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push\n" \
	"	.set noreorder\n" \
	"	.set mips64r6\n" \
	"	.set noat\n" \
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n" \
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n" \
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n" \
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
	"	.set pop\n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push\n" \
	"	.set noreorder\n" \
	"	.set mips64r6\n" \
	"	.set noat\n" \
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n" \
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
	"	.set pop\n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache128_unroll32(base,op) \
	__asm__ __volatile__( \
	"	.set push\n" \
	"	.set noreorder\n" \
	"	.set mips64r6\n" \
	"	.set noat\n" \
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n" \
	"	.set pop\n" \
	: \
	: "r" (base), \
	  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips0 \n" \
	"	.set eva \n" \
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
	"	.set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32_user(base, op) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips0 \n" \
	"	.set eva \n" \
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
	"	.set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32_user(base, op) \
	__asm__ __volatile__( \
	"	.set push \n" \
	"	.set noreorder \n" \
	"	.set mips0 \n" \
	"	.set eva \n" \
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
	"	.set pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
static inline void extra##blast_##pfx##cache##lsize(void) \
{ \
	unsigned long start = INDEX_BASE; \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
	\
	__##pfx##flush_prologue \
	\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
	\
	__##pfx##flush_epilogue \
} \
	\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
	\
	__##pfx##flush_prologue \
	\
	do { \
		cache##lsize##_unroll32(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
	\
	__##pfx##flush_epilogue \
} \
	\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
	unsigned long start = INDEX_BASE + (page & indexmask); \
	unsigned long end = start + PAGE_SIZE; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
	\
	__##pfx##flush_prologue \
	\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
	\
	__##pfx##flush_epilogue \
}
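
/*
 * For example, the instantiation
 *	__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D,
 *			    Hit_Writeback_Inv_D, 32, )
 * expands to blast_dcache32(), blast_dcache32_page() and
 * blast_dcache32_page_indexed(), all operating on 32-byte cache lines.
 */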

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
	\
	__##pfx##flush_prologue \
	\
	do { \
		cache##lsize##_unroll32_user(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
	\
	__##pfx##flush_epilogue \
}
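
/*
 * The instantiations below provide blast_dcache{16,32,64}_user_page() and
 * blast_icache{16,32,64}_user_page(), the EVA counterparts of the
 * blast_*cache*_page() helpers generated above.
 */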
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
							    unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
	\
	__##pfx##flush_prologue \
	\
	while (1) { \
		prot##cache_op(hitop, addr); \
		if (addr == aend) \
			break; \
		addr += lsize; \
	} \
	\
	__##pfx##flush_epilogue \
}
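
/*
 * For example, __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D,
 * protected_, ) expands to protected_blast_dcache_range(start, end), which
 * walks from start to end one cache line at a time, applying the hit
 * operation through protected_cache_op().
 */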

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
							 unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
	\
	__##pfx##flush_prologue \
	\
	if (!uaccess_kernel()) { \
		while (1) { \
			protected_cachee_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
	} else { \
		while (1) { \
			protected_cache_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
	} \
	\
	__##pfx##flush_epilogue \
}
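
/*
 * In the EVA case the current address-space limit decides which instruction
 * is used: when uaccess_kernel() is true the "user" range is really a kernel
 * address and the plain "cache" op is used; otherwise the EVA "cachee" op
 * targets the user segment. Either way the caller just calls
 * protected_blast_*cache_range().
 */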
__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif

__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )

/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_node(long node) \
{ \
	unsigned long start = CAC_BASE | nid_to_addrbase(node); \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
	\
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
}
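
/*
 * The instantiations below provide blast_scache{16,32,64,128}_node(node),
 * which index-flush the secondary cache of a single NUMA node by basing the
 * index address at CAC_BASE | nid_to_addrbase(node) (Loongson-3 specific,
 * as noted above).
 */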
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */