/*
 * arch/alpha/lib/ev6-memset.S
 *
 * This is an efficient (and relatively small) implementation of the C library
 * "memset()" function for the 21264 implementation of Alpha.
 *
 * 21264 version contributed by Rick Gorton <rick.gorton@alpha-processor.com>
 *
 * Much of the information about 21264 scheduling/coding comes from:
 *	Compiler Writer's Guide for the Alpha 21264
 *	abbreviated as 'CWG' in other comments here
 *	ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html
 * Scheduling notation:
 *	E	- either cluster
 *	U	- upper subcluster; U0 - subcluster U0; U1 - subcluster U1
 *	L	- lower subcluster; L0 - subcluster L0; L1 - subcluster L1
 * The algorithm for the leading and trailing quadwords remains the same,
 * however the loop has been unrolled to enable better memory throughput,
 * and the code has been replicated for each of the entry points: __memset
 * and __memsetw to permit better scheduling to eliminate the stalling
 * encountered during the mask replication.
 * A future enhancement might be to put in a byte store loop for really
 * small (say < 32 bytes) memset()s.  Whether or not that change would be
 * a win in the kernel would depend upon the contextual usage.
 * WARNING: Maintaining this is going to be more work than the above version,
 * as fixes will need to be made in multiple places.  The performance gain
 * is worth it.
 */
  29. #include <asm/export.h>
  30. .set noat
  31. .set noreorder
  32. .text
  33. .globl memset
  34. .globl __memset
  35. .globl ___memset
  36. .globl __memsetw
  37. .globl __constant_c_memset
  38. .ent ___memset
  39. .align 5
  40. ___memset:
  41. .frame $30,0,$26,0
  42. .prologue 0
  43. /*
  44. * Serious stalling happens. The only way to mitigate this is to
  45. * undertake a major re-write to interleave the constant materialization
  46. * with other parts of the fall-through code. This is important, even
  47. * though it makes maintenance tougher.
  48. * Do this later.
  49. */
  50. and $17,255,$1 # E : 00000000000000ch
  51. insbl $17,1,$2 # U : 000000000000ch00
  52. bis $16,$16,$0 # E : return value
  53. ble $18,end_b # U : zero length requested?
  54. addq $18,$16,$6 # E : max address to write to
  55. bis $1,$2,$17 # E : 000000000000chch
  56. insbl $1,2,$3 # U : 0000000000ch0000
  57. insbl $1,3,$4 # U : 00000000ch000000
  58. or $3,$4,$3 # E : 00000000chch0000
  59. inswl $17,4,$5 # U : 0000chch00000000
  60. xor $16,$6,$1 # E : will complete write be within one quadword?
  61. inswl $17,6,$2 # U : chch000000000000
  62. or $17,$3,$17 # E : 00000000chchchch
  63. or $2,$5,$2 # E : chchchch00000000
  64. bic $1,7,$1 # E : fit within a single quadword?
  65. and $16,7,$3 # E : Target addr misalignment
  66. or $17,$2,$17 # E : chchchchchchchch
  67. beq $1,within_quad_b # U :
  68. nop # E :
  69. beq $3,aligned_b # U : target is 0mod8
  70. /*
  71. * Target address is misaligned, and won't fit within a quadword
  72. */
  73. ldq_u $4,0($16) # L : Fetch first partial
  74. bis $16,$16,$5 # E : Save the address
  75. insql $17,$16,$2 # U : Insert new bytes
  76. subq $3,8,$3 # E : Invert (for addressing uses)
  77. addq $18,$3,$18 # E : $18 is new count ($3 is negative)
  78. mskql $4,$16,$4 # U : clear relevant parts of the quad
  79. subq $16,$3,$16 # E : $16 is new aligned destination
  80. bis $2,$4,$1 # E : Final bytes
  81. nop
  82. stq_u $1,0($5) # L : Store result
  83. nop
  84. nop
  85. .align 4
  86. aligned_b:
  87. /*
  88. * We are now guaranteed to be quad aligned, with at least
  89. * one partial quad to write.
  90. */
  91. sra $18,3,$3 # U : Number of remaining quads to write
  92. and $18,7,$18 # E : Number of trailing bytes to write
  93. bis $16,$16,$5 # E : Save dest address
  94. beq $3,no_quad_b # U : tail stuff only
  95. /*
  96. * it's worth the effort to unroll this and use wh64 if possible
  97. * Lifted a bunch of code from clear_user.S
  98. * At this point, entry values are:
  99. * $16 Current destination address
  100. * $5 A copy of $16
  101. * $6 The max quadword address to write to
  102. * $18 Number trailer bytes
  103. * $3 Number quads to write
  104. */
  105. and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop)
  106. subq $3, 16, $4 # E : Only try to unroll if > 128 bytes
  107. subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64)
  108. blt $4, loop_b # U :
  109. /*
  110. * We know we've got at least 16 quads, minimum of one trip
  111. * through unrolled loop. Do a quad at a time to get us 0mod64
  112. * aligned.
  113. */
  114. nop # E :
  115. nop # E :
  116. nop # E :
  117. beq $1, $bigalign_b # U :
  118. $alignmod64_b:
  119. stq $17, 0($5) # L :
  120. subq $3, 1, $3 # E : For consistency later
  121. addq $1, 8, $1 # E : Increment towards zero for alignment
  122. addq $5, 8, $4 # E : Initial wh64 address (filler instruction)
  123. nop
  124. nop
  125. addq $5, 8, $5 # E : Inc address
  126. blt $1, $alignmod64_b # U :
  127. $bigalign_b:
  128. /*
  129. * $3 - number quads left to go
  130. * $5 - target address (aligned 0mod64)
  131. * $17 - mask of stuff to store
  132. * Scratch registers available: $7, $2, $4, $1
  133. * we know that we'll be taking a minimum of one trip through
  134. * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
  135. * Assumes the wh64 needs to be for 2 trips through the loop in the future
  136. * The wh64 is issued on for the starting destination address for trip +2
  137. * through the loop, and if there are less than two trips left, the target
  138. * address will be for the current trip.
  139. */
  140. $do_wh64_b:
  141. wh64 ($4) # L1 : memory subsystem write hint
  142. subq $3, 24, $2 # E : For determining future wh64 addresses
  143. stq $17, 0($5) # L :
  144. nop # E :
  145. addq $5, 128, $4 # E : speculative target of next wh64
  146. stq $17, 8($5) # L :
  147. stq $17, 16($5) # L :
  148. addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr)
  149. stq $17, 24($5) # L :
  150. stq $17, 32($5) # L :
  151. cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle
  152. nop
  153. stq $17, 40($5) # L :
  154. stq $17, 48($5) # L :
  155. subq $3, 16, $2 # E : Repeat the loop at least once more?
  156. nop
  157. stq $17, 56($5) # L :
  158. addq $5, 64, $5 # E :
  159. subq $3, 8, $3 # E :
  160. bge $2, $do_wh64_b # U :
  161. nop
  162. nop
  163. nop
  164. beq $3, no_quad_b # U : Might have finished already
  165. .align 4
  166. /*
  167. * Simple loop for trailing quadwords, or for small amounts
  168. * of data (where we can't use an unrolled loop and wh64)
  169. */
  170. loop_b:
  171. stq $17,0($5) # L :
  172. subq $3,1,$3 # E : Decrement number quads left
  173. addq $5,8,$5 # E : Inc address
  174. bne $3,loop_b # U : more?
  175. no_quad_b:
  176. /*
  177. * Write 0..7 trailing bytes.
  178. */
  179. nop # E :
  180. beq $18,end_b # U : All done?
  181. ldq $7,0($5) # L :
  182. mskqh $7,$6,$2 # U : Mask final quad
  183. insqh $17,$6,$4 # U : New bits
  184. bis $2,$4,$1 # E : Put it all together
  185. stq $1,0($5) # L : And back to memory
  186. ret $31,($26),1 # L0 :
  187. within_quad_b:
  188. ldq_u $1,0($16) # L :
  189. insql $17,$16,$2 # U : New bits
  190. mskql $1,$16,$4 # U : Clear old
  191. bis $2,$4,$2 # E : New result
  192. mskql $2,$6,$4 # U :
  193. mskqh $1,$6,$2 # U :
  194. bis $2,$4,$1 # E :
  195. stq_u $1,0($16) # L :
  196. end_b:
  197. nop
  198. nop
  199. nop
  200. ret $31,($26),1 # L0 :
  201. .end ___memset
  202. EXPORT_SYMBOL(___memset)
  203. /*
  204. * This is the original body of code, prior to replication and
  205. * rescheduling. Leave it here, as there may be calls to this
  206. * entry point.
  207. */
  208. .align 4
  209. .ent __constant_c_memset
  210. __constant_c_memset:
  211. .frame $30,0,$26,0
  212. .prologue 0
  213. addq $18,$16,$6 # E : max address to write to
  214. bis $16,$16,$0 # E : return value
  215. xor $16,$6,$1 # E : will complete write be within one quadword?
  216. ble $18,end # U : zero length requested?
  217. bic $1,7,$1 # E : fit within a single quadword
  218. beq $1,within_one_quad # U :
  219. and $16,7,$3 # E : Target addr misalignment
  220. beq $3,aligned # U : target is 0mod8
  221. /*
  222. * Target address is misaligned, and won't fit within a quadword
  223. */
  224. ldq_u $4,0($16) # L : Fetch first partial
  225. bis $16,$16,$5 # E : Save the address
  226. insql $17,$16,$2 # U : Insert new bytes
  227. subq $3,8,$3 # E : Invert (for addressing uses)
  228. addq $18,$3,$18 # E : $18 is new count ($3 is negative)
  229. mskql $4,$16,$4 # U : clear relevant parts of the quad
  230. subq $16,$3,$16 # E : $16 is new aligned destination
  231. bis $2,$4,$1 # E : Final bytes
  232. nop
  233. stq_u $1,0($5) # L : Store result
  234. nop
  235. nop
  236. .align 4
  237. aligned:
  238. /*
  239. * We are now guaranteed to be quad aligned, with at least
  240. * one partial quad to write.
  241. */
  242. sra $18,3,$3 # U : Number of remaining quads to write
  243. and $18,7,$18 # E : Number of trailing bytes to write
  244. bis $16,$16,$5 # E : Save dest address
  245. beq $3,no_quad # U : tail stuff only
  246. /*
  247. * it's worth the effort to unroll this and use wh64 if possible
  248. * Lifted a bunch of code from clear_user.S
  249. * At this point, entry values are:
  250. * $16 Current destination address
  251. * $5 A copy of $16
  252. * $6 The max quadword address to write to
  253. * $18 Number trailer bytes
  254. * $3 Number quads to write
  255. */
  256. and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop)
  257. subq $3, 16, $4 # E : Only try to unroll if > 128 bytes
  258. subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64)
  259. blt $4, loop # U :
  260. /*
  261. * We know we've got at least 16 quads, minimum of one trip
  262. * through unrolled loop. Do a quad at a time to get us 0mod64
  263. * aligned.
  264. */
  265. nop # E :
  266. nop # E :
  267. nop # E :
  268. beq $1, $bigalign # U :
  269. $alignmod64:
  270. stq $17, 0($5) # L :
  271. subq $3, 1, $3 # E : For consistency later
  272. addq $1, 8, $1 # E : Increment towards zero for alignment
  273. addq $5, 8, $4 # E : Initial wh64 address (filler instruction)
  274. nop
  275. nop
  276. addq $5, 8, $5 # E : Inc address
  277. blt $1, $alignmod64 # U :
  278. $bigalign:
  279. /*
  280. * $3 - number quads left to go
  281. * $5 - target address (aligned 0mod64)
  282. * $17 - mask of stuff to store
  283. * Scratch registers available: $7, $2, $4, $1
  284. * we know that we'll be taking a minimum of one trip through
  285. * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
  286. * Assumes the wh64 needs to be for 2 trips through the loop in the future
  287. * The wh64 is issued on for the starting destination address for trip +2
  288. * through the loop, and if there are less than two trips left, the target
  289. * address will be for the current trip.
  290. */
  291. $do_wh64:
  292. wh64 ($4) # L1 : memory subsystem write hint
  293. subq $3, 24, $2 # E : For determining future wh64 addresses
  294. stq $17, 0($5) # L :
  295. nop # E :
  296. addq $5, 128, $4 # E : speculative target of next wh64
  297. stq $17, 8($5) # L :
  298. stq $17, 16($5) # L :
  299. addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr)
  300. stq $17, 24($5) # L :
  301. stq $17, 32($5) # L :
  302. cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle
  303. nop
  304. stq $17, 40($5) # L :
  305. stq $17, 48($5) # L :
  306. subq $3, 16, $2 # E : Repeat the loop at least once more?
  307. nop
  308. stq $17, 56($5) # L :
  309. addq $5, 64, $5 # E :
  310. subq $3, 8, $3 # E :
  311. bge $2, $do_wh64 # U :
  312. nop
  313. nop
  314. nop
  315. beq $3, no_quad # U : Might have finished already
  316. .align 4
  317. /*
  318. * Simple loop for trailing quadwords, or for small amounts
  319. * of data (where we can't use an unrolled loop and wh64)
  320. */
  321. loop:
  322. stq $17,0($5) # L :
  323. subq $3,1,$3 # E : Decrement number quads left
  324. addq $5,8,$5 # E : Inc address
  325. bne $3,loop # U : more?
  326. no_quad:
  327. /*
  328. * Write 0..7 trailing bytes.
  329. */
  330. nop # E :
  331. beq $18,end # U : All done?
  332. ldq $7,0($5) # L :
  333. mskqh $7,$6,$2 # U : Mask final quad
  334. insqh $17,$6,$4 # U : New bits
  335. bis $2,$4,$1 # E : Put it all together
  336. stq $1,0($5) # L : And back to memory
  337. ret $31,($26),1 # L0 :
  338. within_one_quad:
  339. ldq_u $1,0($16) # L :
  340. insql $17,$16,$2 # U : New bits
  341. mskql $1,$16,$4 # U : Clear old
  342. bis $2,$4,$2 # E : New result
  343. mskql $2,$6,$4 # U :
  344. mskqh $1,$6,$2 # U :
  345. bis $2,$4,$1 # E :
  346. stq_u $1,0($16) # L :
  347. end:
  348. nop
  349. nop
  350. nop
  351. ret $31,($26),1 # L0 :
  352. .end __constant_c_memset
  353. EXPORT_SYMBOL(__constant_c_memset)
  354. /*
  355. * This is a replicant of the __constant_c_memset code, rescheduled
  356. * to mask stalls. Note that entry point names also had to change
  357. */
  358. .align 5
  359. .ent __memsetw
  360. __memsetw:
  361. .frame $30,0,$26,0
  362. .prologue 0
  363. inswl $17,0,$5 # U : 000000000000c1c2
  364. inswl $17,2,$2 # U : 00000000c1c20000
  365. bis $16,$16,$0 # E : return value
  366. addq $18,$16,$6 # E : max address to write to
  367. ble $18, end_w # U : zero length requested?
  368. inswl $17,4,$3 # U : 0000c1c200000000
  369. inswl $17,6,$4 # U : c1c2000000000000
  370. xor $16,$6,$1 # E : will complete write be within one quadword?
  371. or $2,$5,$2 # E : 00000000c1c2c1c2
  372. or $3,$4,$17 # E : c1c2c1c200000000
  373. bic $1,7,$1 # E : fit within a single quadword
  374. and $16,7,$3 # E : Target addr misalignment
  375. or $17,$2,$17 # E : c1c2c1c2c1c2c1c2
  376. beq $1,within_quad_w # U :
  377. nop
  378. beq $3,aligned_w # U : target is 0mod8
  379. /*
  380. * Target address is misaligned, and won't fit within a quadword
  381. */
  382. ldq_u $4,0($16) # L : Fetch first partial
  383. bis $16,$16,$5 # E : Save the address
  384. insql $17,$16,$2 # U : Insert new bytes
  385. subq $3,8,$3 # E : Invert (for addressing uses)
  386. addq $18,$3,$18 # E : $18 is new count ($3 is negative)
  387. mskql $4,$16,$4 # U : clear relevant parts of the quad
  388. subq $16,$3,$16 # E : $16 is new aligned destination
  389. bis $2,$4,$1 # E : Final bytes
  390. nop
  391. stq_u $1,0($5) # L : Store result
  392. nop
  393. nop
  394. .align 4
  395. aligned_w:
  396. /*
  397. * We are now guaranteed to be quad aligned, with at least
  398. * one partial quad to write.
  399. */
  400. sra $18,3,$3 # U : Number of remaining quads to write
  401. and $18,7,$18 # E : Number of trailing bytes to write
  402. bis $16,$16,$5 # E : Save dest address
  403. beq $3,no_quad_w # U : tail stuff only
  404. /*
  405. * it's worth the effort to unroll this and use wh64 if possible
  406. * Lifted a bunch of code from clear_user.S
  407. * At this point, entry values are:
  408. * $16 Current destination address
  409. * $5 A copy of $16
  410. * $6 The max quadword address to write to
  411. * $18 Number trailer bytes
  412. * $3 Number quads to write
  413. */
  414. and $16, 0x3f, $2 # E : Forward work (only useful for unrolled loop)
  415. subq $3, 16, $4 # E : Only try to unroll if > 128 bytes
  416. subq $2, 0x40, $1 # E : bias counter (aligning stuff 0mod64)
  417. blt $4, loop_w # U :
  418. /*
  419. * We know we've got at least 16 quads, minimum of one trip
  420. * through unrolled loop. Do a quad at a time to get us 0mod64
  421. * aligned.
  422. */
  423. nop # E :
  424. nop # E :
  425. nop # E :
  426. beq $1, $bigalign_w # U :
  427. $alignmod64_w:
  428. stq $17, 0($5) # L :
  429. subq $3, 1, $3 # E : For consistency later
  430. addq $1, 8, $1 # E : Increment towards zero for alignment
  431. addq $5, 8, $4 # E : Initial wh64 address (filler instruction)
  432. nop
  433. nop
  434. addq $5, 8, $5 # E : Inc address
  435. blt $1, $alignmod64_w # U :
  436. $bigalign_w:
  437. /*
  438. * $3 - number quads left to go
  439. * $5 - target address (aligned 0mod64)
  440. * $17 - mask of stuff to store
  441. * Scratch registers available: $7, $2, $4, $1
  442. * we know that we'll be taking a minimum of one trip through
  443. * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle
  444. * Assumes the wh64 needs to be for 2 trips through the loop in the future
  445. * The wh64 is issued on for the starting destination address for trip +2
  446. * through the loop, and if there are less than two trips left, the target
  447. * address will be for the current trip.
  448. */
  449. $do_wh64_w:
  450. wh64 ($4) # L1 : memory subsystem write hint
  451. subq $3, 24, $2 # E : For determining future wh64 addresses
  452. stq $17, 0($5) # L :
  453. nop # E :
  454. addq $5, 128, $4 # E : speculative target of next wh64
  455. stq $17, 8($5) # L :
  456. stq $17, 16($5) # L :
  457. addq $5, 64, $7 # E : Fallback address for wh64 (== next trip addr)
  458. stq $17, 24($5) # L :
  459. stq $17, 32($5) # L :
  460. cmovlt $2, $7, $4 # E : Latency 2, extra mapping cycle
  461. nop
  462. stq $17, 40($5) # L :
  463. stq $17, 48($5) # L :
  464. subq $3, 16, $2 # E : Repeat the loop at least once more?
  465. nop
  466. stq $17, 56($5) # L :
  467. addq $5, 64, $5 # E :
  468. subq $3, 8, $3 # E :
  469. bge $2, $do_wh64_w # U :
  470. nop
  471. nop
  472. nop
  473. beq $3, no_quad_w # U : Might have finished already
  474. .align 4
  475. /*
  476. * Simple loop for trailing quadwords, or for small amounts
  477. * of data (where we can't use an unrolled loop and wh64)
  478. */
  479. loop_w:
  480. stq $17,0($5) # L :
  481. subq $3,1,$3 # E : Decrement number quads left
  482. addq $5,8,$5 # E : Inc address
  483. bne $3,loop_w # U : more?
  484. no_quad_w:
  485. /*
  486. * Write 0..7 trailing bytes.
  487. */
  488. nop # E :
  489. beq $18,end_w # U : All done?
  490. ldq $7,0($5) # L :
  491. mskqh $7,$6,$2 # U : Mask final quad
  492. insqh $17,$6,$4 # U : New bits
  493. bis $2,$4,$1 # E : Put it all together
  494. stq $1,0($5) # L : And back to memory
  495. ret $31,($26),1 # L0 :
  496. within_quad_w:
  497. ldq_u $1,0($16) # L :
  498. insql $17,$16,$2 # U : New bits
  499. mskql $1,$16,$4 # U : Clear old
  500. bis $2,$4,$2 # E : New result
  501. mskql $2,$6,$4 # U :
  502. mskqh $1,$6,$2 # U :
  503. bis $2,$4,$1 # E :
  504. stq_u $1,0($16) # L :
  505. end_w:
  506. nop
  507. nop
  508. nop
  509. ret $31,($26),1 # L0 :
  510. .end __memsetw
  511. EXPORT_SYMBOL(__memsetw)
  512. memset = ___memset
  513. __memset = ___memset
  514. EXPORT_SYMBOL(memset)
  515. EXPORT_SYMBOL(__memset)