usercopy.c
/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>		/* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE	(3*L1_CACHE_BYTES)

/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner so should have virtually no cost.
 */
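/*
 * Concretely, in each macro below: the first, unlabelled SET writes the
 * value without advancing the destination pointer, and the numbered
 * duplicate that follows post-increments it. Only the numbered duplicate
 * appears in __ex_table, so that is the address the fixup code is
 * attached to.
 */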
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	asm volatile ( \
		COPY \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" MOVT D1Ar1,#HI(1b)\n" \
		" JUMP D1Ar1,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "memory")
#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %2,%2,#1\n", \
		" .long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"2: SETW [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#2\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"2: SETD [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"4: SETW [%0++],D1Ar1\n" COPY, \
		"5: ADD %2,%2,#2\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"6: SETB [%0++],D1Ar1\n", \
		"7: ADD %2,%2,#1\n", \
		" .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"4: SETD [%0++],D1Ar1\n" COPY, \
		"5: ADD %2,%2,#4\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"6: SETB [%0++],D1Ar1\n", \
		"7: ADD %2,%2,#1\n", \
		" .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"6: SETW [%0++],D1Ar1\n" COPY, \
		"7: ADD %2,%2,#2\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"8: SETB [%0++],D1Ar1\n", \
		"9: ADD %2,%2,#1\n", \
		" .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"6: SETD [%0++],D1Ar1\n" COPY, \
		"7: ADD %2,%2,#4\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"8: SETB [%0++],D1Ar1\n", \
		"9: ADD %2,%2,#1\n", \
		" .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		" SETW [%0],D1Ar1\n" \
		"8: SETW [%0++],D1Ar1\n" COPY, \
		"9: ADD %2,%2,#2\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		" SETB [%0],D1Ar1\n" \
		"10: SETB [%0++],D1Ar1\n", \
		"11: ADD %2,%2,#1\n", \
		" .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		" SETD [%0],D1Ar1\n" \
		"8: SETD [%0++],D1Ar1\n" COPY, \
		"9: ADD %2,%2,#4\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_8x64(to, from, ret) \
	asm volatile ( \
		" GETL D0Ar2,D1Ar1,[%1++]\n" \
		" SETL [%0],D0Ar2,D1Ar1\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %2,%2,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")
/*
 * Optimized copying loop using RAPF when 64 bit aligned.
 *
 * n will be automatically decremented inside the loop.
 * ret will be left intact; if an error occurs we will rewind
 * so that the original non-optimized code will fill in
 * this value correctly.
 *
 * on fault:
 * > n will hold the total number of uncopied bytes
 *
 * > {'to','from'} will be rewound so that
 *   the non-optimized code will do the proper fix up
 *
 * DCACHE drops the cacheline, which helps in reducing cache
 * pollution.
 *
 * We introduce an extra SETL at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 * LSM_STEP in TXSTATUS must be cleared in the fixup code.
 * Since we're using M{S,G}ETL, a fault might happen at
 * any address in the middle of M{S,G}ETL, causing
 * the value of LSM_STEP to be incorrect, which can
 * cause subsequent use of M{S,G}ET{L,D} to go wrong.
 * i.e. if LSM_STEP was 1 when a fault occurred, the
 * next call to M{S,G}ET{L,D} will skip the first
 * copy/get as it thinks that the first one has already
 * been done.
 */
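/*
 * For reference: LSM_STEP is bits 10:8 of TXSTATUS, so the
 * "AND D1Ar1, D1Ar1, #0xFFFFF8FF" in the fixup below is what clears it
 * (0xFFFFF8FF has exactly those three bits zero).
 */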
#define __asm_copy_user_64bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
		".balign 8\n" \
		"MOV RAPF, %1\n" \
		"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"MOV D0Ar6, #0\n" \
		"LSR D1Ar5, %3, #6\n" \
		"SUB TXRPT, D1Ar5, #2\n" \
		"MOV RAPF, %1\n" \
		"$Lloop"id":\n" \
		"ADD RAPF, %1, #64\n" \
		"21:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"22:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"23:\n" \
		"SUB %3, %3, #32\n" \
		"24:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"25:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"26:\n" \
		"SUB %3, %3, #32\n" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"BR $Lloop"id"\n" \
		\
		"MOV RAPF, %1\n" \
		"27:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"28:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"29:\n" \
		"SUB %3, %3, #32\n" \
		"30:\n" \
		"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"31:\n" \
		"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"32:\n" \
		"SUB %0, %0, #8\n" \
		"33:\n" \
		"SETL [%0++], D0.7, D1.7\n" \
		"SUB %3, %3, #32\n" \
		"1:" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
		"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
		"GETL D0.5, D1.5, [A0StP+#-24]\n" \
		"GETL D0.6, D1.6, [A0StP+#-16]\n" \
		"GETL D0.7, D1.7, [A0StP+#-8]\n" \
		"SUB A0StP, A0StP, #40\n" \
		" .section .fixup,\"ax\"\n" \
		"4:\n" \
		" ADD %0, %0, #8\n" \
		"3:\n" \
		" MOV D0Ar2, TXSTATUS\n" \
		" MOV D1Ar1, TXSTATUS\n" \
		" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
		" MOV TXSTATUS, D1Ar1\n" \
		FIXUP \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 21b,3b\n" \
		" .long 22b,3b\n" \
		" .long 23b,3b\n" \
		" .long 24b,3b\n" \
		" .long 25b,3b\n" \
		" .long 26b,3b\n" \
		" .long 27b,3b\n" \
		" .long 28b,3b\n" \
		" .long 29b,3b\n" \
		" .long 30b,3b\n" \
		" .long 31b,3b\n" \
		" .long 32b,3b\n" \
		" .long 33b,4b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
		: "0" (to), "1" (from), "2" (ret), "3" (n) \
		: "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
 *
 * Rationale:
 * A fault always occurs on writing to the user buffer. A fault
 * is at a single address, so we need to rewind by only 8
 * bytes.
 * Since we do a complete read from the kernel buffer before
 * writing, we need to rewind it also. The amount to rewind
 * equals the number of faulty writes in MSETL,
 * which is: [4 - (LSM_STEP-1)]*8
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 * read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
 * LSM_STEP will be 0. i.e. we do 4 writes in our case; if
 * a fault happens at the 4th write, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 * n = n - (LSM_STEP-1)*8
 */
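/*
 * Worked example of the arithmetic above (illustrative only): a fault on
 * the 2nd of the 4 writes gives LSM_STEP == 2, so (LSM_STEP-1) == 1 write
 * completed; 'from' is rewound by [4 - 1]*8 == 24 bytes and n is reduced
 * by 1*8 == 8. In the LSM_STEP == 0 case the ADDZ below first treats it
 * as 4 (fault on the last write), giving a 'from' rewind of 8 bytes and
 * n reduced by 24.
 */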
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #8\n" \
		"ANDS D0Ar2, D0Ar2, #0x7\n" \
		"ADDZ D0Ar2, D0Ar2, #4\n" \
		"SUB D0Ar2, D0Ar2, #1\n" \
		"MOV D1Ar1, #4\n" \
		"SUB D0Ar2, D1Ar1, D0Ar2\n" \
		"LSL D0Ar2, D0Ar2, #3\n" \
		"LSL D1Ar1, D1Ar1, #3\n" \
		"SUB D1Ar1, D1Ar1, D0Ar2\n" \
		"SUB %0, %0, #8\n" \
		"SUB %1, %1, D0Ar2\n" \
		"SUB %3, %3, D1Ar1\n")
/*
 * Optimized copying loop using RAPF when 32 bit aligned.
 *
 * n will be automatically decremented inside the loop.
 * ret will be left intact; if an error occurs we will rewind
 * so that the original non-optimized code will fill in
 * this value correctly.
 *
 * on fault:
 * > n will hold the total number of uncopied bytes
 *
 * > {'to','from'} will be rewound so that
 *   the non-optimized code will do the proper fix up
 *
 * DCACHE drops the cacheline, which helps in reducing cache
 * pollution.
 *
 * We introduce an extra SETD at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 * LSM_STEP in TXSTATUS must be cleared in the fixup code.
 * Since we're using M{S,G}ETL, a fault might happen at
 * any address in the middle of M{S,G}ETL, causing
 * the value of LSM_STEP to be incorrect, which can
 * cause subsequent use of M{S,G}ET{L,D} to go wrong.
 * i.e. if LSM_STEP was 1 when a fault occurred, the
 * next call to M{S,G}ET{L,D} will skip the first
 * copy/get as it thinks that the first one has already
 * been done.
 */
#define __asm_copy_user_32bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
		".balign 8\n" \
		"MOV RAPF, %1\n" \
		"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"MOV D0Ar6, #0\n" \
		"LSR D1Ar5, %3, #6\n" \
		"SUB TXRPT, D1Ar5, #2\n" \
		"MOV RAPF, %1\n" \
		"$Lloop"id":\n" \
		"ADD RAPF, %1, #64\n" \
		"21:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"22:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"23:\n" \
		"SUB %3, %3, #16\n" \
		"24:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"25:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"26:\n" \
		"SUB %3, %3, #16\n" \
		"27:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"28:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"29:\n" \
		"SUB %3, %3, #16\n" \
		"30:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"31:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"32:\n" \
		"SUB %3, %3, #16\n" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"BR $Lloop"id"\n" \
		\
		"MOV RAPF, %1\n" \
		"33:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"34:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"35:\n" \
		"SUB %3, %3, #16\n" \
		"36:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"37:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"38:\n" \
		"SUB %3, %3, #16\n" \
		"39:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"40:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"41:\n" \
		"SUB %3, %3, #16\n" \
		"42:\n" \
		"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
		"43:\n" \
		"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
		"44:\n" \
		"SUB %0, %0, #4\n" \
		"45:\n" \
		"SETD [%0++], D0.7\n" \
		"SUB %3, %3, #16\n" \
		"1:" \
		"DCACHE [%1+#-64], D0Ar6\n" \
		"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
		"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
		"GETL D0.5, D1.5, [A0StP+#-24]\n" \
		"GETL D0.6, D1.6, [A0StP+#-16]\n" \
		"GETL D0.7, D1.7, [A0StP+#-8]\n" \
		"SUB A0StP, A0StP, #40\n" \
		" .section .fixup,\"ax\"\n" \
		"4:\n" \
		" ADD %0, %0, #4\n" \
		"3:\n" \
		" MOV D0Ar2, TXSTATUS\n" \
		" MOV D1Ar1, TXSTATUS\n" \
		" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
		" MOV TXSTATUS, D1Ar1\n" \
		FIXUP \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 21b,3b\n" \
		" .long 22b,3b\n" \
		" .long 23b,3b\n" \
		" .long 24b,3b\n" \
		" .long 25b,3b\n" \
		" .long 26b,3b\n" \
		" .long 27b,3b\n" \
		" .long 28b,3b\n" \
		" .long 29b,3b\n" \
		" .long 30b,3b\n" \
		" .long 31b,3b\n" \
		" .long 32b,3b\n" \
		" .long 33b,3b\n" \
		" .long 34b,3b\n" \
		" .long 35b,3b\n" \
		" .long 36b,3b\n" \
		" .long 37b,3b\n" \
		" .long 38b,3b\n" \
		" .long 39b,3b\n" \
		" .long 40b,3b\n" \
		" .long 41b,3b\n" \
		" .long 42b,3b\n" \
		" .long 43b,3b\n" \
		" .long 44b,3b\n" \
		" .long 45b,4b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
		: "0" (to), "1" (from), "2" (ret), "3" (n) \
		: "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
 *
 * Rationale:
 * A fault always occurs on writing to the user buffer. A fault
 * is at a single address, so we need to rewind by only 4
 * bytes.
 * Since we do a complete read from the kernel buffer before
 * writing, we need to rewind it also. The amount to rewind
 * equals the number of faulty writes in MSETD,
 * which is: [4 - (LSM_STEP-1)]*4
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 * read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
 * LSM_STEP will be 0. i.e. we do 4 writes in our case; if
 * a fault happens at the 4th write, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 * n = n - (LSM_STEP-1)*4
 */
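/*
 * Same arithmetic as the 64-bit variant above, scaled by 4 instead of 8
 * (illustrative only): a fault with LSM_STEP == 2 rewinds 'from' by
 * [4 - 1]*4 == 12 bytes and reduces n by 4; LSM_STEP == 0 is treated as
 * 4, so 'from' is rewound by 4 and n reduced by 12.
 */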
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #8\n" \
		"ANDS D0Ar2, D0Ar2, #0x7\n" \
		"ADDZ D0Ar2, D0Ar2, #4\n" \
		"SUB D0Ar2, D0Ar2, #1\n" \
		"MOV D1Ar1, #4\n" \
		"SUB D0Ar2, D1Ar1, D0Ar2\n" \
		"LSL D0Ar2, D0Ar2, #2\n" \
		"LSL D1Ar1, D1Ar1, #2\n" \
		"SUB D1Ar1, D1Ar1, D0Ar2\n" \
		"SUB %0, %0, #4\n" \
		"SUB %1, %1, D0Ar2\n" \
		"SUB %3, %3, D1Ar1\n")
unsigned long __copy_user(void __user *pdst, const void *psrc,
			  unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							   n, "64cu");
		}
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(__copy_user);
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %2,%2,#1\n", \
		" .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETW D1Ar1,[%1++]\n" \
		"2: SETW [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#2\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		" GETB D1Ar1,[%1++]\n" \
		"4: SETB [%0++],D1Ar1\n", \
		"5: ADD %2,%2,#1\n", \
		" .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		" GETD D1Ar1,[%1++]\n" \
		"2: SETD [%0++],D1Ar1\n" COPY, \
		"3: ADD %2,%2,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile ( \
		" GETL D0Ar2,D1Ar1,[%1++]\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %2,%2,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=a" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")
/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 * A fault occurs while reading from the user buffer, which is the
 * source.
 * Since we don't write to the kernel buffer until we have read first,
 * the kernel buffer is in the right state and needn't be
 * corrected, but the source must be rewound to the beginning of
 * the block, which is LSM_STEP*8 bytes.
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 * read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
 * LSM_STEP will be 0. i.e. we do 4 reads in our case; if
 * a fault happens at the 4th read, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 */
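/*
 * For illustration: the LSR #5 / ANDS #0x38 pair below computes
 * LSM_STEP*8 directly ((TXSTATUS>>8 & 0x7) << 3), so a fault with
 * LSM_STEP == 2 rewinds 'from' by 16 bytes, while LSM_STEP == 0 (fault
 * on the last transfer) takes the ADDZ path and rewinds the full 32.
 */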
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #5\n" \
		"ANDS D0Ar2, D0Ar2, #0x38\n" \
		"ADDZ D0Ar2, D0Ar2, #32\n" \
		"SUB %1, %1, D0Ar2\n")
/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 * A fault occurs while reading from the user buffer, which is the
 * source.
 * Since we don't write to the kernel buffer until we have read first,
 * the kernel buffer is in the right state and needn't be
 * corrected, but the source must be rewound to the beginning of
 * the block, which is LSM_STEP*4 bytes.
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 * read and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL
 * LSM_STEP will be 0. i.e. we do 4 reads in our case; if
 * a fault happens at the 4th read, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 */
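/*
 * Likewise, the LSR #6 / ANDS #0x1c pair below computes LSM_STEP*4
 * ((TXSTATUS>>8 & 0x7) << 2): e.g. LSM_STEP == 2 rewinds 'from' by
 * 8 bytes, and LSM_STEP == 0 rewinds the full 16 via the ADDZ.
 */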
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
		"LSR D0Ar2, D0Ar2, #6\n" \
		"ANDS D0Ar2, D0Ar2, #0x1c\n" \
		"ADDZ D0Ar2, D0Ar2, #16\n" \
		"SUB %1, %1, D0Ar2\n")
/*
 * Copy from user to kernel. The return value is the number of bytes
 * that were inaccessible.
 */
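/*
 * Typical caller, sketched here for context only (not part of this
 * file): the generic copy_from_user() ends up in raw_copy_from_user()
 * below, and callers treat a non-zero return as a partial copy, e.g.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */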
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
				 unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							     n, "64cuz");
		}
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						     n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
		/* These copies are at least "naturally aligned" (so we don't
		   have to check each byte), due to the src alignment code.
		   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;
}
EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
	asm volatile ( \
		" MOV D0Ar2,#0\n" \
		" MOV D1Ar1,#0\n" \
		" SETL [%0],D0Ar2,D1Ar1\n" \
		"2: SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		"3: ADD %1,%1,#8\n" \
		" MOVT D0Ar2,#HI(1b)\n" \
		" JUMP D0Ar2,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		" .long 2b,3b\n" \
		" .previous\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "D0Ar2", "memory")

/* Zero userspace. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile ( \
		" MOV D1Ar1,#0\n" \
		CLEAR \
		"1:\n" \
		" .section .fixup,\"ax\"\n" \
		FIXUP \
		" MOVT D1Ar1,#HI(1b)\n" \
		" JUMP D1Ar1,#LO(1b)\n" \
		" .previous\n" \
		" .section __ex_table,\"a\"\n" \
		TENTRY \
		" .previous" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		" SETB [%0],D1Ar1\n" \
		"2: SETB [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#1\n", \
		" .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		" SETW [%0],D1Ar1\n" \
		"2: SETW [%0++],D1Ar1\n", \
		"3: ADD %1,%1,#2\n", \
		" .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2: SETW [%0++],D1Ar1\n" \
		" SETB [%0],D1Ar1\n" \
		"3: SETB [%0++],D1Ar1\n", \
		"4: ADD %1,%1,#2\n" \
		"5: ADD %1,%1,#1\n", \
		" .long 2b,4b\n" \
		" .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"2: SETD [%0++],D1Ar1\n" CLEAR, \
		"3: ADD %1,%1,#4\n" FIXUP, \
		" .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"4: SETD [%0++],D1Ar1\n" CLEAR, \
		"5: ADD %1,%1,#4\n" FIXUP, \
		" .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"6: SETD [%0++],D1Ar1\n" CLEAR, \
		"7: ADD %1,%1,#4\n" FIXUP, \
		" .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		" SETD [%0],D1Ar1\n" \
		"8: SETD [%0++],D1Ar1\n" CLEAR, \
		"9: ADD %1,%1,#4\n" FIXUP, \
		" .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__do_clear_user);
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;

	asm volatile (
		" GETB %0,[%2]\n"
		"1:\n"
		" GETB %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;

	asm volatile (
		" GETW %0,[%2]\n"
		"1:\n"
		" GETW %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;

	asm volatile (
		" GETD %0,[%2]\n"
		"1:\n"
		" GETD %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);
long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETB [%2],%1\n"
		"1:\n"
		" SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETW [%2],%1\n"
		"1:\n"
		" SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETD [%2],%1\n"
		"1:\n"
		" SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETL [%2],%1,%t1\n"
		"1:\n"
		" SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);
long strnlen_user(const char __user *src, long count)
{
	long res;

	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile (" MOV D0Ar4, %1\n"
		" MOV D0Ar6, %2\n"
		"0:\n"
		" SUBS D0FrT, D0Ar6, #0\n"
		" SUB D0Ar6, D0Ar6, #1\n"
		" BLE 2f\n"
		" GETB D0FrT, [D0Ar4+#1++]\n"
		"1:\n"
		" TST D0FrT, #255\n"
		" BNE 0b\n"
		"2:\n"
		" SUB %0, %2, D0Ar6\n"
		"3:\n"
		" .section .fixup,\"ax\"\n"
		"4:\n"
		" MOV %0, #0\n"
		" MOVT D0FrT,#HI(3b)\n"
		" JUMP D0FrT,#LO(3b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,4b\n"
		" .previous\n"
		: "=r" (res)
		: "r" (src), "r" (count)
		: "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 * So do we.
	 *
	 * This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *		;
	 *
	 *	res = count - tmp1;
	 *
	 * with tweaks.
	 */

	asm volatile (" MOV %0,%3\n"
		"1:\n"
		" GETB D0FrT,[%2++]\n"
		"2:\n"
		" CMP D0FrT,#0\n"
		" SETB [%1++],D0FrT\n"
		" BEQ 3f\n"
		" SUBS %0,%0,#1\n"
		" BNZ 1b\n"
		"3:\n"
		" SUB %0,%3,%0\n"
		"4:\n"
		" .section .fixup,\"ax\"\n"
		"5:\n"
		" MOV %0,%7\n"
		" MOVT D0FrT,#HI(4b)\n"
		" JUMP D0FrT,#LO(4b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 2b,5b\n"
		" .previous"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		: "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);