/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/sse2.c
 *
 * SSE-2 implementation of RAID-6 syndrome functions
 *
 */

#include <linux/raid/pq.h>
#include "x86.h"
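
/*
 * 0x1d is the low byte of the RAID-6 generator polynomial
 * x^8 + x^4 + x^3 + x^2 + 1 (0x11d).  Replicated across a 16-byte vector it
 * serves as the conditional-XOR mask for the SIMD multiply-by-2 step in
 * GF(2^8).
 */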
static const struct raid6_sse_constants {
	u64 x1d[2];
} raid6_sse_constants __attribute__((aligned(16))) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL },
};
static int raid6_have_sse2(void)
{
	/* Not really boot_cpu but "all_cpus" */
	return boot_cpu_has(X86_FEATURE_MMX) &&
		boot_cpu_has(X86_FEATURE_FXSR) &&
		boot_cpu_has(X86_FEATURE_XMM) &&
		boot_cpu_has(X86_FEATURE_XMM2);
}
/*
 * Plain SSE2 implementation
 */
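/*
 * gen_syndrome() computes P (the plain XOR of all data disks) and Q (the
 * GF(2^8) Reed-Solomon syndrome) for a whole stripe, walking the data disks
 * from highest to lowest so Q can be built by Horner's rule: multiply the
 * running sum by 2, then XOR in the next disk's data.
 */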
static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("movdqa %xmm2,%xmm4");	/* Q[0] */
		asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z0-1][d]));
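		/*
		 * Each pass multiplies the running Q (xmm4) by 2 in GF(2^8):
		 * pcmpgtb against the zeroed xmm5 marks bytes whose top bit
		 * is set, paddb doubles every byte, and pand/pxor fold in the
		 * 0x1d reduction for the bytes that overflowed.  The next
		 * data block is then XORed into both the P and Q accumulators.
		 */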
		for ( z = z0-2 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm6,%xmm2");
			asm volatile("pxor %xmm6,%xmm4");
			asm volatile("movdqa %0,%%xmm6" : : "m" (dptr[z][d]));
		}
		asm volatile("pcmpgtb %xmm4,%xmm5");
		asm volatile("paddb %xmm4,%xmm4");
		asm volatile("pand %xmm0,%xmm5");
		asm volatile("pxor %xmm5,%xmm4");
		asm volatile("pxor %xmm5,%xmm5");
		asm volatile("pxor %xmm6,%xmm2");
		asm volatile("pxor %xmm6,%xmm4");

		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
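
/*
 * xor_syndrome() folds the P/Q contribution of data disks start..stop into
 * the existing parity blocks.  The "right side" optimization starts the Q
 * accumulation at disk stop rather than at the highest data disk; the
 * "left side" loop only keeps multiplying the accumulator by 2 for disks
 * below start, since no data from those disks needs to be loaded.
 */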
static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 16 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("pxor %xmm4,%xmm2");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm5,%xmm4");
		}
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pxor %xmm5,%xmm4");
		}
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x1 = {
	raid6_sse21_gen_syndrome,
	raid6_sse21_xor_syndrome,
	raid6_have_sse2,
	"sse2x1",
	1			/* Has cache hints */
};
/*
 * Unrolled-by-2 SSE2 implementation
 */
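/*
 * This variant processes two 16-byte lanes per loop iteration, using
 * disjoint register sets (xmm2/xmm4/xmm5 and xmm3/xmm6/xmm7) so the two
 * multiply-by-2 dependency chains stay independent, presumably to hide
 * instruction latency.
 */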
static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm5,%xmm5"); /* Zero temp */
	asm volatile("pxor %xmm7,%xmm7"); /* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (dptr[z0][d])); /* P[0] */
		asm volatile("movdqa %0,%%xmm3" : : "m" (dptr[z0][d+16])); /* P[1] */
		asm volatile("movdqa %xmm2,%xmm4"); /* Q[0] */
		asm volatile("movdqa %xmm3,%xmm6"); /* Q[1] */
		for ( z = z0-1 ; z >= 0 ; z-- ) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("movdqa %0,%%xmm5" : : "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" : : "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" : : "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 32 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
		asm volatile("pxor %xmm4,%xmm2");
		asm volatile("pxor %xmm6,%xmm3");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
		}
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
		}
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("movdqa %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("movdqa %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movdqa %%xmm3,%0" : "=m" (p[d+16]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x2 = {
	raid6_sse22_gen_syndrome,
	raid6_sse22_xor_syndrome,
	raid6_have_sse2,
	"sse2x2",
	1			/* Has cache hints */
};
#ifdef CONFIG_X86_64

/*
 * Unrolled-by-4 SSE2 implementation
 */
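/*
 * The unrolled-by-4 routines need xmm8-xmm15, which are only available in
 * 64-bit mode, hence the CONFIG_X86_64 guard.
 */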
static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));
	asm volatile("pxor %xmm2,%xmm2");	/* P[0] */
	asm volatile("pxor %xmm3,%xmm3");	/* P[1] */
	asm volatile("pxor %xmm4,%xmm4");	/* Q[0] */
	asm volatile("pxor %xmm5,%xmm5");	/* Zero temp */
	asm volatile("pxor %xmm6,%xmm6");	/* Q[1] */
	asm volatile("pxor %xmm7,%xmm7");	/* Zero temp */
	asm volatile("pxor %xmm10,%xmm10");	/* P[2] */
	asm volatile("pxor %xmm11,%xmm11");	/* P[3] */
	asm volatile("pxor %xmm12,%xmm12");	/* Q[2] */
	asm volatile("pxor %xmm13,%xmm13");	/* Zero temp */
	asm volatile("pxor %xmm14,%xmm14");	/* Q[3] */
	asm volatile("pxor %xmm15,%xmm15");	/* Zero temp */

	for ( d = 0 ; d < bytes ; d += 64 ) {
		for ( z = z0 ; z >= 0 ; z-- ) {
			/* The second prefetch seems to improve performance... */
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("pxor %xmm2,%xmm2");
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("pxor %xmm3,%xmm3");
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("pxor %xmm10,%xmm10");
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %xmm11,%xmm11");
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("pxor %xmm4,%xmm4");
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("pxor %xmm6,%xmm6");
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("pxor %xmm12,%xmm12");
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
		asm volatile("pxor %xmm14,%xmm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("movdqa %0,%%xmm0" :: "m" (raid6_sse_constants.x1d[0]));

	for ( d = 0 ; d < bytes ; d += 64 ) {
		asm volatile("movdqa %0,%%xmm4" :: "m" (dptr[z0][d]));
		asm volatile("movdqa %0,%%xmm6" :: "m" (dptr[z0][d+16]));
		asm volatile("movdqa %0,%%xmm12" :: "m" (dptr[z0][d+32]));
		asm volatile("movdqa %0,%%xmm14" :: "m" (dptr[z0][d+48]));
		asm volatile("movdqa %0,%%xmm2" : : "m" (p[d]));
		asm volatile("movdqa %0,%%xmm3" : : "m" (p[d+16]));
		asm volatile("movdqa %0,%%xmm10" : : "m" (p[d+32]));
		asm volatile("movdqa %0,%%xmm11" : : "m" (p[d+48]));
		asm volatile("pxor %xmm4,%xmm2");
		asm volatile("pxor %xmm6,%xmm3");
		asm volatile("pxor %xmm12,%xmm10");
		asm volatile("pxor %xmm14,%xmm11");
		/* P/Q data pages */
		for ( z = z0-1 ; z >= start ; z-- ) {
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+32]));
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
			asm volatile("movdqa %0,%%xmm5" :: "m" (dptr[z][d]));
			asm volatile("movdqa %0,%%xmm7" :: "m" (dptr[z][d+16]));
			asm volatile("movdqa %0,%%xmm13" :: "m" (dptr[z][d+32]));
			asm volatile("movdqa %0,%%xmm15" :: "m" (dptr[z][d+48]));
			asm volatile("pxor %xmm5,%xmm2");
			asm volatile("pxor %xmm7,%xmm3");
			asm volatile("pxor %xmm13,%xmm10");
			asm volatile("pxor %xmm15,%xmm11");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
		}
		asm volatile("prefetchnta %0" :: "m" (q[d]));
		asm volatile("prefetchnta %0" :: "m" (q[d+32]));
		/* P/Q left side optimization */
		for ( z = start-1 ; z >= 0 ; z-- ) {
			asm volatile("pxor %xmm5,%xmm5");
			asm volatile("pxor %xmm7,%xmm7");
			asm volatile("pxor %xmm13,%xmm13");
			asm volatile("pxor %xmm15,%xmm15");
			asm volatile("pcmpgtb %xmm4,%xmm5");
			asm volatile("pcmpgtb %xmm6,%xmm7");
			asm volatile("pcmpgtb %xmm12,%xmm13");
			asm volatile("pcmpgtb %xmm14,%xmm15");
			asm volatile("paddb %xmm4,%xmm4");
			asm volatile("paddb %xmm6,%xmm6");
			asm volatile("paddb %xmm12,%xmm12");
			asm volatile("paddb %xmm14,%xmm14");
			asm volatile("pand %xmm0,%xmm5");
			asm volatile("pand %xmm0,%xmm7");
			asm volatile("pand %xmm0,%xmm13");
			asm volatile("pand %xmm0,%xmm15");
			asm volatile("pxor %xmm5,%xmm4");
			asm volatile("pxor %xmm7,%xmm6");
			asm volatile("pxor %xmm13,%xmm12");
			asm volatile("pxor %xmm15,%xmm14");
		}
		asm volatile("movntdq %%xmm2,%0" : "=m" (p[d]));
		asm volatile("movntdq %%xmm3,%0" : "=m" (p[d+16]));
		asm volatile("movntdq %%xmm10,%0" : "=m" (p[d+32]));
		asm volatile("movntdq %%xmm11,%0" : "=m" (p[d+48]));
		asm volatile("pxor %0,%%xmm4" : : "m" (q[d]));
		asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16]));
		asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32]));
		asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48]));
		asm volatile("movntdq %%xmm4,%0" : "=m" (q[d]));
		asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16]));
		asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32]));
		asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
const struct raid6_calls raid6_sse2x4 = {
	raid6_sse24_gen_syndrome,
	raid6_sse24_xor_syndrome,
	raid6_have_sse2,
	"sse2x4",
	1			/* Has cache hints */
};

#endif /* CONFIG_X86_64 */