recov_avx2.c

/*
 * Copyright (C) 2012 Intel Corporation
 * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#ifdef CONFIG_AS_AVX2

#include <linux/raid/pq.h>
#include "x86.h"
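
/*
 * Note that raid6_has_avx2() tests X86_FEATURE_AVX as well as AVX2:
 * the kernel clears the AVX feature bit when the OS does not enable
 * XSAVE for YMM state, so checking it too presumably guards against
 * executing the YMM code below without usable AVX state.
 */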
static int raid6_has_avx2(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2) &&
		boot_cpu_has(X86_FEATURE_AVX);
}

static void raid6_2data_recov_avx2(int disks, size_t bytes, int faila,
		int failb, void **ptrs)
{
	u8 *p, *q, *dp, *dq;
	const u8 *pbmul;	/* P multiplier table for B data */
	const u8 *qmul;		/* Q multiplier table (for both) */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data pages
	   Use the dead data pages as temporary storage for
	   delta p and delta q */
	dp = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-2] = dp;
	dq = (u8 *)ptrs[failb];
	ptrs[failb] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dp;
	ptrs[failb]   = dq;
	ptrs[disks-2] = p;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	pbmul = raid6_vgfmul[raid6_gfexi[failb-faila]];
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^
		raid6_gfexp[failb]]];
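
	/*
	 * dp and dq now hold the P/Q syndromes computed with zeroes in
	 * place of the two lost blocks.  With px = P ^ dp and
	 * qx = Q ^ dq, the lost data Da, Db satisfy
	 *
	 *	px = Da ^ Db
	 *	qx = (g^faila * Da) ^ (g^failb * Db)
	 *
	 * so Db = (g^faila * px ^ qx) / (g^faila ^ g^failb), which the
	 * tables above factor as Db = pbmul[px] ^ qmul[qx], and then
	 * Da = px ^ Db.  Byte for byte, the vector loop below computes
	 * what the generic raid6_2data_recov() in recov.c spells out as:
	 *
	 *	px    = *p ^ *dp;
	 *	qx    = qmul[*q ^ *dq];
	 *	*dq++ = db = pbmul[px] ^ qx;
	 *	*dp++ = db ^ px;
	 *	p++; q++;
	 */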

	kernel_fpu_begin();

	/* ymm7 = x0f repeated in all 32 bytes (the nibble mask) */
	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));
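
	/*
	 * Each GF(2^8) multiplication below is performed a nibble at a
	 * time with vpshufb: ymm7 masks out the low nibble of every
	 * byte, vpsraw plus the same mask extracts the high nibble,
	 * each nibble then indexes one of the two 16-entry lookup
	 * tables that make up a raid6_vgfmul[] entry (vbroadcasti128
	 * copies the 16-byte table into both 128-bit lanes), and the
	 * two partial products are XORed together.  The x86_64 path
	 * additionally uses ymm8-ymm15 to process 64 bytes per
	 * iteration; i386 has only ymm0-ymm7 and processes 32.
	 */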
	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0]));
		asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32]));
		asm volatile("vmovdqa %0, %%ymm0" : : "m" (p[0]));
		asm volatile("vmovdqa %0, %%ymm8" : : "m" (p[32]));
		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (dq[0]));
		asm volatile("vpxor %0, %%ymm9, %%ymm9" : : "m" (dq[32]));
		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (dp[0]));
		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (dp[32]));

		/*
		 * 1 = dq[0]  ^ q[0]
		 * 9 = dq[32] ^ q[32]
		 * 0 = dp[0]  ^ p[0]
		 * 8 = dp[32] ^ p[32]
		 */
		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));

		asm volatile("vpsraw $4, %ymm1, %ymm3");
		asm volatile("vpsraw $4, %ymm9, %ymm12");
		asm volatile("vpand %ymm7, %ymm1, %ymm1");
		asm volatile("vpand %ymm7, %ymm9, %ymm9");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm12, %ymm12");
		asm volatile("vpshufb %ymm9, %ymm4, %ymm14");
		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm12, %ymm5, %ymm15");
		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
		asm volatile("vpxor %ymm14, %ymm15, %ymm15");
		asm volatile("vpxor %ymm4, %ymm5, %ymm5");

		/*
		 * 5  = qx[0]
		 * 15 = qx[32]
		 */
		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
		asm volatile("vpsraw $4, %ymm0, %ymm2");
		asm volatile("vpsraw $4, %ymm8, %ymm6");
		asm volatile("vpand %ymm7, %ymm0, %ymm3");
		asm volatile("vpand %ymm7, %ymm8, %ymm14");
		asm volatile("vpand %ymm7, %ymm2, %ymm2");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpshufb %ymm14, %ymm4, %ymm12");
		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm13");
		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
		asm volatile("vpxor %ymm4, %ymm1, %ymm1");
		asm volatile("vpxor %ymm12, %ymm13, %ymm13");

		/*
		 * 1  = pbmul[px[0]]
		 * 13 = pbmul[px[32]]
		 */
		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
		asm volatile("vpxor %ymm15, %ymm13, %ymm13");

		/*
		 * 1  = db = DQ
		 * 13 = db[32] = DQ[32]
		 */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm13, %0" : "=m" (dq[32]));
		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
		asm volatile("vpxor %ymm13, %ymm8, %ymm8");

		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));
		asm volatile("vmovdqa %%ymm8, %0" : "=m" (dp[32]));

		bytes -= 64;
		p += 64;
		q += 64;
		dp += 64;
		dq += 64;
#else
		asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q));
		asm volatile("vmovdqa %0, %%ymm0" : : "m" (*p));
		asm volatile("vpxor %0, %%ymm1, %%ymm1" : : "m" (*dq));
		asm volatile("vpxor %0, %%ymm0, %%ymm0" : : "m" (*dp));

		/* 1 = dq ^ q;  0 = dp ^ p */
		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm5" : : "m" (qmul[16]));

		/*
		 * 1 = dq ^ q
		 * 3 = (dq ^ q) >> 4
		 */
		asm volatile("vpsraw $4, %ymm1, %ymm3");
		asm volatile("vpand %ymm7, %ymm1, %ymm1");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpshufb %ymm1, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm3, %ymm5, %ymm5");
		asm volatile("vpxor %ymm4, %ymm5, %ymm5");

		/* 5 = qx */
		asm volatile("vbroadcasti128 %0, %%ymm4" : : "m" (pbmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (pbmul[16]));
		asm volatile("vpsraw $4, %ymm0, %ymm2");
		asm volatile("vpand %ymm7, %ymm0, %ymm3");
		asm volatile("vpand %ymm7, %ymm2, %ymm2");
		asm volatile("vpshufb %ymm3, %ymm4, %ymm4");
		asm volatile("vpshufb %ymm2, %ymm1, %ymm1");
		asm volatile("vpxor %ymm4, %ymm1, %ymm1");

		/* 1 = pbmul[px] */
		asm volatile("vpxor %ymm5, %ymm1, %ymm1");
		/* 1 = db = DQ */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));

		asm volatile("vpxor %ymm1, %ymm0, %ymm0");
		asm volatile("vmovdqa %%ymm0, %0" : "=m" (dp[0]));

		bytes -= 32;
		p += 32;
		q += 32;
		dp += 32;
		dq += 32;
#endif
	}

	kernel_fpu_end();
}

static void raid6_datap_recov_avx2(int disks, size_t bytes, int faila,
		void **ptrs)
{
	u8 *p, *q, *dq;
	const u8 *qmul;		/* Q multiplier table */
	const u8 x0f = 0x0f;

	p = (u8 *)ptrs[disks-2];
	q = (u8 *)ptrs[disks-1];

	/* Compute syndrome with zero for the missing data page
	   Use the dead data page as temporary storage for delta q */
	dq = (u8 *)ptrs[faila];
	ptrs[faila] = (void *)raid6_empty_zero_page;
	ptrs[disks-1] = dq;

	raid6_call.gen_syndrome(disks, bytes, ptrs);

	/* Restore pointer table */
	ptrs[faila]   = dq;
	ptrs[disks-1] = q;

	/* Now, pick the proper data tables */
	qmul  = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
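
	/*
	 * dq now holds the Q syndrome recomputed with a zero block in
	 * place of the lost data, and p was likewise overwritten with
	 * the recomputed P, so qmul[*q ^ *dq] recovers the lost block
	 * and XORing it into p restores the true parity.  Byte for
	 * byte, the loop below is the generic raid6_datap_recov() from
	 * recov.c:
	 *
	 *	*p++ ^= *dq = qmul[*q ^ *dq];
	 *	q++; dq++;
	 */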

	kernel_fpu_begin();
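
	/* ymm7 = x0f repeated in all 32 bytes (the nibble mask) */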
	asm volatile("vpbroadcastb %0, %%ymm7" : : "m" (x0f));

	while (bytes) {
#ifdef CONFIG_X86_64
		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
		asm volatile("vmovdqa %0, %%ymm8" : : "m" (dq[32]));
		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));
		asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32]));

		/*
		 * 3 = q[0]  ^ dq[0]
		 * 8 = q[32] ^ dq[32]
		 */
		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
		asm volatile("vmovapd %ymm0, %ymm13");
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));
		asm volatile("vmovapd %ymm1, %ymm14");

		asm volatile("vpsraw $4, %ymm3, %ymm6");
		asm volatile("vpsraw $4, %ymm8, %ymm12");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm8, %ymm8");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpand %ymm7, %ymm12, %ymm12");
		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
		asm volatile("vpshufb %ymm8, %ymm13, %ymm13");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
		asm volatile("vpshufb %ymm12, %ymm14, %ymm14");
		asm volatile("vpxor %ymm0, %ymm1, %ymm1");
		asm volatile("vpxor %ymm13, %ymm14, %ymm14");

		/*
		 * 1  = qmul[q[0]  ^ dq[0]]
		 * 14 = qmul[q[32] ^ dq[32]]
		 */
		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
		asm volatile("vmovdqa %0, %%ymm12" : : "m" (p[32]));
		asm volatile("vpxor %ymm1, %ymm2, %ymm2");
		asm volatile("vpxor %ymm14, %ymm12, %ymm12");

		/*
		 * 2  = p[0]  ^ qmul[q[0]  ^ dq[0]]
		 * 12 = p[32] ^ qmul[q[32] ^ dq[32]]
		 */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm14, %0" : "=m" (dq[32]));
		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));
		asm volatile("vmovdqa %%ymm12, %0" : "=m" (p[32]));

		bytes -= 64;
		p += 64;
		q += 64;
		dq += 64;
#else
		asm volatile("vmovdqa %0, %%ymm3" : : "m" (dq[0]));
		asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0]));

		/* 3 = q ^ dq */
		asm volatile("vbroadcasti128 %0, %%ymm0" : : "m" (qmul[0]));
		asm volatile("vbroadcasti128 %0, %%ymm1" : : "m" (qmul[16]));

		asm volatile("vpsraw $4, %ymm3, %ymm6");
		asm volatile("vpand %ymm7, %ymm3, %ymm3");
		asm volatile("vpand %ymm7, %ymm6, %ymm6");
		asm volatile("vpshufb %ymm3, %ymm0, %ymm0");
		asm volatile("vpshufb %ymm6, %ymm1, %ymm1");
		asm volatile("vpxor %ymm0, %ymm1, %ymm1");

		/* 1 = qmul[q ^ dq] */
		asm volatile("vmovdqa %0, %%ymm2" : : "m" (p[0]));
		asm volatile("vpxor %ymm1, %ymm2, %ymm2");

		/* 2 = p ^ qmul[q ^ dq] */
		asm volatile("vmovdqa %%ymm1, %0" : "=m" (dq[0]));
		asm volatile("vmovdqa %%ymm2, %0" : "=m" (p[0]));

		bytes -= 32;
		p += 32;
		q += 32;
		dq += 32;
#endif
	}

	kernel_fpu_end();
}

const struct raid6_recov_calls raid6_recov_avx2 = {
	.data2 = raid6_2data_recov_avx2,
	.datap = raid6_datap_recov_avx2,
	.valid = raid6_has_avx2,
#ifdef CONFIG_X86_64
	.name = "avx2x2",
#else
	.name = "avx2x1",
#endif
	.priority = 2,
};

#else
#warning "your version of binutils lacks AVX2 support"
#endif
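
/*
 * The raid6 core (lib/raid6/algos.c) walks its list of recovery
 * implementations and selects the highest-priority entry whose
 * ->valid() hook succeeds, so .priority = 2 makes these routines
 * preferred over the SSSE3 versions (priority 1) whenever the CPU
 * advertises AVX2.
 */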