// rescaler_sse2.c
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 Rescaling functions
//
// Author: Skal (pascal.massimino@gmail.com)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>

#include <assert.h>
#include "../utils/rescaler_utils.h"
#include "../utils/utils.h"

//------------------------------------------------------------------------------
// Implementations of critical functions ImportRow / ExportRow

#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
  23. // input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0
  24. static void LoadTwoPixels(const uint8_t* const src, __m128i* out) {
  25. const __m128i zero = _mm_setzero_si128();
  26. const __m128i A = _mm_loadl_epi64((const __m128i*)(src)); // ABCDEFGH
  27. const __m128i B = _mm_unpacklo_epi8(A, zero); // A0B0C0D0E0F0G0H0
  28. const __m128i C = _mm_srli_si128(B, 8); // E0F0G0H0
  29. *out = _mm_unpacklo_epi16(B, C);
  30. }
  31. // input: 8 bytes ABCDEFGH -> output: A0B0C0D0E0F0G0H0
  32. static void LoadHeightPixels(const uint8_t* const src, __m128i* out) {
  33. const __m128i zero = _mm_setzero_si128();
  34. const __m128i A = _mm_loadl_epi64((const __m128i*)(src)); // ABCDEFGH
  35. *out = _mm_unpacklo_epi8(A, zero);
  36. }
// Horizontal expansion (upscaling) of one input row into wrk->frow using
// WEBP_RESCALER-fixed-point linear interpolation. SSE2 counterpart of
// WebPRescalerImportRowExpandC, to which it falls back for tiny widths.
static void RescalerImportRowExpandSSE2(WebPRescaler* const wrk,
                                        const uint8_t* src) {
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = frow + wrk->dst_width * wrk->num_channels;
  const int x_add = wrk->x_add;
  int accum = x_add;      // interpolation position, counts down by x_sub
  __m128i cur_pixels;     // current (and next) source samples, 16-bit lanes
  assert(!WebPRescalerInputDone(wrk));
  assert(wrk->x_expand);
  if (wrk->num_channels == 4) {
    // RGBA: interpolate between two whole pixels (8 bytes) at a time.
    if (wrk->src_width < 2) {
      WebPRescalerImportRowExpandC(wrk, src);  // too small for the SIMD path
      return;
    }
    LoadTwoPixels(src, &cur_pixels);
    src += 4;
    while (1) {
      // Every 32-bit lane holds the 16-bit pair (accum, x_add - accum), so
      // madd computes cur * accum + next * (x_add - accum) per channel.
      const __m128i mult = _mm_set1_epi32(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      _mm_storeu_si128((__m128i*)frow, out);
      frow += 4;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {      // advance to the next source pixel pair
        LoadTwoPixels(src, &cur_pixels);
        src += 4;
        accum += x_add;
      }
    }
  } else {
    // Single channel: keep 8 source bytes in a register, shift pixels out.
    int left;
    const uint8_t* const src_limit = src + wrk->src_width - 8;
    if (wrk->src_width < 8) {
      WebPRescalerImportRowExpandC(wrk, src);  // too small for the SIMD path
      return;
    }
    LoadHeightPixels(src, &cur_pixels);
    src += 7;
    left = 7;   // interpolation pairs still available in cur_pixels
    while (1) {
      // Same (accum, x_add - accum) pair, but only in the lowest lanes.
      const __m128i mult = _mm_cvtsi32_si128(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      assert(sizeof(*frow) == sizeof(uint32_t));
      WebPUint32ToMem((uint8_t*)frow, _mm_cvtsi128_si32(out));
      frow += 1;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {
        if (--left) {
          // Shift the register by one 16-bit pixel to expose the next pair.
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
        } else if (src <= src_limit) {
          // Refill a fresh batch of 8 pixels.
          LoadHeightPixels(src, &cur_pixels);
          src += 7;
          left = 7;
        } else {  // tail
          // Fewer than 8 bytes remain: insert the single next pixel by hand.
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
          cur_pixels = _mm_insert_epi16(cur_pixels, src[1], 1);
          src += 1;
          left = 1;
        }
        accum += x_add;
      }
    }
  }
  assert(accum == 0);   // interpolation must land exactly on the last pixel
}
// Horizontal shrink (downscaling) of one RGBA row into wrk->frow. Whole
// source pixels are accumulated into 'sum'; the source pixel straddling two
// output positions is split using the fractional remainder '-accum'.
// Falls back to the C version for non-RGBA data or reduction ratios beyond
// 1/128 (see the overflow note in the loop).
static void RescalerImportRowShrinkSSE2(WebPRescaler* const wrk,
                                        const uint8_t* src) {
  const int x_sub = wrk->x_sub;
  int accum = 0;
  const __m128i zero = _mm_setzero_si128();
  const __m128i mult0 = _mm_set1_epi16(x_sub);
  const __m128i mult1 = _mm_set1_epi32(wrk->fx_scale);
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  __m128i sum = zero;     // running per-channel accumulator (16-bit lanes)
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = wrk->frow + 4 * wrk->dst_width;
  if (wrk->num_channels != 4 || wrk->x_add > (x_sub << 7)) {
    WebPRescalerImportRowShrinkC(wrk, src);   // conditions for SIMD not met
    return;
  }
  assert(!WebPRescalerInputDone(wrk));
  assert(!wrk->x_expand);
  for (; frow < frow_end; frow += 4) {
    __m128i base = zero;  // last source pixel read (16-bit lanes)
    accum += wrk->x_add;
    // Accumulate every source pixel covered by this output pixel.
    while (accum > 0) {
      const __m128i A = _mm_cvtsi32_si128(WebPMemToUint32(src));
      src += 4;
      base = _mm_unpacklo_epi8(A, zero);
      // To avoid overflow, we need: base * x_add / x_sub < 32768
      // => x_add < x_sub << 7. That's a 1/128 reduction ratio limit.
      sum = _mm_add_epi16(sum, base);
      accum -= x_sub;
    }
    { // Emit next horizontal pixel.
      const __m128i mult = _mm_set1_epi16(-accum);   // fractional carry-over
      const __m128i frac0 = _mm_mullo_epi16(base, mult); // 16b x 16b -> 32b
      const __m128i frac1 = _mm_mulhi_epu16(base, mult);
      const __m128i frac = _mm_unpacklo_epi16(frac0, frac1); // frac is 32b
      const __m128i A0 = _mm_mullo_epi16(sum, mult0);
      const __m128i A1 = _mm_mulhi_epu16(sum, mult0);
      const __m128i B0 = _mm_unpacklo_epi16(A0, A1); // sum * x_sub
      const __m128i frow_out = _mm_sub_epi32(B0, frac); // sum * x_sub - frac
      // Re-seed 'sum' with frac scaled by fx_scale (rounded 64-bit multiply,
      // then take the high 32 bits) as the start of the next output pixel.
      const __m128i D0 = _mm_srli_epi64(frac, 32);
      const __m128i D1 = _mm_mul_epu32(frac, mult1); // 32b x 16b -> 64b
      const __m128i D2 = _mm_mul_epu32(D0, mult1);
      const __m128i E1 = _mm_add_epi64(D1, rounder);
      const __m128i E2 = _mm_add_epi64(D2, rounder);
      const __m128i F1 = _mm_shuffle_epi32(E1, 1 | (3 << 2));
      const __m128i F2 = _mm_shuffle_epi32(E2, 1 | (3 << 2));
      const __m128i G = _mm_unpacklo_epi32(F1, F2);
      sum = _mm_packs_epi32(G, zero);
      _mm_storeu_si128((__m128i*)frow, frow_out);
    }
  }
  assert(accum == 0);
}
  155. //------------------------------------------------------------------------------
  156. // Row export
  157. // load *src as epi64, multiply by mult and store result in [out0 ... out3]
  158. static WEBP_INLINE void LoadDispatchAndMult(const rescaler_t* const src,
  159. const __m128i* const mult,
  160. __m128i* const out0,
  161. __m128i* const out1,
  162. __m128i* const out2,
  163. __m128i* const out3) {
  164. const __m128i A0 = _mm_loadu_si128((const __m128i*)(src + 0));
  165. const __m128i A1 = _mm_loadu_si128((const __m128i*)(src + 4));
  166. const __m128i A2 = _mm_srli_epi64(A0, 32);
  167. const __m128i A3 = _mm_srli_epi64(A1, 32);
  168. if (mult != NULL) {
  169. *out0 = _mm_mul_epu32(A0, *mult);
  170. *out1 = _mm_mul_epu32(A1, *mult);
  171. *out2 = _mm_mul_epu32(A2, *mult);
  172. *out3 = _mm_mul_epu32(A3, *mult);
  173. } else {
  174. *out0 = A0;
  175. *out1 = A1;
  176. *out2 = A2;
  177. *out3 = A3;
  178. }
  179. }
// Multiply the four dispatched epi64 halves A0..A3 by *mult, round, shift
// down by WEBP_RESCALER_RFIX, re-interleave even/odd lanes, clamp to
// [0, 255] and store 8 bytes at 'dst'.
static WEBP_INLINE void ProcessRow(const __m128i* const A0,
                                   const __m128i* const A1,
                                   const __m128i* const A2,
                                   const __m128i* const A3,
                                   const __m128i* const mult,
                                   uint8_t* const dst) {
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  const __m128i mask = _mm_set_epi32(0xffffffffu, 0, 0xffffffffu, 0);
  const __m128i B0 = _mm_mul_epu32(*A0, *mult);
  const __m128i B1 = _mm_mul_epu32(*A1, *mult);
  const __m128i B2 = _mm_mul_epu32(*A2, *mult);
  const __m128i B3 = _mm_mul_epu32(*A3, *mult);
  const __m128i C0 = _mm_add_epi64(B0, rounder);
  const __m128i C1 = _mm_add_epi64(B1, rounder);
  const __m128i C2 = _mm_add_epi64(B2, rounder);
  const __m128i C3 = _mm_add_epi64(B3, rounder);
  // Even lanes: plain right shift leaves results in the low 32-bit halves.
  const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);
  const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
#if (WEBP_RESCALER_FIX < 32)
  // Odd lanes: shift left by (32 - RFIX) and mask, placing the results
  // directly in the high 32-bit halves (one shift instead of two).
  const __m128i D2 =
      _mm_and_si128(_mm_slli_epi64(C2, 32 - WEBP_RESCALER_RFIX), mask);
  const __m128i D3 =
      _mm_and_si128(_mm_slli_epi64(C3, 32 - WEBP_RESCALER_RFIX), mask);
#else
  const __m128i D2 = _mm_and_si128(C2, mask);
  const __m128i D3 = _mm_and_si128(C3, mask);
#endif
  const __m128i E0 = _mm_or_si128(D0, D2);   // re-interleave even/odd lanes
  const __m128i E1 = _mm_or_si128(D1, D3);
  const __m128i F = _mm_packs_epi32(E0, E1);
  const __m128i G = _mm_packus_epi16(F, F);  // saturate to [0, 255]
  _mm_storel_epi64((__m128i*)dst, G);
}
// Vertical export for the y-expand (upscaling) case: blend the fractional
// row (frow) with the integral row (irow) according to y_accum, scale by
// fy_scale and emit 8-bit samples. SSE2 counterpart of the C version.
static void RescalerExportRowExpandSSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const __m128i mult = _mm_set_epi32(0, wrk->fy_scale, 0, wrk->fy_scale);
  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0 && wrk->y_sub + wrk->y_accum >= 0);
  assert(wrk->y_expand);
  if (wrk->y_accum == 0) {
    // The output row coincides with a source row: only frow contributes.
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult(frow + x_out, NULL, &A0, &A1, &A2, &A3);
      ProcessRow(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    // Scalar tail for the remaining (x_out_max % 8) samples.
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t J = frow[x_out];
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
    }
  } else {
    // General case: blend A * frow + B * irow with A + B == RESCALER_ONE.
    const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub);
    const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B);
    const __m128i mA = _mm_set_epi32(0, A, 0, A);
    const __m128i mB = _mm_set_epi32(0, B, 0, B);
    const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult(frow + x_out, &mA, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult(irow + x_out, &mB, &B0, &B1, &B2, &B3);
      {
        const __m128i C0 = _mm_add_epi64(A0, B0);  // A * frow + B * irow
        const __m128i C1 = _mm_add_epi64(A1, B1);
        const __m128i C2 = _mm_add_epi64(A2, B2);
        const __m128i C3 = _mm_add_epi64(A3, B3);
        const __m128i D0 = _mm_add_epi64(C0, rounder);
        const __m128i D1 = _mm_add_epi64(C1, rounder);
        const __m128i D2 = _mm_add_epi64(C2, rounder);
        const __m128i D3 = _mm_add_epi64(C3, rounder);
        const __m128i E0 = _mm_srli_epi64(D0, WEBP_RESCALER_RFIX);
        const __m128i E1 = _mm_srli_epi64(D1, WEBP_RESCALER_RFIX);
        const __m128i E2 = _mm_srli_epi64(D2, WEBP_RESCALER_RFIX);
        const __m128i E3 = _mm_srli_epi64(D3, WEBP_RESCALER_RFIX);
        ProcessRow(&E0, &E1, &E2, &E3, &mult, dst + x_out);
      }
    }
    // Scalar tail, same blend in plain 64-bit arithmetic.
    for (; x_out < x_out_max; ++x_out) {
      const uint64_t I = (uint64_t)A * frow[x_out]
                       + (uint64_t)B * irow[x_out];
      const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX);
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
    }
  }
}
// Vertical export for the y-shrink (downscaling) case. When yscale != 0 the
// current frow still holds a fractional contribution that must be split off
// and kept in irow for the next output row; otherwise irow is emitted whole
// and reset to zero.
static void RescalerExportRowShrinkSSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum);
  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0);
  assert(!wrk->y_expand);
  if (yscale) {
    const int scale_xy = wrk->fxy_scale;
    const __m128i mult_xy = _mm_set_epi32(0, scale_xy, 0, scale_xy);
    const __m128i mult_y = _mm_set_epi32(0, yscale, 0, yscale);
    const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult(frow + x_out, &mult_y, &B0, &B1, &B2, &B3);
      {
        const __m128i C0 = _mm_add_epi64(B0, rounder);
        const __m128i C1 = _mm_add_epi64(B1, rounder);
        const __m128i C2 = _mm_add_epi64(B2, rounder);
        const __m128i C3 = _mm_add_epi64(B3, rounder);
        const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);   // = frac
        const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
        const __m128i D2 = _mm_srli_epi64(C2, WEBP_RESCALER_RFIX);
        const __m128i D3 = _mm_srli_epi64(C3, WEBP_RESCALER_RFIX);
        // NOTE(review): with round-to-nearest, frac could in rare cases
        // exceed irow[x] and wrap the unsigned subtraction below — TODO
        // confirm whether a floor (no rounder) is required here.
        const __m128i E0 = _mm_sub_epi64(A0, D0);   // irow[x] - frac
        const __m128i E1 = _mm_sub_epi64(A1, D1);
        const __m128i E2 = _mm_sub_epi64(A2, D2);
        const __m128i E3 = _mm_sub_epi64(A3, D3);
        // Re-interleave the even/odd fracs and save them as the new irow.
        const __m128i F2 = _mm_slli_epi64(D2, 32);
        const __m128i F3 = _mm_slli_epi64(D3, 32);
        const __m128i G0 = _mm_or_si128(D0, F2);
        const __m128i G1 = _mm_or_si128(D1, F3);
        _mm_storeu_si128((__m128i*)(irow + x_out + 0), G0);
        _mm_storeu_si128((__m128i*)(irow + x_out + 4), G1);
        ProcessRow(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
      }
    }
    // Scalar tail for the remaining samples.
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t frac = (int)MULT_FIX(frow[x_out], yscale);
      const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
      irow[x_out] = frac;   // new fractional start
    }
  } else {
    // No fractional carry: emit irow scaled by fxy_scale and clear it.
    const uint32_t scale = wrk->fxy_scale;
    const __m128i mult = _mm_set_epi32(0, scale, 0, scale);
    const __m128i zero = _mm_setzero_si128();
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      _mm_storeu_si128((__m128i*)(irow + x_out + 0), zero);
      _mm_storeu_si128((__m128i*)(irow + x_out + 4), zero);
      ProcessRow(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    // Scalar tail.
    for (; x_out < x_out_max; ++x_out) {
      const int v = (int)MULT_FIX(irow[x_out], scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
      irow[x_out] = 0;
    }
  }
}
#undef MULT_FIX
#undef ROUNDER

//------------------------------------------------------------------------------
  341. extern void WebPRescalerDspInitSSE2(void);
  342. WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitSSE2(void) {
  343. WebPRescalerImportRowExpand = RescalerImportRowExpandSSE2;
  344. WebPRescalerImportRowShrink = RescalerImportRowShrinkSSE2;
  345. WebPRescalerExportRowExpand = RescalerExportRowExpandSSE2;
  346. WebPRescalerExportRowShrink = RescalerExportRowShrinkSSE2;
  347. }
#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPRescalerDspInitSSE2)

#endif  // WEBP_USE_SSE2