shortidct4x4llm_neon.c

/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
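
/*
 * Fixed-point transform constants (Q16): 35468 is approximately
 * sin(pi / 8) * sqrt(2) * 65536 and 20091 is approximately
 * (cos(pi / 8) * sqrt(2) - 1) * 65536.  Note that 35468 does not fit in
 * int16_t and wraps to -30068 on two's-complement targets; the
 * shift-and-add sequence in the transform body compensates for this (see
 * the comment there).
 */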
static const int16_t cospi8sqrt2minus1 = 20091;
static const int16_t sinpi8sqrt2 = 35468;
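
/*
 * NEON inverse 4x4 transform for VP8: reads 16 dequantized coefficients
 * from input (row-major, stride 4), applies the two-pass inverse transform,
 * adds the resulting residual to the 4x4 predictor at pred_ptr (row stride
 * pred_stride) and writes the reconstructed pixels to dst_ptr (row stride
 * dst_stride).
 */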
void vp8_short_idct4x4llm_neon(int16_t *input, unsigned char *pred_ptr,
                               int pred_stride, unsigned char *dst_ptr,
                               int dst_stride) {
  int i;
  uint32x2_t d6u32 = vdup_n_u32(0);
  uint8x8_t d1u8;
  int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
  uint16x8_t q1u16;
  int16x8_t q1s16, q2s16, q3s16, q4s16;
  int32x2x2_t v2tmp0, v2tmp1;
  int16x4x2_t v2tmp2, v2tmp3;

  d2 = vld1_s16(input);
  d3 = vld1_s16(input + 4);
  d4 = vld1_s16(input + 8);
  d5 = vld1_s16(input + 12);

  // 1st for loop
  q1s16 = vcombine_s16(d2, d4);  // Swap d3 d4 here
  q2s16 = vcombine_s16(d3, d5);
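
  // The coefficient block is row-major with a stride of 4, so d2..d5 hold
  // rows 0..3.  Pairing row 0 with row 2 (q1s16) and row 1 with row 3
  // (q2s16) lets this first, vertical pass process all four columns at
  // once, one column per 16-bit lane.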

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q3s16 = vshrq_n_s16(q3s16, 1);
  q4s16 = vshrq_n_s16(q4s16, 1);

  q3s16 = vqaddq_s16(q3s16, q2s16);
  q4s16 = vqaddq_s16(q4s16, q2s16);
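
  // vqdmulhq_n_s16(x, k) computes (2 * x * k) >> 16 with saturation, so the
  // extra shift right by 1 yields (x * k) >> 16.  Adding x back in then
  // gives x + ((x * 20091) >> 16) for the cosine term and, for the sine
  // term, restores the 65536 lost when 35468 wrapped to -30068, leaving
  // (x * 35468) >> 16 (ignoring saturation).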

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));
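
  // The vtrn_s32/vtrn_s16 pairs transpose the 4x4 block of 16-bit values so
  // that the same butterfly can be reused for the second, horizontal pass,
  // this time with one row per lane.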

  // 2nd for loop
  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp3.val[0]);
  q2s16 = vcombine_s16(v2tmp2.val[1], v2tmp3.val[1]);

  q3s16 = vqdmulhq_n_s16(q2s16, sinpi8sqrt2);
  q4s16 = vqdmulhq_n_s16(q2s16, cospi8sqrt2minus1);

  d12 = vqadd_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // a1
  d13 = vqsub_s16(vget_low_s16(q1s16), vget_high_s16(q1s16));  // b1

  q3s16 = vshrq_n_s16(q3s16, 1);
  q4s16 = vshrq_n_s16(q4s16, 1);

  q3s16 = vqaddq_s16(q3s16, q2s16);
  q4s16 = vqaddq_s16(q4s16, q2s16);

  d10 = vqsub_s16(vget_low_s16(q3s16), vget_high_s16(q4s16));  // c1
  d11 = vqadd_s16(vget_high_s16(q3s16), vget_low_s16(q4s16));  // d1

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

  d2 = vrshr_n_s16(d2, 3);
  d3 = vrshr_n_s16(d3, 3);
  d4 = vrshr_n_s16(d4, 3);
  d5 = vrshr_n_s16(d5, 3);
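
  // vrshr_n_s16(x, 3) is the rounding shift (x + 4) >> 3 that scales the
  // final transform output down to residual range.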

  v2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  v2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  v2tmp2 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[0]),
                    vreinterpret_s16_s32(v2tmp1.val[0]));
  v2tmp3 = vtrn_s16(vreinterpret_s16_s32(v2tmp0.val[1]),
                    vreinterpret_s16_s32(v2tmp1.val[1]));

  q1s16 = vcombine_s16(v2tmp2.val[0], v2tmp2.val[1]);
  q2s16 = vcombine_s16(v2tmp3.val[0], v2tmp3.val[1]);
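
  // The second transpose returns the residual to row order: q1s16 holds
  // output rows 0 and 1, q2s16 rows 2 and 3, one pair per iteration of the
  // loop below.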
  // Add the residual to the prediction and store the reconstructed pixels
  // (the same add/store pattern as dc_only_idct_add).
  for (i = 0; i < 2; i++, q1s16 = q2s16) {
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 0);
    pred_ptr += pred_stride;
    d6u32 = vld1_lane_u32((const uint32_t *)pred_ptr, d6u32, 1);
    pred_ptr += pred_stride;
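
    // Widen the eight predictor bytes to 16 bits and add the residual; the
    // signed residual is reinterpreted as unsigned, which leaves the sum's
    // bit pattern unchanged.  vqmovun_s16 then saturates back to 0..255.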
    q1u16 = vaddw_u8(vreinterpretq_u16_s16(q1s16),
                     vreinterpret_u8_u32(d6u32));
    d1u8 = vqmovun_s16(vreinterpretq_s16_u16(q1u16));

    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 0);
    dst_ptr += dst_stride;
    vst1_lane_u32((uint32_t *)dst_ptr, vreinterpret_u32_u8(d1u8), 1);
    dst_ptr += dst_stride;
  }
  return;
}
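
/*
 * Illustrative usage sketch (the buffer names and strides below are
 * hypothetical, not part of libvpx): reconstruct one 4x4 block from its
 * dequantized coefficients.
 *
 *   int16_t coeffs[16];           // dequantized transform coefficients
 *   unsigned char pred[4 * 16];   // 4x4 predictor, row stride 16
 *   unsigned char recon[4 * 16];  // reconstruction target, row stride 16
 *   vp8_short_idct4x4llm_neon(coeffs, pred, 16, recon, 16);
 */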