/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009                *
 * by the Xiph.Org Foundation http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

  function:
  last mod: $Id: dsp_mmx.c 14579 2008-03-12 06:42:40Z xiphmont $

 ********************************************************************/
#include <stddef.h>
#include "x86enc.h"
#if defined(OC_X86_ASM)
unsigned oc_enc_frag_sad_mmxext(const unsigned char *_src,
 const unsigned char *_ref,int _ystride){
  ptrdiff_t ystride3;
  ptrdiff_t ret;
  __asm__ __volatile__(
    /*Load the first 4 rows of each block.*/
    "movq (%[src]),%%mm0\n\t"
    "movq (%[ref]),%%mm1\n\t"
    "movq (%[src],%[ystride]),%%mm2\n\t"
    "movq (%[ref],%[ystride]),%%mm3\n\t"
    "lea (%[ystride],%[ystride],2),%[ystride3]\n\t"
    "movq (%[src],%[ystride],2),%%mm4\n\t"
    "movq (%[ref],%[ystride],2),%%mm5\n\t"
    "movq (%[src],%[ystride3]),%%mm6\n\t"
    "movq (%[ref],%[ystride3]),%%mm7\n\t"
    /*Compute their SADs and add them in %%mm0.*/
    "psadbw %%mm1,%%mm0\n\t"
    "psadbw %%mm3,%%mm2\n\t"
    "lea (%[src],%[ystride],4),%[src]\n\t"
    "paddw %%mm2,%%mm0\n\t"
    "lea (%[ref],%[ystride],4),%[ref]\n\t"
    /*Load the next 3 rows as registers become available.*/
    "movq (%[src]),%%mm2\n\t"
    "movq (%[ref]),%%mm3\n\t"
    "psadbw %%mm5,%%mm4\n\t"
    "psadbw %%mm7,%%mm6\n\t"
    "paddw %%mm4,%%mm0\n\t"
    "movq (%[ref],%[ystride]),%%mm5\n\t"
    "movq (%[src],%[ystride]),%%mm4\n\t"
    "paddw %%mm6,%%mm0\n\t"
    "movq (%[ref],%[ystride],2),%%mm7\n\t"
    "movq (%[src],%[ystride],2),%%mm6\n\t"
    /*Start adding their SADs to %%mm0.*/
    "psadbw %%mm3,%%mm2\n\t"
    "psadbw %%mm5,%%mm4\n\t"
    "paddw %%mm2,%%mm0\n\t"
    "psadbw %%mm7,%%mm6\n\t"
    /*Load the last row as registers become available.*/
    "movq (%[src],%[ystride3]),%%mm2\n\t"
    "movq (%[ref],%[ystride3]),%%mm3\n\t"
    /*And finish adding up their SADs.*/
    "paddw %%mm4,%%mm0\n\t"
    "psadbw %%mm3,%%mm2\n\t"
    "paddw %%mm6,%%mm0\n\t"
    "paddw %%mm2,%%mm0\n\t"
    "movd %%mm0,%[ret]\n\t"
    :[ret]"=a"(ret),[src]"+%r"(_src),[ref]"+r"(_ref),[ystride3]"=&r"(ystride3)
    :[ystride]"r"((ptrdiff_t)_ystride)
  );
  return (unsigned)ret;
}
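/*An illustrative C sketch (not part of the codec; the name is hypothetical)
  of what the MMXEXT routine above computes: psadbw sums the absolute
  differences of eight byte pairs at once, and the paddw chain accumulates
  those per-row sums over all eight rows of the fragment.*/
static unsigned oc_frag_sad_c(const unsigned char *_src,
 const unsigned char *_ref,int _ystride){
  unsigned sad;
  int      i;
  int      j;
  sad=0;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int d;
      d=_src[j]-_ref[j];
      sad+=d<0?-d:d;
    }
    _src+=_ystride;
    _ref+=_ystride;
  }
  return sad;
}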
unsigned oc_enc_frag_sad_thresh_mmxext(const unsigned char *_src,
 const unsigned char *_ref,int _ystride,unsigned _thresh){
  /*Early termination is for suckers.*/
  return oc_enc_frag_sad_mmxext(_src,_ref,_ystride);
}
/*Assumes the first two rows of %[ref1] and %[ref2] are in %%mm0...%%mm3, the
  first two rows of %[src] are in %%mm4,%%mm5, and {1}x8 is in %%mm7.
  We pre-load the next two rows of data as registers become available.*/
#define OC_SAD2_LOOP \
 "#OC_SAD2_LOOP\n\t" \
 /*We want to compute (%%mm0+%%mm1>>1) on unsigned bytes without overflow, but \
   pavgb computes (%%mm0+%%mm1+1>>1). \
   The latter is exactly 1 too large when the low bit of two corresponding \
   bytes is only set in one of them. \
   Therefore we pxor the operands, pand to mask out the low bits, and psubb to \
   correct the output of pavgb. \
   TODO: This should be rewritten to compute ~pavgb(~a,~b) instead, which \
   schedules better; currently, however, this function is unused.*/ \
 "movq %%mm0,%%mm6\n\t" \
 "lea (%[ref1],%[ystride],2),%[ref1]\n\t" \
 "pxor %%mm1,%%mm0\n\t" \
 "pavgb %%mm1,%%mm6\n\t" \
 "lea (%[ref2],%[ystride],2),%[ref2]\n\t" \
 "movq %%mm2,%%mm1\n\t" \
 "pand %%mm7,%%mm0\n\t" \
 "pavgb %%mm3,%%mm2\n\t" \
 "pxor %%mm3,%%mm1\n\t" \
 "movq (%[ref2],%[ystride]),%%mm3\n\t" \
 "psubb %%mm0,%%mm6\n\t" \
 "movq (%[ref1]),%%mm0\n\t" \
 "pand %%mm7,%%mm1\n\t" \
 "psadbw %%mm6,%%mm4\n\t" \
 "movd %[ret],%%mm6\n\t" \
 "psubb %%mm1,%%mm2\n\t" \
 "movq (%[ref2]),%%mm1\n\t" \
 "lea (%[src],%[ystride],2),%[src]\n\t" \
 "psadbw %%mm2,%%mm5\n\t" \
 "movq (%[ref1],%[ystride]),%%mm2\n\t" \
 "paddw %%mm4,%%mm5\n\t" \
 "movq (%[src]),%%mm4\n\t" \
 "paddw %%mm5,%%mm6\n\t" \
 "movq (%[src],%[ystride]),%%mm5\n\t" \
 "movd %%mm6,%[ret]\n\t"
/*Same as above, but does not pre-load the next two rows.*/
#define OC_SAD2_TAIL \
 "#OC_SAD2_TAIL\n\t" \
 "movq %%mm0,%%mm6\n\t" \
 "pavgb %%mm1,%%mm0\n\t" \
 "pxor %%mm1,%%mm6\n\t" \
 "movq %%mm2,%%mm1\n\t" \
 "pand %%mm7,%%mm6\n\t" \
 "pavgb %%mm3,%%mm2\n\t" \
 "pxor %%mm3,%%mm1\n\t" \
 "psubb %%mm6,%%mm0\n\t" \
 "pand %%mm7,%%mm1\n\t" \
 "psadbw %%mm0,%%mm4\n\t" \
 "psubb %%mm1,%%mm2\n\t" \
 "movd %[ret],%%mm6\n\t" \
 "psadbw %%mm2,%%mm5\n\t" \
 "paddw %%mm4,%%mm5\n\t" \
 "paddw %%mm5,%%mm6\n\t" \
 "movd %%mm6,%[ret]\n\t"
unsigned oc_enc_frag_sad2_thresh_mmxext(const unsigned char *_src,
 const unsigned char *_ref1,const unsigned char *_ref2,int _ystride,
 unsigned _thresh){
  ptrdiff_t ret;
  __asm__ __volatile__(
    "movq (%[ref1]),%%mm0\n\t"
    "movq (%[ref2]),%%mm1\n\t"
    "movq (%[ref1],%[ystride]),%%mm2\n\t"
    "movq (%[ref2],%[ystride]),%%mm3\n\t"
    "xor %[ret],%[ret]\n\t"
    "movq (%[src]),%%mm4\n\t"
    "pxor %%mm7,%%mm7\n\t"
    "pcmpeqb %%mm6,%%mm6\n\t"
    "movq (%[src],%[ystride]),%%mm5\n\t"
    "psubb %%mm6,%%mm7\n\t"
    OC_SAD2_LOOP
    OC_SAD2_LOOP
    OC_SAD2_LOOP
    OC_SAD2_TAIL
    :[ret]"=&a"(ret),[src]"+r"(_src),[ref1]"+%r"(_ref1),[ref2]"+r"(_ref2)
    :[ystride]"r"((ptrdiff_t)_ystride)
  );
  return (unsigned)ret;
}
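/*A C sketch (illustrative only; oc_avg_down() is the sketch defined above,
  and the name here is hypothetical) of the computation performed by
  oc_enc_frag_sad2_thresh_mmxext: the SAD between the source block and the
  truncating average of the two reference blocks.*/
static unsigned oc_frag_sad2_c(const unsigned char *_src,
 const unsigned char *_ref1,const unsigned char *_ref2,int _ystride){
  unsigned sad;
  int      i;
  int      j;
  sad=0;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int d;
      d=_src[j]-oc_avg_down(_ref1[j],_ref2[j]);
      sad+=d<0?-d:d;
    }
    _src+=_ystride;
    _ref1+=_ystride;
    _ref2+=_ystride;
  }
  return sad;
}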
/*Load an 8x4 array of pixel values from %[src] and %[ref] and compute their
  16-bit difference in %%mm0...%%mm7.*/
#define OC_LOAD_SUB_8x4(_off) \
 "#OC_LOAD_SUB_8x4\n\t" \
 "movd "#_off"(%[src]),%%mm0\n\t" \
 "movd "#_off"(%[ref]),%%mm4\n\t" \
 "movd "#_off"(%[src],%[src_ystride]),%%mm1\n\t" \
 "lea (%[src],%[src_ystride],2),%[src]\n\t" \
 "movd "#_off"(%[ref],%[ref_ystride]),%%mm5\n\t" \
 "lea (%[ref],%[ref_ystride],2),%[ref]\n\t" \
 "movd "#_off"(%[src]),%%mm2\n\t" \
 "movd "#_off"(%[ref]),%%mm7\n\t" \
 "movd "#_off"(%[src],%[src_ystride]),%%mm3\n\t" \
 "movd "#_off"(%[ref],%[ref_ystride]),%%mm6\n\t" \
 "punpcklbw %%mm4,%%mm0\n\t" \
 "lea (%[src],%[src_ystride],2),%[src]\n\t" \
 "punpcklbw %%mm4,%%mm4\n\t" \
 "lea (%[ref],%[ref_ystride],2),%[ref]\n\t" \
 "psubw %%mm4,%%mm0\n\t" \
 "movd "#_off"(%[src]),%%mm4\n\t" \
 "movq %%mm0,"OC_MEM_OFFS(_off*2,buf)"\n\t" \
 "movd "#_off"(%[ref]),%%mm0\n\t" \
 "punpcklbw %%mm5,%%mm1\n\t" \
 "punpcklbw %%mm5,%%mm5\n\t" \
 "psubw %%mm5,%%mm1\n\t" \
 "movd "#_off"(%[src],%[src_ystride]),%%mm5\n\t" \
 "punpcklbw %%mm7,%%mm2\n\t" \
 "punpcklbw %%mm7,%%mm7\n\t" \
 "psubw %%mm7,%%mm2\n\t" \
 "movd "#_off"(%[ref],%[ref_ystride]),%%mm7\n\t" \
 "punpcklbw %%mm6,%%mm3\n\t" \
 "lea (%[src],%[src_ystride],2),%[src]\n\t" \
 "punpcklbw %%mm6,%%mm6\n\t" \
 "psubw %%mm6,%%mm3\n\t" \
 "movd "#_off"(%[src]),%%mm6\n\t" \
 "punpcklbw %%mm0,%%mm4\n\t" \
 "lea (%[ref],%[ref_ystride],2),%[ref]\n\t" \
 "punpcklbw %%mm0,%%mm0\n\t" \
 "lea (%[src],%[src_ystride],2),%[src]\n\t" \
 "psubw %%mm0,%%mm4\n\t" \
 "movd "#_off"(%[ref]),%%mm0\n\t" \
 "punpcklbw %%mm7,%%mm5\n\t" \
 "neg %[src_ystride]\n\t" \
 "punpcklbw %%mm7,%%mm7\n\t" \
 "psubw %%mm7,%%mm5\n\t" \
 "movd "#_off"(%[src],%[src_ystride]),%%mm7\n\t" \
 "punpcklbw %%mm0,%%mm6\n\t" \
 "lea (%[ref],%[ref_ystride],2),%[ref]\n\t" \
 "punpcklbw %%mm0,%%mm0\n\t" \
 "neg %[ref_ystride]\n\t" \
 "psubw %%mm0,%%mm6\n\t" \
 "movd "#_off"(%[ref],%[ref_ystride]),%%mm0\n\t" \
 "lea (%[src],%[src_ystride],8),%[src]\n\t" \
 "punpcklbw %%mm0,%%mm7\n\t" \
 "neg %[src_ystride]\n\t" \
 "punpcklbw %%mm0,%%mm0\n\t" \
 "lea (%[ref],%[ref_ystride],8),%[ref]\n\t" \
 "psubw %%mm0,%%mm7\n\t" \
 "neg %[ref_ystride]\n\t" \
 "movq "OC_MEM_OFFS(_off*2,buf)",%%mm0\n\t"
/*Load an 8x4 array of pixel values from %[src] into %%mm0...%%mm7.*/
#define OC_LOAD_8x4(_off) \
 "#OC_LOAD_8x4\n\t" \
 "movd "#_off"(%[src]),%%mm0\n\t" \
 "movd "#_off"(%[src],%[ystride]),%%mm1\n\t" \
 "movd "#_off"(%[src],%[ystride],2),%%mm2\n\t" \
 "pxor %%mm7,%%mm7\n\t" \
 "movd "#_off"(%[src],%[ystride3]),%%mm3\n\t" \
 "punpcklbw %%mm7,%%mm0\n\t" \
 "movd "#_off"(%[src4]),%%mm4\n\t" \
 "punpcklbw %%mm7,%%mm1\n\t" \
 "movd "#_off"(%[src4],%[ystride]),%%mm5\n\t" \
 "punpcklbw %%mm7,%%mm2\n\t" \
 "movd "#_off"(%[src4],%[ystride],2),%%mm6\n\t" \
 "punpcklbw %%mm7,%%mm3\n\t" \
 "movd "#_off"(%[src4],%[ystride3]),%%mm7\n\t" \
 "punpcklbw %%mm4,%%mm4\n\t" \
 "punpcklbw %%mm5,%%mm5\n\t" \
 "psrlw $8,%%mm4\n\t" \
 "psrlw $8,%%mm5\n\t" \
 "punpcklbw %%mm6,%%mm6\n\t" \
 "punpcklbw %%mm7,%%mm7\n\t" \
 "psrlw $8,%%mm6\n\t" \
 "psrlw $8,%%mm7\n\t"
/*Performs the first two stages of an 8-point 1-D Hadamard transform.
  The transform is performed in place, except that outputs 0-3 are swapped with
  outputs 4-7.
  Outputs 2, 3, 6, and 7 from the second stage are negated (which allows us to
  perform this stage in place with no temporary registers).*/
#define OC_HADAMARD_AB_8x4 \
 "#OC_HADAMARD_AB_8x4\n\t" \
 /*Stage A: \
   Outputs 0-3 are swapped with 4-7 here.*/ \
 "paddw %%mm1,%%mm5\n\t" \
 "paddw %%mm2,%%mm6\n\t" \
 "paddw %%mm1,%%mm1\n\t" \
 "paddw %%mm2,%%mm2\n\t" \
 "psubw %%mm5,%%mm1\n\t" \
 "psubw %%mm6,%%mm2\n\t" \
 "paddw %%mm3,%%mm7\n\t" \
 "paddw %%mm0,%%mm4\n\t" \
 "paddw %%mm3,%%mm3\n\t" \
 "paddw %%mm0,%%mm0\n\t" \
 "psubw %%mm7,%%mm3\n\t" \
 "psubw %%mm4,%%mm0\n\t" \
 /*Stage B:*/ \
 "paddw %%mm2,%%mm0\n\t" \
 "paddw %%mm3,%%mm1\n\t" \
 "paddw %%mm6,%%mm4\n\t" \
 "paddw %%mm7,%%mm5\n\t" \
 "paddw %%mm2,%%mm2\n\t" \
 "paddw %%mm3,%%mm3\n\t" \
 "paddw %%mm6,%%mm6\n\t" \
 "paddw %%mm7,%%mm7\n\t" \
 "psubw %%mm0,%%mm2\n\t" \
 "psubw %%mm1,%%mm3\n\t" \
 "psubw %%mm4,%%mm6\n\t" \
 "psubw %%mm5,%%mm7\n\t"
/*Performs the last stage of an 8-point 1-D Hadamard transform in place.
  Outputs 1, 3, 5, and 7 are negated (which allows us to perform this stage in
  place with no temporary registers).*/
#define OC_HADAMARD_C_8x4 \
 "#OC_HADAMARD_C_8x4\n\t" \
 /*Stage C:*/ \
 "paddw %%mm1,%%mm0\n\t" \
 "paddw %%mm3,%%mm2\n\t" \
 "paddw %%mm5,%%mm4\n\t" \
 "paddw %%mm7,%%mm6\n\t" \
 "paddw %%mm1,%%mm1\n\t" \
 "paddw %%mm3,%%mm3\n\t" \
 "paddw %%mm5,%%mm5\n\t" \
 "paddw %%mm7,%%mm7\n\t" \
 "psubw %%mm0,%%mm1\n\t" \
 "psubw %%mm2,%%mm3\n\t" \
 "psubw %%mm4,%%mm5\n\t" \
 "psubw %%mm6,%%mm7\n\t"
/*Performs an 8-point 1-D Hadamard transform.
  The transform is performed in place, except that outputs 0-3 are swapped with
  outputs 4-7.
  Outputs 1, 2, 5 and 6 are negated (which allows us to perform the transform
  in place with no temporary registers).*/
#define OC_HADAMARD_8x4 \
 OC_HADAMARD_AB_8x4 \
 OC_HADAMARD_C_8x4
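/*A plain C sketch (illustrative only; the name is hypothetical) of the
  8-point 1-D Hadamard transform that OC_HADAMARD_8x4 applies to each column,
  without the output permutation and sign flips the in-place MMX version
  incurs: each paddw/psubw pair above is one butterfly of these three stages.*/
static void oc_hadamard8(ogg_int16_t _x[8]){
  int i;
  int j;
  /*Three stages of butterflies, with strides 4, 2, and 1.*/
  for(i=4;i>0;i>>=1)for(j=0;j<8;j++)if(!(j&i)){
    ogg_int16_t a;
    ogg_int16_t b;
    a=_x[j];
    b=_x[j+i];
    _x[j]=(ogg_int16_t)(a+b);
    _x[j+i]=(ogg_int16_t)(a-b);
  }
}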
/*Performs the first part of the final stage of the Hadamard transform and
  summing of absolute values.
  At the end of this part, %%mm1 will contain the DC coefficient of the
  transform.*/
#define OC_HADAMARD_C_ABS_ACCUM_A_8x4(_r6,_r7) \
 /*We use the fact that \
     (abs(a+b)+abs(a-b))/2=max(abs(a),abs(b)) \
   to merge the final butterfly with the abs and the first stage of \
   accumulation. \
   Thus we can avoid using pabsw, which is not available until SSSE3. \
   Emulating pabsw takes 3 instructions, so the straightforward MMXEXT \
   implementation would be (3+3)*8+7=55 instructions (+4 for spilling \
   registers). \
   Even with pabsw, it would be (3+1)*8+7=39 instructions (with no spills). \
   This implementation is only 26 (+4 for spilling registers).*/ \
 "#OC_HADAMARD_C_ABS_ACCUM_A_8x4\n\t" \
 "movq %%mm7,"OC_MEM_OFFS(_r7,buf)"\n\t" \
 "movq %%mm6,"OC_MEM_OFFS(_r6,buf)"\n\t" \
 /*mm7={0x7FFF}x4 \
   mm0=max(abs(mm0),abs(mm1))-0x7FFF*/ \
 "pcmpeqb %%mm7,%%mm7\n\t" \
 "movq %%mm0,%%mm6\n\t" \
 "psrlw $1,%%mm7\n\t" \
 "paddw %%mm1,%%mm6\n\t" \
 "pmaxsw %%mm1,%%mm0\n\t" \
 "paddsw %%mm7,%%mm6\n\t" \
 "psubw %%mm6,%%mm0\n\t" \
 /*mm2=max(abs(mm2),abs(mm3))-0x7FFF \
   mm4=max(abs(mm4),abs(mm5))-0x7FFF*/ \
 "movq %%mm2,%%mm6\n\t" \
 "movq %%mm4,%%mm1\n\t" \
 "pmaxsw %%mm3,%%mm2\n\t" \
 "pmaxsw %%mm5,%%mm4\n\t" \
 "paddw %%mm3,%%mm6\n\t" \
 "paddw %%mm5,%%mm1\n\t" \
 "movq "OC_MEM_OFFS(_r7,buf)",%%mm3\n\t"
/*Performs the second part of the final stage of the Hadamard transform and
  summing of absolute values.*/
#define OC_HADAMARD_C_ABS_ACCUM_B_8x4(_r6,_r7) \
 "#OC_HADAMARD_C_ABS_ACCUM_B_8x4\n\t" \
 "paddsw %%mm7,%%mm6\n\t" \
 "movq "OC_MEM_OFFS(_r6,buf)",%%mm5\n\t" \
 "paddsw %%mm7,%%mm1\n\t" \
 "psubw %%mm6,%%mm2\n\t" \
 "psubw %%mm1,%%mm4\n\t" \
 /*mm7={1}x4 (needed for the horizontal add that follows) \
   mm0+=mm2+mm4+max(abs(mm3),abs(mm5))-0x7FFF*/ \
 "movq %%mm3,%%mm6\n\t" \
 "pmaxsw %%mm5,%%mm3\n\t" \
 "paddw %%mm2,%%mm0\n\t" \
 "paddw %%mm5,%%mm6\n\t" \
 "paddw %%mm4,%%mm0\n\t" \
 "paddsw %%mm7,%%mm6\n\t" \
 "paddw %%mm3,%%mm0\n\t" \
 "psrlw $14,%%mm7\n\t" \
 "psubw %%mm6,%%mm0\n\t"
/*Performs the last stage of an 8-point 1-D Hadamard transform, takes the
  absolute value of each component, and accumulates everything into mm0.
  This is the only portion of SATD which requires MMXEXT (we could use plain
  MMX, but it takes 4 instructions and an extra register to work around the
  lack of a pmaxsw, which is a pretty serious penalty).*/
#define OC_HADAMARD_C_ABS_ACCUM_8x4(_r6,_r7) \
 OC_HADAMARD_C_ABS_ACCUM_A_8x4(_r6,_r7) \
 OC_HADAMARD_C_ABS_ACCUM_B_8x4(_r6,_r7)
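/*The identity OC_HADAMARD_C_ABS_ACCUM_A_8x4 relies on, as a C sketch
  (illustrative only; the name is hypothetical): whenever a+b and a-b do not
  overflow, (abs(a+b)+abs(a-b))/2==max(abs(a),abs(b)), so the final butterfly
  can be fused with the absolute values via a single pmaxsw per pair.*/
static int oc_max_abs(int _a,int _b){
  int sum;
  int diff;
  sum=_a+_b;
  diff=_a-_b;
  if(sum<0)sum=-sum;
  if(diff<0)diff=-diff;
  /*This equals the larger of abs(_a) and abs(_b).*/
  return (sum+diff)>>1;
}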
/*Performs an 8-point 1-D Hadamard transform, takes the absolute value of each
  component, and accumulates everything into mm0.
  Note that mm0 will have an extra 4 added to each column, and that after
  removing this value, the remainder will be half the conventional value.*/
#define OC_HADAMARD_ABS_ACCUM_8x4(_r6,_r7) \
 OC_HADAMARD_AB_8x4 \
 OC_HADAMARD_C_ABS_ACCUM_8x4(_r6,_r7)
/*Performs two 4x4 transposes (mostly) in place.
  On input, {mm0,mm1,mm2,mm3} contains rows {e,f,g,h}, and {mm4,mm5,mm6,mm7}
  contains rows {a,b,c,d}.
  On output, {0x40,0x50,0x60,0x70}+_off(%[buf]) contains {e,f,g,h}^T, and
  {mm4,mm5,mm6,mm7} contains the transposed rows {a,b,c,d}^T.*/
#define OC_TRANSPOSE_4x4x2(_off) \
 "#OC_TRANSPOSE_4x4x2\n\t" \
 /*First 4x4 transpose:*/ \
 "movq %%mm5,"OC_MEM_OFFS(0x10+(_off),buf)"\n\t" \
 /*mm0 = e3 e2 e1 e0 \
   mm1 = f3 f2 f1 f0 \
   mm2 = g3 g2 g1 g0 \
   mm3 = h3 h2 h1 h0*/ \
 "movq %%mm2,%%mm5\n\t" \
 "punpcklwd %%mm3,%%mm2\n\t" \
 "punpckhwd %%mm3,%%mm5\n\t" \
 "movq %%mm0,%%mm3\n\t" \
 "punpcklwd %%mm1,%%mm0\n\t" \
 "punpckhwd %%mm1,%%mm3\n\t" \
 /*mm0 = f1 e1 f0 e0 \
   mm3 = f3 e3 f2 e2 \
   mm2 = h1 g1 h0 g0 \
   mm5 = h3 g3 h2 g2*/ \
 "movq %%mm0,%%mm1\n\t" \
 "punpckldq %%mm2,%%mm0\n\t" \
 "punpckhdq %%mm2,%%mm1\n\t" \
 "movq %%mm3,%%mm2\n\t" \
 "punpckhdq %%mm5,%%mm3\n\t" \
 "movq %%mm0,"OC_MEM_OFFS(0x40+(_off),buf)"\n\t" \
 "punpckldq %%mm5,%%mm2\n\t" \
 /*mm0 = h0 g0 f0 e0 \
   mm1 = h1 g1 f1 e1 \
   mm2 = h2 g2 f2 e2 \
   mm3 = h3 g3 f3 e3*/ \
 "movq "OC_MEM_OFFS(0x10+(_off),buf)",%%mm5\n\t" \
 /*Second 4x4 transpose:*/ \
 /*mm4 = a3 a2 a1 a0 \
   mm5 = b3 b2 b1 b0 \
   mm6 = c3 c2 c1 c0 \
   mm7 = d3 d2 d1 d0*/ \
 "movq %%mm6,%%mm0\n\t" \
 "punpcklwd %%mm7,%%mm6\n\t" \
 "movq %%mm1,"OC_MEM_OFFS(0x50+(_off),buf)"\n\t" \
 "punpckhwd %%mm7,%%mm0\n\t" \
 "movq %%mm4,%%mm7\n\t" \
 "punpcklwd %%mm5,%%mm4\n\t" \
 "movq %%mm2,"OC_MEM_OFFS(0x60+(_off),buf)"\n\t" \
 "punpckhwd %%mm5,%%mm7\n\t" \
 /*mm4 = b1 a1 b0 a0 \
   mm7 = b3 a3 b2 a2 \
   mm6 = d1 c1 d0 c0 \
   mm0 = d3 c3 d2 c2*/ \
 "movq %%mm4,%%mm5\n\t" \
 "punpckldq %%mm6,%%mm4\n\t" \
 "movq %%mm3,"OC_MEM_OFFS(0x70+(_off),buf)"\n\t" \
 "punpckhdq %%mm6,%%mm5\n\t" \
 "movq %%mm7,%%mm6\n\t" \
 "punpckhdq %%mm0,%%mm7\n\t" \
 "punpckldq %%mm0,%%mm6\n\t" \
 /*mm4 = d0 c0 b0 a0 \
   mm5 = d1 c1 b1 a1 \
   mm6 = d2 c2 b2 a2 \
   mm7 = d3 c3 b3 a3*/
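/*A scalar sketch (illustrative only; the name is hypothetical) of each 4x4
  word transpose that OC_TRANSPOSE_4x4x2 performs with its punpcklwd/punpckhwd
  and punpckldq/punpckhdq interleaves.*/
static void oc_transpose4x4(ogg_int16_t _m[4][4]){
  int i;
  int j;
  for(i=0;i<4;i++)for(j=i+1;j<4;j++){
    ogg_int16_t t;
    t=_m[i][j];
    _m[i][j]=_m[j][i];
    _m[j][i]=t;
  }
}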
static unsigned oc_int_frag_satd_mmxext(int *_dc,
 const unsigned char *_src,int _src_ystride,
 const unsigned char *_ref,int _ref_ystride){
  OC_ALIGN8(ogg_int16_t buf[64]);
  unsigned ret;
  unsigned ret2;
  int      dc;
  __asm__ __volatile__(
    OC_LOAD_SUB_8x4(0x00)
    OC_HADAMARD_8x4
    OC_TRANSPOSE_4x4x2(0x00)
    /*Finish swapping out this 8x4 block to make room for the next one.
      mm0...mm3 have been swapped out already.*/
    "movq %%mm4,"OC_MEM_OFFS(0x00,buf)"\n\t"
    "movq %%mm5,"OC_MEM_OFFS(0x10,buf)"\n\t"
    "movq %%mm6,"OC_MEM_OFFS(0x20,buf)"\n\t"
    "movq %%mm7,"OC_MEM_OFFS(0x30,buf)"\n\t"
    OC_LOAD_SUB_8x4(0x04)
    OC_HADAMARD_8x4
    OC_TRANSPOSE_4x4x2(0x08)
    /*Here the first 4x4 block of output from the last transpose is the second
      4x4 block of input for the next transform.
      We have cleverly arranged that it already be in the appropriate place, so
      we only have to do half the loads.*/
    "movq "OC_MEM_OFFS(0x10,buf)",%%mm1\n\t"
    "movq "OC_MEM_OFFS(0x20,buf)",%%mm2\n\t"
    "movq "OC_MEM_OFFS(0x30,buf)",%%mm3\n\t"
    "movq "OC_MEM_OFFS(0x00,buf)",%%mm0\n\t"
    /*We split out the stages here so we can save the DC coefficient in the
      middle.*/
    OC_HADAMARD_AB_8x4
    OC_HADAMARD_C_ABS_ACCUM_A_8x4(0x28,0x38)
    "movd %%mm1,%[dc]\n\t"
    OC_HADAMARD_C_ABS_ACCUM_B_8x4(0x28,0x38)
    /*Up to this point, everything fit in 16 bits (8 input + 1 for the
      difference + 2*3 for the two 8-point 1-D Hadamards - 1 for the abs - 1
      for the factor of two we dropped + 3 for the vertical accumulation).
      Now we finally have to promote things to dwords.
      We break this part out of OC_HADAMARD_ABS_ACCUM_8x4 to hide the long
      latency of pmaddwd by starting the next series of loads now.*/
    "pmaddwd %%mm7,%%mm0\n\t"
    "movq "OC_MEM_OFFS(0x50,buf)",%%mm1\n\t"
    "movq "OC_MEM_OFFS(0x58,buf)",%%mm5\n\t"
    "movq %%mm0,%%mm4\n\t"
    "movq "OC_MEM_OFFS(0x60,buf)",%%mm2\n\t"
    "punpckhdq %%mm0,%%mm0\n\t"
    "movq "OC_MEM_OFFS(0x68,buf)",%%mm6\n\t"
    "paddd %%mm0,%%mm4\n\t"
    "movq "OC_MEM_OFFS(0x70,buf)",%%mm3\n\t"
    "movd %%mm4,%[ret2]\n\t"
    "movq "OC_MEM_OFFS(0x78,buf)",%%mm7\n\t"
    "movq "OC_MEM_OFFS(0x40,buf)",%%mm0\n\t"
    "movq "OC_MEM_OFFS(0x48,buf)",%%mm4\n\t"
    OC_HADAMARD_ABS_ACCUM_8x4(0x68,0x78)
    "pmaddwd %%mm7,%%mm0\n\t"
    /*Subtract abs(dc) from 2*ret2.*/
    "movsx %w[dc],%[dc]\n\t"
    "cdq\n\t"
    "lea (%[ret],%[ret2],2),%[ret2]\n\t"
    "movq %%mm0,%%mm4\n\t"
    "punpckhdq %%mm0,%%mm0\n\t"
    "xor %[dc],%[ret]\n\t"
    "paddd %%mm0,%%mm4\n\t"
    /*The sums produced by OC_HADAMARD_ABS_ACCUM_8x4 each have an extra 4
      added to them, a factor of two removed, and the DC value included;
      correct the final sum here.*/
    "sub %[ret],%[ret2]\n\t"
    "movd %%mm4,%[ret]\n\t"
    "lea -64(%[ret2],%[ret],2),%[ret]\n\t"
    /*Although it looks like we're using 8 registers here, gcc can alias %[ret]
      and %[ret2] with some of the inputs, since for once we don't write to
      them until after we're done using everything but %[buf].*/
    /*Note that _src_ystride and _ref_ystride must be given non-overlapping
      constraints, otherwise if gcc can prove they're equal it will allocate
      them to the same register (which is bad); _src and _ref face a similar
      problem, though those are never actually the same.*/
    :[ret]"=d"(ret),[ret2]"=r"(ret2),[dc]"=a"(dc),
     [buf]"=m"(OC_ARRAY_OPERAND(ogg_int16_t,buf,64))
    :[src]"r"(_src),[src_ystride]"c"((ptrdiff_t)_src_ystride),
     [ref]"r"(_ref),[ref_ystride]"d"((ptrdiff_t)_ref_ystride)
    /*We have to use neg, so we actually clobber the condition codes for once
      (not to mention cmp, sub, and add).*/
    :"cc"
  );
  *_dc=dc;
  return ret;
}
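/*A C reference sketch (illustrative only; the name is hypothetical, and
  oc_hadamard8() is the sketch defined above) of the quantity the routine
  above computes, ignoring the fixed-point scale factors the MMX version
  drops and restores along the way: the sum of absolute values of the 2-D
  Hadamard coefficients of the difference block, with the DC coefficient
  stored to *_dc and its magnitude excluded from the returned total.*/
static unsigned oc_int_frag_satd_c(int *_dc,
 const unsigned char *_src,int _src_ystride,
 const unsigned char *_ref,int _ref_ystride){
  ogg_int16_t buf[8][8];
  unsigned    satd;
  int         i;
  int         j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++)buf[i][j]=(ogg_int16_t)(_src[j]-_ref[j]);
    _src+=_src_ystride;
    _ref+=_ref_ystride;
  }
  /*Transform the rows, then the columns.*/
  for(i=0;i<8;i++)oc_hadamard8(buf[i]);
  for(j=0;j<8;j++){
    ogg_int16_t col[8];
    for(i=0;i<8;i++)col[i]=buf[i][j];
    oc_hadamard8(col);
    for(i=0;i<8;i++)buf[i][j]=col[i];
  }
  satd=0;
  for(i=0;i<8;i++)for(j=0;j<8;j++)satd+=buf[i][j]<0?-buf[i][j]:buf[i][j];
  *_dc=buf[0][0];
  return satd-(buf[0][0]<0?-buf[0][0]:buf[0][0]);
}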
unsigned oc_enc_frag_satd_mmxext(int *_dc,const unsigned char *_src,
 const unsigned char *_ref,int _ystride){
  return oc_int_frag_satd_mmxext(_dc,_src,_ystride,_ref,_ystride);
}
/*Our internal implementation of frag_copy2 takes an extra stride parameter so
  we can share code with oc_enc_frag_satd2_mmxext().*/
void oc_int_frag_copy2_mmxext(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src1,const unsigned char *_src2,int _src_ystride){
  __asm__ __volatile__(
    /*Load the first 3 rows.*/
    "movq (%[src1]),%%mm0\n\t"
    "movq (%[src2]),%%mm1\n\t"
    "movq (%[src1],%[src_ystride]),%%mm2\n\t"
    "lea (%[src1],%[src_ystride],2),%[src1]\n\t"
    "movq (%[src2],%[src_ystride]),%%mm3\n\t"
    "lea (%[src2],%[src_ystride],2),%[src2]\n\t"
    "pxor %%mm7,%%mm7\n\t"
    "movq (%[src1]),%%mm4\n\t"
    "pcmpeqb %%mm6,%%mm6\n\t"
    "movq (%[src2]),%%mm5\n\t"
    /*mm7={1}x8.*/
    "psubb %%mm6,%%mm7\n\t"
    /*Start averaging %%mm0 and %%mm1 into %%mm6.*/
    "movq %%mm0,%%mm6\n\t"
    "pxor %%mm1,%%mm0\n\t"
    "pavgb %%mm1,%%mm6\n\t"
    /*%%mm1 is free, start averaging %%mm3 into %%mm2 using %%mm1.*/
    "movq %%mm2,%%mm1\n\t"
    "pand %%mm7,%%mm0\n\t"
    "pavgb %%mm3,%%mm2\n\t"
    "pxor %%mm3,%%mm1\n\t"
    /*%%mm3 is free.*/
    "psubb %%mm0,%%mm6\n\t"
    /*%%mm0 is free, start loading the next row.*/
    "movq (%[src1],%[src_ystride]),%%mm0\n\t"
    /*Start averaging %%mm5 and %%mm4 using %%mm3.*/
    "movq %%mm4,%%mm3\n\t"
    /*%%mm6 (row 0) is done; write it out.*/
    "movq %%mm6,(%[dst])\n\t"
    "pand %%mm7,%%mm1\n\t"
    "pavgb %%mm5,%%mm4\n\t"
    "psubb %%mm1,%%mm2\n\t"
    /*%%mm1 is free, continue loading the next row.*/
    "movq (%[src2],%[src_ystride]),%%mm1\n\t"
    "pxor %%mm5,%%mm3\n\t"
    "lea (%[src1],%[src_ystride],2),%[src1]\n\t"
    /*%%mm2 (row 1) is done; write it out.*/
    "movq %%mm2,(%[dst],%[dst_ystride])\n\t"
    "pand %%mm7,%%mm3\n\t"
    /*Start loading the next row.*/
    "movq (%[src1]),%%mm2\n\t"
    "lea (%[dst],%[dst_ystride],2),%[dst]\n\t"
    "psubb %%mm3,%%mm4\n\t"
    "lea (%[src2],%[src_ystride],2),%[src2]\n\t"
    /*%%mm4 (row 2) is done; write it out.*/
    "movq %%mm4,(%[dst])\n\t"
    /*Continue loading the next row.*/
    "movq (%[src2]),%%mm3\n\t"
    /*Start averaging %%mm0 and %%mm1 into %%mm6.*/
    "movq %%mm0,%%mm6\n\t"
    "pxor %%mm1,%%mm0\n\t"
    /*Start loading the next row.*/
    "movq (%[src1],%[src_ystride]),%%mm4\n\t"
    "pavgb %%mm1,%%mm6\n\t"
    /*%%mm1 is free; start averaging %%mm3 into %%mm2 using %%mm1.*/
    "movq %%mm2,%%mm1\n\t"
    "pand %%mm7,%%mm0\n\t"
    /*Continue loading the next row.*/
    "movq (%[src2],%[src_ystride]),%%mm5\n\t"
    "pavgb %%mm3,%%mm2\n\t"
    "lea (%[src1],%[src_ystride],2),%[src1]\n\t"
    "pxor %%mm3,%%mm1\n\t"
    /*%%mm3 is free.*/
    "psubb %%mm0,%%mm6\n\t"
    /*%%mm0 is free, start loading the next row.*/
    "movq (%[src1]),%%mm0\n\t"
    /*Start averaging %%mm5 into %%mm4 using %%mm3.*/
    "movq %%mm4,%%mm3\n\t"
    /*%%mm6 (row 3) is done; write it out.*/
    "movq %%mm6,(%[dst],%[dst_ystride])\n\t"
    "pand %%mm7,%%mm1\n\t"
    "lea (%[src2],%[src_ystride],2),%[src2]\n\t"
    "pavgb %%mm5,%%mm4\n\t"
    "lea (%[dst],%[dst_ystride],2),%[dst]\n\t"
    "psubb %%mm1,%%mm2\n\t"
    /*%%mm1 is free; continue loading the next row.*/
    "movq (%[src2]),%%mm1\n\t"
    "pxor %%mm5,%%mm3\n\t"
    /*%%mm2 (row 4) is done; write it out.*/
    "movq %%mm2,(%[dst])\n\t"
    "pand %%mm7,%%mm3\n\t"
    /*Start loading the next row.*/
    "movq (%[src1],%[src_ystride]),%%mm2\n\t"
    "psubb %%mm3,%%mm4\n\t"
    /*Start averaging %%mm0 and %%mm1 into %%mm6.*/
    "movq %%mm0,%%mm6\n\t"
    /*Continue loading the next row.*/
    "movq (%[src2],%[src_ystride]),%%mm3\n\t"
    /*%%mm4 (row 5) is done; write it out.*/
    "movq %%mm4,(%[dst],%[dst_ystride])\n\t"
    "pxor %%mm1,%%mm0\n\t"
    "pavgb %%mm1,%%mm6\n\t"
    /*%%mm4 is free; start averaging %%mm3 into %%mm2 using %%mm4.*/
    "movq %%mm2,%%mm4\n\t"
    "pand %%mm7,%%mm0\n\t"
    "pavgb %%mm3,%%mm2\n\t"
    "pxor %%mm3,%%mm4\n\t"
    "lea (%[dst],%[dst_ystride],2),%[dst]\n\t"
    "psubb %%mm0,%%mm6\n\t"
    "pand %%mm7,%%mm4\n\t"
    /*%%mm6 (row 6) is done; write it out.*/
    "movq %%mm6,(%[dst])\n\t"
    "psubb %%mm4,%%mm2\n\t"
    /*%%mm2 (row 7) is done; write it out.*/
    "movq %%mm2,(%[dst],%[dst_ystride])\n\t"
    :[dst]"+r"(_dst),[src1]"+%r"(_src1),[src2]"+r"(_src2)
    :[dst_ystride]"r"((ptrdiff_t)_dst_ystride),
     [src_ystride]"r"((ptrdiff_t)_src_ystride)
    :"memory"
  );
}
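/*A C sketch (illustrative only; the name is hypothetical) of the copy
  performed above: each output byte is the truncating average of the two
  source bytes, produced with the same pavgb correction described before
  OC_SAD2_LOOP.*/
static void oc_int_frag_copy2_c(unsigned char *_dst,int _dst_ystride,
 const unsigned char *_src1,const unsigned char *_src2,int _src_ystride){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++)_dst[j]=(unsigned char)((_src1[j]+_src2[j])>>1);
    _dst+=_dst_ystride;
    _src1+=_src_ystride;
    _src2+=_src_ystride;
  }
}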
unsigned oc_enc_frag_satd2_mmxext(int *_dc,const unsigned char *_src,
 const unsigned char *_ref1,const unsigned char *_ref2,int _ystride){
  OC_ALIGN8(unsigned char ref[64]);
  oc_int_frag_copy2_mmxext(ref,8,_ref1,_ref2,_ystride);
  return oc_int_frag_satd_mmxext(_dc,_src,_ystride,ref,8);
}
unsigned oc_enc_frag_intra_satd_mmxext(int *_dc,
 const unsigned char *_src,int _ystride){
  OC_ALIGN8(ogg_int16_t buf[64]);
  unsigned ret;
  unsigned ret2;
  int      dc;
  __asm__ __volatile__(
    OC_LOAD_8x4(0x00)
    OC_HADAMARD_8x4
    OC_TRANSPOSE_4x4x2(0x00)
    /*Finish swapping out this 8x4 block to make room for the next one.
      mm0...mm3 have been swapped out already.*/
    "movq %%mm4,"OC_MEM_OFFS(0x00,buf)"\n\t"
    "movq %%mm5,"OC_MEM_OFFS(0x10,buf)"\n\t"
    "movq %%mm6,"OC_MEM_OFFS(0x20,buf)"\n\t"
    "movq %%mm7,"OC_MEM_OFFS(0x30,buf)"\n\t"
    OC_LOAD_8x4(0x04)
    OC_HADAMARD_8x4
    OC_TRANSPOSE_4x4x2(0x08)
    /*Here the first 4x4 block of output from the last transpose is the second
      4x4 block of input for the next transform.
      We have cleverly arranged that it already be in the appropriate place, so
      we only have to do half the loads.*/
    "movq "OC_MEM_OFFS(0x10,buf)",%%mm1\n\t"
    "movq "OC_MEM_OFFS(0x20,buf)",%%mm2\n\t"
    "movq "OC_MEM_OFFS(0x30,buf)",%%mm3\n\t"
    "movq "OC_MEM_OFFS(0x00,buf)",%%mm0\n\t"
    /*We split out the stages here so we can save the DC coefficient in the
      middle.*/
    OC_HADAMARD_AB_8x4
    OC_HADAMARD_C_ABS_ACCUM_A_8x4(0x28,0x38)
    "movd %%mm1,%[dc]\n\t"
    OC_HADAMARD_C_ABS_ACCUM_B_8x4(0x28,0x38)
    /*Up to this point, everything fit in 16 bits (8 input + 1 for the
      difference + 2*3 for the two 8-point 1-D Hadamards - 1 for the abs - 1
      for the factor of two we dropped + 3 for the vertical accumulation).
      Now we finally have to promote things to dwords.
      We break this part out of OC_HADAMARD_ABS_ACCUM_8x4 to hide the long
      latency of pmaddwd by starting the next series of loads now.*/
    "pmaddwd %%mm7,%%mm0\n\t"
    "movq "OC_MEM_OFFS(0x50,buf)",%%mm1\n\t"
    "movq "OC_MEM_OFFS(0x58,buf)",%%mm5\n\t"
    "movq "OC_MEM_OFFS(0x60,buf)",%%mm2\n\t"
    "movq %%mm0,%%mm4\n\t"
    "movq "OC_MEM_OFFS(0x68,buf)",%%mm6\n\t"
    "punpckhdq %%mm0,%%mm0\n\t"
    "movq "OC_MEM_OFFS(0x70,buf)",%%mm3\n\t"
    "paddd %%mm0,%%mm4\n\t"
    "movq "OC_MEM_OFFS(0x78,buf)",%%mm7\n\t"
    "movd %%mm4,%[ret]\n\t"
    "movq "OC_MEM_OFFS(0x40,buf)",%%mm0\n\t"
    "movq "OC_MEM_OFFS(0x48,buf)",%%mm4\n\t"
    OC_HADAMARD_ABS_ACCUM_8x4(0x68,0x78)
    "pmaddwd %%mm7,%%mm0\n\t"
    /*We assume that the DC coefficient is always positive (which is true,
      because the input to the INTRA transform was not a difference).*/
    "movzx %w[dc],%[dc]\n\t"
    "add %[ret],%[ret]\n\t"
    "sub %[dc],%[ret]\n\t"
    "movq %%mm0,%%mm4\n\t"
    "punpckhdq %%mm0,%%mm0\n\t"
    "paddd %%mm0,%%mm4\n\t"
    "movd %%mm4,%[ret2]\n\t"
    "lea -64(%[ret],%[ret2],2),%[ret]\n\t"
    /*Although it looks like we're using 8 registers here, gcc can alias %[ret]
      and %[ret2] with some of the inputs, since for once we don't write to
      them until after we're done using everything but %[buf] (which is also
      listed as an output to ensure gcc _doesn't_ alias them against it).*/
    :[ret]"=a"(ret),[ret2]"=r"(ret2),[dc]"=r"(dc),
     [buf]"=m"(OC_ARRAY_OPERAND(ogg_int16_t,buf,64))
    :[src]"r"(_src),[src4]"r"(_src+4*_ystride),
     [ystride]"r"((ptrdiff_t)_ystride),[ystride3]"r"((ptrdiff_t)3*_ystride)
    /*We have to use sub, so we actually clobber the condition codes for once
      (not to mention add).*/
    :"cc"
  );
  *_dc=dc;
  return ret;
}
void oc_enc_frag_sub_mmx(ogg_int16_t _residue[64],
 const unsigned char *_src,const unsigned char *_ref,int _ystride){
  int i;
  __asm__ __volatile__("pxor %%mm7,%%mm7\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*mm0=[src]*/
      "movq (%[src]),%%mm0\n\t"
      /*mm1=[ref]*/
      "movq (%[ref]),%%mm1\n\t"
      /*mm4=[src+ystride]*/
      "movq (%[src],%[ystride]),%%mm4\n\t"
      /*mm5=[ref+ystride]*/
      "movq (%[ref],%[ystride]),%%mm5\n\t"
      /*Compute [src]-[ref].*/
      "movq %%mm0,%%mm2\n\t"
      "punpcklbw %%mm7,%%mm0\n\t"
      "movq %%mm1,%%mm3\n\t"
      "punpckhbw %%mm7,%%mm2\n\t"
      "punpcklbw %%mm7,%%mm1\n\t"
      "punpckhbw %%mm7,%%mm3\n\t"
      "psubw %%mm1,%%mm0\n\t"
      "psubw %%mm3,%%mm2\n\t"
      /*Compute [src+ystride]-[ref+ystride].*/
      "movq %%mm4,%%mm1\n\t"
      "punpcklbw %%mm7,%%mm4\n\t"
      "movq %%mm5,%%mm3\n\t"
      "punpckhbw %%mm7,%%mm1\n\t"
      "lea (%[src],%[ystride],2),%[src]\n\t"
      "punpcklbw %%mm7,%%mm5\n\t"
      "lea (%[ref],%[ystride],2),%[ref]\n\t"
      "punpckhbw %%mm7,%%mm3\n\t"
      "psubw %%mm5,%%mm4\n\t"
      "psubw %%mm3,%%mm1\n\t"
      /*Write the answer out.*/
      "movq %%mm0,0x00(%[residue])\n\t"
      "movq %%mm2,0x08(%[residue])\n\t"
      "movq %%mm4,0x10(%[residue])\n\t"
      "movq %%mm1,0x18(%[residue])\n\t"
      "lea 0x20(%[residue]),%[residue]\n\t"
      :[residue]"+r"(_residue),[src]"+r"(_src),[ref]"+r"(_ref)
      :[ystride]"r"((ptrdiff_t)_ystride)
      :"memory"
    );
  }
}
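/*A C sketch (illustrative only; the name is hypothetical) of
  oc_enc_frag_sub_mmx: the 16-bit residue is simply the per-pixel difference
  between the source and reference rows.*/
static void oc_enc_frag_sub_c(ogg_int16_t _residue[64],
 const unsigned char *_src,const unsigned char *_ref,int _ystride){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++)_residue[i*8+j]=(ogg_int16_t)(_src[j]-_ref[j]);
    _src+=_ystride;
    _ref+=_ystride;
  }
}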
void oc_enc_frag_sub_128_mmx(ogg_int16_t _residue[64],
 const unsigned char *_src,int _ystride){
  ptrdiff_t ystride3;
  __asm__ __volatile__(
    /*mm0=[src]*/
    "movq (%[src]),%%mm0\n\t"
    /*mm1=[src+ystride]*/
    "movq (%[src],%[ystride]),%%mm1\n\t"
    /*mm6={-1}x4*/
    "pcmpeqw %%mm6,%%mm6\n\t"
    /*mm2=[src+2*ystride]*/
    "movq (%[src],%[ystride],2),%%mm2\n\t"
    /*[ystride3]=3*[ystride]*/
    "lea (%[ystride],%[ystride],2),%[ystride3]\n\t"
    /*mm6={-32768}x4*/
    "psllw $15,%%mm6\n\t"
    /*mm3=[src+3*ystride]*/
    "movq (%[src],%[ystride3]),%%mm3\n\t"
    /*mm6={128}x4*/
    "psrlw $8,%%mm6\n\t"
    /*mm7=0*/
    "pxor %%mm7,%%mm7\n\t"
    /*[src]=[src]+4*[ystride]*/
    "lea (%[src],%[ystride],4),%[src]\n\t"
    /*Compute [src]-128 and [src+ystride]-128.*/
    "movq %%mm0,%%mm4\n\t"
    "punpcklbw %%mm7,%%mm0\n\t"
    "movq %%mm1,%%mm5\n\t"
    "punpckhbw %%mm7,%%mm4\n\t"
    "psubw %%mm6,%%mm0\n\t"
    "punpcklbw %%mm7,%%mm1\n\t"
    "psubw %%mm6,%%mm4\n\t"
    "punpckhbw %%mm7,%%mm5\n\t"
    "psubw %%mm6,%%mm1\n\t"
    "psubw %%mm6,%%mm5\n\t"
    /*Write the answer out.*/
    "movq %%mm0,0x00(%[residue])\n\t"
    "movq %%mm4,0x08(%[residue])\n\t"
    "movq %%mm1,0x10(%[residue])\n\t"
    "movq %%mm5,0x18(%[residue])\n\t"
    /*mm0=[src+4*ystride]*/
    "movq (%[src]),%%mm0\n\t"
    /*mm1=[src+5*ystride]*/
    "movq (%[src],%[ystride]),%%mm1\n\t"
    /*Compute [src+2*ystride]-128 and [src+3*ystride]-128.*/
    "movq %%mm2,%%mm4\n\t"
    "punpcklbw %%mm7,%%mm2\n\t"
    "movq %%mm3,%%mm5\n\t"
    "punpckhbw %%mm7,%%mm4\n\t"
    "psubw %%mm6,%%mm2\n\t"
    "punpcklbw %%mm7,%%mm3\n\t"
    "psubw %%mm6,%%mm4\n\t"
    "punpckhbw %%mm7,%%mm5\n\t"
    "psubw %%mm6,%%mm3\n\t"
    "psubw %%mm6,%%mm5\n\t"
    /*Write the answer out.*/
    "movq %%mm2,0x20(%[residue])\n\t"
    "movq %%mm4,0x28(%[residue])\n\t"
    "movq %%mm3,0x30(%[residue])\n\t"
    "movq %%mm5,0x38(%[residue])\n\t"
    /*mm2=[src+6*ystride]*/
    "movq (%[src],%[ystride],2),%%mm2\n\t"
    /*mm3=[src+7*ystride]*/
    "movq (%[src],%[ystride3]),%%mm3\n\t"
    /*Compute [src+4*ystride]-128 and [src+5*ystride]-128.*/
    "movq %%mm0,%%mm4\n\t"
    "punpcklbw %%mm7,%%mm0\n\t"
    "movq %%mm1,%%mm5\n\t"
    "punpckhbw %%mm7,%%mm4\n\t"
    "psubw %%mm6,%%mm0\n\t"
    "punpcklbw %%mm7,%%mm1\n\t"
    "psubw %%mm6,%%mm4\n\t"
    "punpckhbw %%mm7,%%mm5\n\t"
    "psubw %%mm6,%%mm1\n\t"
    "psubw %%mm6,%%mm5\n\t"
    /*Write the answer out.*/
    "movq %%mm0,0x40(%[residue])\n\t"
    "movq %%mm4,0x48(%[residue])\n\t"
    "movq %%mm1,0x50(%[residue])\n\t"
    "movq %%mm5,0x58(%[residue])\n\t"
    /*Compute [src+6*ystride]-128 and [src+7*ystride]-128.*/
    "movq %%mm2,%%mm4\n\t"
    "punpcklbw %%mm7,%%mm2\n\t"
    "movq %%mm3,%%mm5\n\t"
    "punpckhbw %%mm7,%%mm4\n\t"
    "psubw %%mm6,%%mm2\n\t"
    "punpcklbw %%mm7,%%mm3\n\t"
    "psubw %%mm6,%%mm4\n\t"
    "punpckhbw %%mm7,%%mm5\n\t"
    "psubw %%mm6,%%mm3\n\t"
    "psubw %%mm6,%%mm5\n\t"
    /*Write the answer out.*/
    "movq %%mm2,0x60(%[residue])\n\t"
    "movq %%mm4,0x68(%[residue])\n\t"
    "movq %%mm3,0x70(%[residue])\n\t"
    "movq %%mm5,0x78(%[residue])\n\t"
    :[src]"+r"(_src),[ystride3]"=&r"(ystride3)
    :[residue]"r"(_residue),[ystride]"r"((ptrdiff_t)_ystride)
    :"memory"
  );
}
void oc_enc_frag_copy2_mmxext(unsigned char *_dst,
 const unsigned char *_src1,const unsigned char *_src2,int _ystride){
  oc_int_frag_copy2_mmxext(_dst,_ystride,_src1,_src2,_ystride);
}
#endif