mmxfrag.c

/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS    *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.      *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009                *
 * by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
 *                                                                  *
 ********************************************************************
  function:
  last mod: $Id$
 ********************************************************************/
/*MMX acceleration of fragment reconstruction for motion compensation.
  Originally written by Rudolf Marek.
  Additional optimization by Nils Pipenbrinck.
  Note: Loops are unrolled for best performance.
  The iteration each instruction belongs to is marked in the comments as #i.*/
#include <stddef.h>
#include "x86int.h"
#if defined(OC_X86_ASM)
/*Copies an 8x8 block of pixels from _src to _dst, assuming _ystride bytes
   between rows.*/
# define OC_FRAG_COPY_MMX(_dst,_src,_ystride) \
  do{ \
    const unsigned char *src; \
    unsigned char       *dst; \
    ptrdiff_t            ystride3; \
    src=(_src); \
    dst=(_dst); \
    __asm__ __volatile__( \
      /*src+0*ystride*/ \
      "movq (%[src]),%%mm0\n\t" \
      /*src+1*ystride*/ \
      "movq (%[src],%[ystride]),%%mm1\n\t" \
      /*ystride3=ystride*3*/ \
      "lea (%[ystride],%[ystride],2),%[ystride3]\n\t" \
      /*src+2*ystride*/ \
      "movq (%[src],%[ystride],2),%%mm2\n\t" \
      /*src+3*ystride*/ \
      "movq (%[src],%[ystride3]),%%mm3\n\t" \
      /*dst+0*ystride*/ \
      "movq %%mm0,(%[dst])\n\t" \
      /*dst+1*ystride*/ \
      "movq %%mm1,(%[dst],%[ystride])\n\t" \
      /*Pointer to next 4.*/ \
      "lea (%[src],%[ystride],4),%[src]\n\t" \
      /*dst+2*ystride*/ \
      "movq %%mm2,(%[dst],%[ystride],2)\n\t" \
      /*dst+3*ystride*/ \
      "movq %%mm3,(%[dst],%[ystride3])\n\t" \
      /*Pointer to next 4.*/ \
      "lea (%[dst],%[ystride],4),%[dst]\n\t" \
      /*src+0*ystride*/ \
      "movq (%[src]),%%mm0\n\t" \
      /*src+1*ystride*/ \
      "movq (%[src],%[ystride]),%%mm1\n\t" \
      /*src+2*ystride*/ \
      "movq (%[src],%[ystride],2),%%mm2\n\t" \
      /*src+3*ystride*/ \
      "movq (%[src],%[ystride3]),%%mm3\n\t" \
      /*dst+0*ystride*/ \
      "movq %%mm0,(%[dst])\n\t" \
      /*dst+1*ystride*/ \
      "movq %%mm1,(%[dst],%[ystride])\n\t" \
      /*dst+2*ystride*/ \
      "movq %%mm2,(%[dst],%[ystride],2)\n\t" \
      /*dst+3*ystride*/ \
      "movq %%mm3,(%[dst],%[ystride3])\n\t" \
      :[dst]"+r"(dst),[src]"+r"(src),[ystride3]"=&r"(ystride3) \
      :[ystride]"r"((ptrdiff_t)(_ystride)) \
      :"memory" \
    ); \
  } \
  while(0)
/*Copies an 8x8 block of pixels from _src to _dst, assuming _ystride bytes
   between rows.*/
void oc_frag_copy_mmx(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
  OC_FRAG_COPY_MMX(_dst,_src,_ystride);
}
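/*For reference only: a plain-C sketch of the copy the MMX code above performs,
   i.e. eight 8-byte rows separated by _ystride bytes.
  The function name below is hypothetical and the block is not compiled; it is
   illustration, not part of the library.*/
#if 0
static void oc_frag_copy_c_sketch(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
  int i;
  int j;
  for(i=0;i<8;i++){
    /*Copy one 8-pixel row, then step both pointers down one row.*/
    for(j=0;j<8;j++)_dst[j]=_src[j];
    _dst+=_ystride;
    _src+=_ystride;
  }
}
#endif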
/*Copies the fragments specified by the lists of fragment indices from one
   frame to another.
  _dst_frame:     The reference frame to copy to.
  _src_frame:     The reference frame to copy from.
  _ystride:       The row stride of the reference frames.
  _fragis:        A pointer to a list of fragment indices.
  _nfragis:       The number of fragment indices to copy.
  _frag_buf_offs: The offsets of fragments in the reference frames.*/
void oc_frag_copy_list_mmx(unsigned char *_dst_frame,
 const unsigned char *_src_frame,int _ystride,
 const ptrdiff_t *_fragis,ptrdiff_t _nfragis,const ptrdiff_t *_frag_buf_offs){
  ptrdiff_t fragii;
  for(fragii=0;fragii<_nfragis;fragii++){
    ptrdiff_t frag_buf_off;
    frag_buf_off=_frag_buf_offs[_fragis[fragii]];
    OC_FRAG_COPY_MMX(_dst_frame+frag_buf_off,
     _src_frame+frag_buf_off,_ystride);
  }
}
void oc_frag_recon_intra_mmx(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  __asm__ __volatile__(
    /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
    "pcmpeqw %%mm0,%%mm0\n\t"
    /*#0 Load low residue.*/
    "movq 0*8(%[residue]),%%mm1\n\t"
    /*#0 Load high residue.*/
    "movq 1*8(%[residue]),%%mm2\n\t"
    /*Set mm0 to 0x8000800080008000.*/
    "psllw $15,%%mm0\n\t"
    /*#1 Load low residue.*/
    "movq 2*8(%[residue]),%%mm3\n\t"
    /*#1 Load high residue.*/
    "movq 3*8(%[residue]),%%mm4\n\t"
    /*Set mm0 to 0x0080008000800080.*/
    "psrlw $8,%%mm0\n\t"
    /*#2 Load low residue.*/
    "movq 4*8(%[residue]),%%mm5\n\t"
    /*#2 Load high residue.*/
    "movq 5*8(%[residue]),%%mm6\n\t"
    /*#0 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#0 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#0 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#1 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#1 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#1 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#2 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#2 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#2 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#0 Write row.*/
    "movq %%mm1,(%[dst])\n\t"
    /*#1 Write row.*/
    "movq %%mm3,(%[dst],%[ystride])\n\t"
    /*#2 Write row.*/
    "movq %%mm5,(%[dst],%[ystride],2)\n\t"
    /*#3 Load low residue.*/
    "movq 6*8(%[residue]),%%mm1\n\t"
    /*#3 Load high residue.*/
    "movq 7*8(%[residue]),%%mm2\n\t"
    /*#4 Load low residue.*/
    "movq 8*8(%[residue]),%%mm3\n\t"
    /*#4 Load high residue.*/
    "movq 9*8(%[residue]),%%mm4\n\t"
    /*#5 Load low residue.*/
    "movq 10*8(%[residue]),%%mm5\n\t"
    /*#5 Load high residue.*/
    "movq 11*8(%[residue]),%%mm6\n\t"
    /*#3 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#3 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#3 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#4 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#4 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#4 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#5 Bias low residue.*/
    "paddsw %%mm0,%%mm5\n\t"
    /*#5 Bias high residue.*/
    "paddsw %%mm0,%%mm6\n\t"
    /*#5 Pack to byte.*/
    "packuswb %%mm6,%%mm5\n\t"
    /*#3 Write row.*/
    "movq %%mm1,(%[dst],%[ystride3])\n\t"
    /*#4 Write row.*/
    "movq %%mm3,(%[dst4])\n\t"
    /*#5 Write row.*/
    "movq %%mm5,(%[dst4],%[ystride])\n\t"
    /*#6 Load low residue.*/
    "movq 12*8(%[residue]),%%mm1\n\t"
    /*#6 Load high residue.*/
    "movq 13*8(%[residue]),%%mm2\n\t"
    /*#7 Load low residue.*/
    "movq 14*8(%[residue]),%%mm3\n\t"
    /*#7 Load high residue.*/
    "movq 15*8(%[residue]),%%mm4\n\t"
    /*#6 Bias low residue.*/
    "paddsw %%mm0,%%mm1\n\t"
    /*#6 Bias high residue.*/
    "paddsw %%mm0,%%mm2\n\t"
    /*#6 Pack to byte.*/
    "packuswb %%mm2,%%mm1\n\t"
    /*#7 Bias low residue.*/
    "paddsw %%mm0,%%mm3\n\t"
    /*#7 Bias high residue.*/
    "paddsw %%mm0,%%mm4\n\t"
    /*#7 Pack to byte.*/
    "packuswb %%mm4,%%mm3\n\t"
    /*#6 Write row.*/
    "movq %%mm1,(%[dst4],%[ystride],2)\n\t"
    /*#7 Write row.*/
    "movq %%mm3,(%[dst4],%[ystride3])\n\t"
    :
    :[residue]"r"(_residue),
     [dst]"r"(_dst),
     [dst4]"r"(_dst+(_ystride<<2)),
     [ystride]"r"((ptrdiff_t)_ystride),
     [ystride3]"r"((ptrdiff_t)_ystride*3)
    :"memory"
  );
}
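/*For reference only: a plain-C sketch of the intra reconstruction above.
  Each 16-bit residue value is biased by 128 and clamped to [0,255], which is
   what the paddsw/packuswb sequence computes.
  The function name below is hypothetical and the block is not compiled.*/
#if 0
static void oc_frag_recon_intra_c_sketch(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      p=_residue[i*8+j]+128;
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_ystride;
  }
}
#endif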
void oc_frag_recon_inter_mmx(unsigned char *_dst,const unsigned char *_src,
 int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm0.*/
  __asm__ __volatile__("pxor %%mm0,%%mm0\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load source.*/
      "movq (%[src]),%%mm3\n\t"
      /*#1 Load source.*/
      "movq (%[src],%[ystride]),%%mm7\n\t"
      /*#0 Get copy of src.*/
      "movq %%mm3,%%mm4\n\t"
      /*#0 Expand high source.*/
      "punpckhbw %%mm0,%%mm4\n\t"
      /*#0 Expand low source.*/
      "punpcklbw %%mm0,%%mm3\n\t"
      /*#0 Add residue high.*/
      "paddsw 8(%[residue]),%%mm4\n\t"
      /*#1 Get copy of src.*/
      "movq %%mm7,%%mm2\n\t"
      /*#0 Add residue low.*/
      "paddsw (%[residue]),%%mm3\n\t"
      /*#1 Expand high source.*/
      "punpckhbw %%mm0,%%mm2\n\t"
      /*#0 Pack final row pixels.*/
      "packuswb %%mm4,%%mm3\n\t"
      /*#1 Expand low source.*/
      "punpcklbw %%mm0,%%mm7\n\t"
      /*#1 Add residue low.*/
      "paddsw 16(%[residue]),%%mm7\n\t"
      /*#1 Add residue high.*/
      "paddsw 24(%[residue]),%%mm2\n\t"
      /*Advance residue.*/
      "lea 32(%[residue]),%[residue]\n\t"
      /*#1 Pack final row pixels.*/
      "packuswb %%mm2,%%mm7\n\t"
      /*Advance src.*/
      "lea (%[src],%[ystride],2),%[src]\n\t"
      /*#0 Write row.*/
      "movq %%mm3,(%[dst])\n\t"
      /*#1 Write row.*/
      "movq %%mm7,(%[dst],%[ystride])\n\t"
      /*Advance dst.*/
      "lea (%[dst],%[ystride],2),%[dst]\n\t"
      :[residue]"+r"(_residue),[dst]"+r"(_dst),[src]"+r"(_src)
      :[ystride]"r"((ptrdiff_t)_ystride)
      :"memory"
    );
  }
}
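/*For reference only: a plain-C sketch of the inter reconstruction above.
  The residue is added to the motion-compensated predictor and the result is
   clamped to [0,255].
  The function name below is hypothetical and the block is not compiled.*/
#if 0
static void oc_frag_recon_inter_c_sketch(unsigned char *_dst,
 const unsigned char *_src,int _ystride,const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      p=_src[j]+_residue[i*8+j];
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_ystride;
    _src+=_ystride;
  }
}
#endif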
void oc_frag_recon_inter2_mmx(unsigned char *_dst,const unsigned char *_src1,
 const unsigned char *_src2,int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm7.*/
  __asm__ __volatile__("pxor %%mm7,%%mm7\n\t"::);
  for(i=4;i-->0;){
    __asm__ __volatile__(
      /*#0 Load src1.*/
      "movq (%[src1]),%%mm0\n\t"
      /*#0 Load src2.*/
      "movq (%[src2]),%%mm2\n\t"
      /*#0 Copy src1.*/
      "movq %%mm0,%%mm1\n\t"
      /*#0 Copy src2.*/
      "movq %%mm2,%%mm3\n\t"
      /*#1 Load src1.*/
      "movq (%[src1],%[ystride]),%%mm4\n\t"
      /*#0 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm0\n\t"
      /*#1 Load src2.*/
      "movq (%[src2],%[ystride]),%%mm5\n\t"
      /*#0 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm1\n\t"
      /*#0 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm2\n\t"
      /*#0 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*Advance src1 ptr.*/
      "lea (%[src1],%[ystride],2),%[src1]\n\t"
      /*Advance src2 ptr.*/
      "lea (%[src2],%[ystride],2),%[src2]\n\t"
      /*#0 Lower src1+src2.*/
      "paddsw %%mm2,%%mm0\n\t"
      /*#0 Higher src1+src2.*/
      "paddsw %%mm3,%%mm1\n\t"
      /*#1 Copy src1.*/
      "movq %%mm4,%%mm2\n\t"
      /*#0 Build lo average.*/
      "psraw $1,%%mm0\n\t"
      /*#1 Copy src2.*/
      "movq %%mm5,%%mm3\n\t"
      /*#1 Unpack lower src1.*/
      "punpcklbw %%mm7,%%mm4\n\t"
      /*#0 Build hi average.*/
      "psraw $1,%%mm1\n\t"
      /*#1 Unpack higher src1.*/
      "punpckhbw %%mm7,%%mm2\n\t"
      /*#0 low+=residue.*/
      "paddsw (%[residue]),%%mm0\n\t"
      /*#1 Unpack lower src2.*/
      "punpcklbw %%mm7,%%mm5\n\t"
      /*#0 high+=residue.*/
      "paddsw 8(%[residue]),%%mm1\n\t"
      /*#1 Unpack higher src2.*/
      "punpckhbw %%mm7,%%mm3\n\t"
      /*#1 Lower src1+src2.*/
      "paddsw %%mm4,%%mm5\n\t"
      /*#0 Pack and saturate.*/
      "packuswb %%mm1,%%mm0\n\t"
      /*#1 Higher src1+src2.*/
      "paddsw %%mm2,%%mm3\n\t"
      /*#0 Write row.*/
      "movq %%mm0,(%[dst])\n\t"
      /*#1 Build lo average.*/
      "psraw $1,%%mm5\n\t"
      /*#1 Build hi average.*/
      "psraw $1,%%mm3\n\t"
      /*#1 low+=residue.*/
      "paddsw 16(%[residue]),%%mm5\n\t"
      /*#1 high+=residue.*/
      "paddsw 24(%[residue]),%%mm3\n\t"
      /*#1 Pack and saturate.*/
      "packuswb %%mm3,%%mm5\n\t"
      /*#1 Write row.*/
      "movq %%mm5,(%[dst],%[ystride])\n\t"
      /*Advance residue ptr.*/
      "add $32,%[residue]\n\t"
      /*Advance dest ptr.*/
      "lea (%[dst],%[ystride],2),%[dst]\n\t"
      :[dst]"+r"(_dst),[residue]"+r"(_residue),
       [src1]"+%r"(_src1),[src2]"+r"(_src2)
      :[ystride]"r"((ptrdiff_t)_ystride)
      :"memory"
    );
  }
}
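/*For reference only: a plain-C sketch of the bi-predicted reconstruction
   above.
  The two predictors are averaged with truncation ((a+b)>>1), the residue is
   added, and the result is clamped to [0,255].
  The function name below is hypothetical and the block is not compiled.*/
#if 0
static void oc_frag_recon_inter2_c_sketch(unsigned char *_dst,
 const unsigned char *_src1,const unsigned char *_src2,int _ystride,
 const ogg_int16_t *_residue){
  int i;
  int j;
  for(i=0;i<8;i++){
    for(j=0;j<8;j++){
      int p;
      p=((_src1[j]+_src2[j])>>1)+_residue[i*8+j];
      _dst[j]=(unsigned char)(p<0?0:p>255?255:p);
    }
    _dst+=_ystride;
    _src1+=_ystride;
    _src2+=_ystride;
  }
}
#endif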
void oc_restore_fpu_mmx(void){
  __asm__ __volatile__("emms\n\t");
}
#endif