/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE.   *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE Theora SOURCE CODE IS COPYRIGHT (C) 2002-2009                *
 * by the Xiph.Org Foundation and contributors http://www.xiph.org/ *
 *                                                                  *
 ********************************************************************

  function:
  last mod: $Id: mmxfrag.c 16578 2009-09-25 19:50:48Z cristianadam $

 ********************************************************************/
/*MMX acceleration of fragment reconstruction for motion compensation.
  Originally written by Rudolf Marek.
  Additional optimization by Nils Pipenbrinck.
  Note: Loops are unrolled for best performance.
  The iteration each instruction belongs to is marked in the comments as #i.*/
#include <stddef.h>
#include "x86int.h"
#include "mmxfrag.h"
#if defined(OC_X86_ASM)
/*Copies an 8x8 block of pixels from _src to _dst, assuming _ystride bytes
   between rows.*/
void oc_frag_copy_mmx(unsigned char *_dst,
 const unsigned char *_src,int _ystride){
  /*These #defines name the concrete x86 registers that the
     OC_FRAG_COPY_MMX macro (from mmxfrag.h) uses for its operands; the
     macro expands to MSVC inline assembly referencing SRC/DST/YSTRIDE/
     YSTRIDE3 directly, so they must be defined before it is invoked and
     undefined afterwards to avoid leaking into other translation-unit
     code.*/
#define SRC edx
#define DST eax
#define YSTRIDE ecx
#define YSTRIDE3 esi
  OC_FRAG_COPY_MMX(_dst,_src,_ystride);
#undef SRC
#undef DST
#undef YSTRIDE
#undef YSTRIDE3
}
/*Reconstructs an intra-coded 8x8 fragment: each output pixel is the
   corresponding residue value biased by +128 and clamped to [0,255].
  _dst:     Destination pixels, 8 bytes per row, _ystride bytes apart.
  _ystride: Bytes between successive destination rows.
  _residue: 64 16-bit residue values in row-major order.
  The +128 bias is added as the word constant 0x0080 with signed
   saturation (paddsw); packuswb then clamps each word to an unsigned
   byte.  Rows are processed three (then two) at a time with the loads,
   adds, packs and stores interleaved for scheduling; the iteration each
   instruction belongs to is marked as #i.
  No EMMS is issued here; MMX state is cleared separately (see
   oc_restore_fpu_mmx).*/
void oc_frag_recon_intra_mmx(unsigned char *_dst,int _ystride,
 const ogg_int16_t *_residue){
  __asm{
#define DST edx
#define DST4 esi
#define YSTRIDE eax
#define YSTRIDE3 edi
#define RESIDUE ecx
    mov DST,_dst
    mov YSTRIDE,_ystride
    mov RESIDUE,_residue
    /*DST4 points at row 4; together with YSTRIDE3 (3*_ystride) this
       addresses all eight rows without modifying DST.*/
    lea DST4,[DST+YSTRIDE*4]
    lea YSTRIDE3,[YSTRIDE+YSTRIDE*2]
    /*Set mm0 to 0xFFFFFFFFFFFFFFFF.*/
    pcmpeqw mm0,mm0
    /*#0 Load low residue.*/
    movq mm1,[0*8+RESIDUE]
    /*#0 Load high residue.*/
    movq mm2,[1*8+RESIDUE]
    /*Set mm0 to 0x8000800080008000.*/
    psllw mm0,15
    /*#1 Load low residue.*/
    movq mm3,[2*8+RESIDUE]
    /*#1 Load high residue.*/
    movq mm4,[3*8+RESIDUE]
    /*Set mm0 to 0x0080008000800080 (the +128 bias in each word).*/
    psrlw mm0,8
    /*#2 Load low residue.*/
    movq mm5,[4*8+RESIDUE]
    /*#2 Load high residue.*/
    movq mm6,[5*8+RESIDUE]
    /*#0 Bias low residue.*/
    paddsw mm1,mm0
    /*#0 Bias high residue.*/
    paddsw mm2,mm0
    /*#0 Pack to byte.*/
    packuswb mm1,mm2
    /*#1 Bias low residue.*/
    paddsw mm3,mm0
    /*#1 Bias high residue.*/
    paddsw mm4,mm0
    /*#1 Pack to byte.*/
    packuswb mm3,mm4
    /*#2 Bias low residue.*/
    paddsw mm5,mm0
    /*#2 Bias high residue.*/
    paddsw mm6,mm0
    /*#2 Pack to byte.*/
    packuswb mm5,mm6
    /*#0 Write row.*/
    movq [DST],mm1
    /*#1 Write row.*/
    movq [DST+YSTRIDE],mm3
    /*#2 Write row.*/
    movq [DST+YSTRIDE*2],mm5
    /*#3 Load low residue.*/
    movq mm1,[6*8+RESIDUE]
    /*#3 Load high residue.*/
    movq mm2,[7*8+RESIDUE]
    /*#4 Load low residue.*/
    movq mm3,[8*8+RESIDUE]
    /*#4 Load high residue.*/
    movq mm4,[9*8+RESIDUE]
    /*#5 Load low residue.*/
    movq mm5,[10*8+RESIDUE]
    /*#5 Load high residue.*/
    movq mm6,[11*8+RESIDUE]
    /*#3 Bias low residue.*/
    paddsw mm1,mm0
    /*#3 Bias high residue.*/
    paddsw mm2,mm0
    /*#3 Pack to byte.*/
    packuswb mm1,mm2
    /*#4 Bias low residue.*/
    paddsw mm3,mm0
    /*#4 Bias high residue.*/
    paddsw mm4,mm0
    /*#4 Pack to byte.*/
    packuswb mm3,mm4
    /*#5 Bias low residue.*/
    paddsw mm5,mm0
    /*#5 Bias high residue.*/
    paddsw mm6,mm0
    /*#5 Pack to byte.*/
    packuswb mm5,mm6
    /*#3 Write row.*/
    movq [DST+YSTRIDE3],mm1
    /*#4 Write row.*/
    movq [DST4],mm3
    /*#5 Write row.*/
    movq [DST4+YSTRIDE],mm5
    /*#6 Load low residue.*/
    movq mm1,[12*8+RESIDUE]
    /*#6 Load high residue.*/
    movq mm2,[13*8+RESIDUE]
    /*#7 Load low residue.*/
    movq mm3,[14*8+RESIDUE]
    /*#7 Load high residue.*/
    movq mm4,[15*8+RESIDUE]
    /*#6 Bias low residue.*/
    paddsw mm1,mm0
    /*#6 Bias high residue.*/
    paddsw mm2,mm0
    /*#6 Pack to byte.*/
    packuswb mm1,mm2
    /*#7 Bias low residue.*/
    paddsw mm3,mm0
    /*#7 Bias high residue.*/
    paddsw mm4,mm0
    /*#7 Pack to byte.*/
    packuswb mm3,mm4
    /*#6 Write row.*/
    movq [DST4+YSTRIDE*2],mm1
    /*#7 Write row.*/
    movq [DST4+YSTRIDE3],mm3
#undef DST
#undef DST4
#undef YSTRIDE
#undef YSTRIDE3
#undef RESIDUE
  }
}
/*Reconstructs an inter-coded 8x8 fragment: each output pixel is the
   predictor pixel from _src plus the corresponding residue, saturated to
   [0,255].
  _dst:     Destination pixels, _ystride bytes between rows.
  _src:     Predictor pixels, _ystride bytes between rows.
  _ystride: Bytes between successive rows (same for _dst and _src).
  _residue: 64 16-bit residue values in row-major order.
  Each loop pass reconstructs two rows; four passes cover the block.  At
   the end of each pass the advanced register pointers are stored back
   into the by-value parameters (_dst, _src, _residue) so the next pass's
   reloads resume where this one left off.
  No EMMS is issued here; MMX state is cleared separately (see
   oc_restore_fpu_mmx).*/
void oc_frag_recon_inter_mmx(unsigned char *_dst,const unsigned char *_src,
 int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm0 (used to widen source bytes to words).*/
  __asm pxor mm0,mm0;
  for(i=4;i-->0;){
    __asm{
#define DST edx
#define SRC ecx
#define YSTRIDE edi
#define RESIDUE eax
      mov DST,_dst
      mov SRC,_src
      mov YSTRIDE,_ystride
      mov RESIDUE,_residue
      /*#0 Load source.*/
      movq mm3,[SRC]
      /*#1 Load source.*/
      movq mm7,[SRC+YSTRIDE]
      /*#0 Get copy of src.*/
      movq mm4,mm3
      /*#0 Expand high source.*/
      punpckhbw mm4,mm0
      /*#0 Expand low source.*/
      punpcklbw mm3,mm0
      /*#0 Add residue high.*/
      paddsw mm4,[8+RESIDUE]
      /*#1 Get copy of src.*/
      movq mm2,mm7
      /*#0 Add residue low.*/
      paddsw mm3,[RESIDUE]
      /*#1 Expand high source.*/
      punpckhbw mm2,mm0
      /*#0 Pack final row pixels.*/
      packuswb mm3,mm4
      /*#1 Expand low source.*/
      punpcklbw mm7,mm0
      /*#1 Add residue low.*/
      paddsw mm7,[16+RESIDUE]
      /*#1 Add residue high.*/
      paddsw mm2,[24+RESIDUE]
      /*Advance residue (two rows = 32 bytes).*/
      lea RESIDUE,[32+RESIDUE]
      /*#1 Pack final row pixels.*/
      packuswb mm7,mm2
      /*Advance src.*/
      lea SRC,[SRC+YSTRIDE*2]
      /*#0 Write row.*/
      movq [DST],mm3
      /*#1 Write row.*/
      movq [DST+YSTRIDE],mm7
      /*Advance dst.*/
      lea DST,[DST+YSTRIDE*2]
      /*Carry the advanced pointers over to the next loop iteration.*/
      mov _residue,RESIDUE
      mov _dst,DST
      mov _src,SRC
#undef DST
#undef SRC
#undef YSTRIDE
#undef RESIDUE
    }
  }
}
/*Reconstructs a bi-predicted 8x8 fragment: each output pixel is the
   truncating average of the two predictors, (src1+src2)>>1, plus the
   corresponding residue, saturated to [0,255].
  _dst:     Destination pixels, _ystride bytes between rows.
  _src1:    First predictor, _ystride bytes between rows.
  _src2:    Second predictor, _ystride bytes between rows.
  _ystride: Bytes between successive rows (same for all three planes).
  _residue: 64 16-bit residue values in row-major order.
  The average is computed on byte values widened to 16 bits, so the sum
   cannot overflow before the psraw by 1.  Each loop pass reconstructs
   two rows; four passes cover the block, with the advanced register
   pointers stored back into the by-value parameters between passes.
  No EMMS is issued here; MMX state is cleared separately (see
   oc_restore_fpu_mmx).*/
void oc_frag_recon_inter2_mmx(unsigned char *_dst,const unsigned char *_src1,
 const unsigned char *_src2,int _ystride,const ogg_int16_t *_residue){
  int i;
  /*Zero mm7 (used to widen source bytes to words).*/
  __asm pxor mm7,mm7;
  for(i=4;i-->0;){
    __asm{
#define SRC1 ecx
#define SRC2 edi
#define YSTRIDE esi
#define RESIDUE edx
#define DST eax
      mov YSTRIDE,_ystride
      mov DST,_dst
      mov RESIDUE,_residue
      mov SRC1,_src1
      mov SRC2,_src2
      /*#0 Load src1.*/
      movq mm0,[SRC1]
      /*#0 Load src2.*/
      movq mm2,[SRC2]
      /*#0 Copy src1.*/
      movq mm1,mm0
      /*#0 Copy src2.*/
      movq mm3,mm2
      /*#1 Load src1.*/
      movq mm4,[SRC1+YSTRIDE]
      /*#0 Unpack lower src1.*/
      punpcklbw mm0,mm7
      /*#1 Load src2.*/
      movq mm5,[SRC2+YSTRIDE]
      /*#0 Unpack higher src1.*/
      punpckhbw mm1,mm7
      /*#0 Unpack lower src2.*/
      punpcklbw mm2,mm7
      /*#0 Unpack higher src2.*/
      punpckhbw mm3,mm7
      /*Advance src1 ptr.*/
      lea SRC1,[SRC1+YSTRIDE*2]
      /*Advance src2 ptr.*/
      lea SRC2,[SRC2+YSTRIDE*2]
      /*#0 Lower src1+src2.*/
      paddsw mm0,mm2
      /*#0 Higher src1+src2.*/
      paddsw mm1,mm3
      /*#1 Copy src1.*/
      movq mm2,mm4
      /*#0 Build lo average.*/
      psraw mm0,1
      /*#1 Copy src2.*/
      movq mm3,mm5
      /*#1 Unpack lower src1.*/
      punpcklbw mm4,mm7
      /*#0 Build hi average.*/
      psraw mm1,1
      /*#1 Unpack higher src1.*/
      punpckhbw mm2,mm7
      /*#0 low+=residue.*/
      paddsw mm0,[RESIDUE]
      /*#1 Unpack lower src2.*/
      punpcklbw mm5,mm7
      /*#0 high+=residue.*/
      paddsw mm1,[8+RESIDUE]
      /*#1 Unpack higher src2.*/
      punpckhbw mm3,mm7
      /*#1 Lower src1+src2.*/
      paddsw mm5,mm4
      /*#0 Pack and saturate.*/
      packuswb mm0,mm1
      /*#1 Higher src1+src2.*/
      paddsw mm3,mm2
      /*#0 Write row.*/
      movq [DST],mm0
      /*#1 Build lo average.*/
      psraw mm5,1
      /*#1 Build hi average.*/
      psraw mm3,1
      /*#1 low+=residue.*/
      paddsw mm5,[16+RESIDUE]
      /*#1 high+=residue.*/
      paddsw mm3,[24+RESIDUE]
      /*#1 Pack and saturate.*/
      packuswb mm5,mm3
      /*#1 Write row.*/
      movq [DST+YSTRIDE],mm5
      /*Advance residue ptr (two rows = 32 bytes).*/
      add RESIDUE,32
      /*Advance dest ptr.*/
      lea DST,[DST+YSTRIDE*2]
      /*Carry the advanced pointers over to the next loop iteration.*/
      mov _dst,DST
      mov _residue,RESIDUE
      mov _src1,SRC1
      mov _src2,SRC2
#undef SRC1
#undef SRC2
#undef YSTRIDE
#undef RESIDUE
#undef DST
    }
  }
}
  324. void oc_restore_fpu_mmx(void){
  325. __asm emms;
  326. }
  327. #endif