- ;
- ; Copyright (c) 2014 The WebM project authors. All Rights Reserved.
- ;
- ; Use of this source code is governed by a BSD-style license
- ; that can be found in the LICENSE file in the root of the source
- ; tree. An additional intellectual property rights grant can be found
- ; in the file PATENTS. All contributing project authors may
- ; be found in the AUTHORS file in the root of the source tree.
- ;
- %include "third_party/x86inc/x86inc.asm"
- ; This file provides the SSSE3 version of the inverse transform. Some of
- ; the functions are originally derived from the ffmpeg project.
- ; Note that the current version applies to x86 64-bit only.
- SECTION_RODATA
- pw_11585x2: times 8 dw 23170
- pw_m2404x2: times 8 dw -2404*2
- pw_m4756x2: times 8 dw -4756*2
- pw_m5520x2: times 8 dw -5520*2
- pw_m8423x2: times 8 dw -8423*2
- pw_m9102x2: times 8 dw -9102*2
- pw_m10394x2: times 8 dw -10394*2
- pw_m11003x2: times 8 dw -11003*2
- pw_16364x2: times 8 dw 16364*2
- pw_16305x2: times 8 dw 16305*2
- pw_16207x2: times 8 dw 16207*2
- pw_16069x2: times 8 dw 16069*2
- pw_15893x2: times 8 dw 15893*2
- pw_15679x2: times 8 dw 15679*2
- pw_15426x2: times 8 dw 15426*2
- pw_15137x2: times 8 dw 15137*2
- pw_14811x2: times 8 dw 14811*2
- pw_14449x2: times 8 dw 14449*2
- pw_14053x2: times 8 dw 14053*2
- pw_13623x2: times 8 dw 13623*2
- pw_13160x2: times 8 dw 13160*2
- pw_12665x2: times 8 dw 12665*2
- pw_12140x2: times 8 dw 12140*2
- pw__9760x2: times 8 dw 9760*2
- pw__7723x2: times 8 dw 7723*2
- pw__7005x2: times 8 dw 7005*2
- pw__6270x2: times 8 dw 6270*2
- pw__3981x2: times 8 dw 3981*2
- pw__3196x2: times 8 dw 3196*2
- pw__1606x2: times 8 dw 1606*2
- pw___804x2: times 8 dw 804*2
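- ; Note: the pw_*x2 constants above are the Q14 cosine constants doubled so
- ; they can be applied with pmulhrsw, which computes round(a * b / 32768);
- ; with a doubled constant this yields the usual round(a * c / 16384) scaling.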
- pd_8192: times 4 dd 8192
- pw_32: times 8 dw 32
- pw_16: times 8 dw 16
- %macro TRANSFORM_COEFFS 2
- pw_%1_%2: dw %1, %2, %1, %2, %1, %2, %1, %2
- pw_m%2_%1: dw -%2, %1, -%2, %1, -%2, %1, -%2, %1
- pw_m%1_m%2: dw -%1, -%2, -%1, -%2, -%1, -%2, -%1, -%2
- %endmacro
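- ; For reference, TRANSFORM_COEFFS 6270, 15137 below expands to the three
- ; interleaved rows pw_6270_15137, pw_m15137_6270 and pw_m6270_m15137, which
- ; are the pmaddwd operands used by the BUTTERFLY_4X/BUTTERFLY_4Xmm macros.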
- TRANSFORM_COEFFS 6270, 15137
- TRANSFORM_COEFFS 3196, 16069
- TRANSFORM_COEFFS 13623, 9102
- ; constants for 32x32_34
- TRANSFORM_COEFFS 804, 16364
- TRANSFORM_COEFFS 15426, 5520
- TRANSFORM_COEFFS 3981, 15893
- TRANSFORM_COEFFS 16207, 2404
- TRANSFORM_COEFFS 1606, 16305
- TRANSFORM_COEFFS 15679, 4756
- TRANSFORM_COEFFS 11585, 11585
- ; constants for 32x32_1024
- TRANSFORM_COEFFS 12140, 11003
- TRANSFORM_COEFFS 7005, 14811
- TRANSFORM_COEFFS 14053, 8423
- TRANSFORM_COEFFS 9760, 13160
- TRANSFORM_COEFFS 12665, 10394
- TRANSFORM_COEFFS 7723, 14449
- %macro PAIR_PP_COEFFS 2
- dpw_%1_%2: dw %1, %1, %1, %1, %2, %2, %2, %2
- %endmacro
- %macro PAIR_MP_COEFFS 2
- dpw_m%1_%2: dw -%1, -%1, -%1, -%1, %2, %2, %2, %2
- %endmacro
- %macro PAIR_MM_COEFFS 2
- dpw_m%1_m%2: dw -%1, -%1, -%1, -%1, -%2, -%2, -%2, -%2
- %endmacro
- PAIR_PP_COEFFS 30274, 12540
- PAIR_PP_COEFFS 6392, 32138
- PAIR_MP_COEFFS 18204, 27246
- PAIR_PP_COEFFS 12540, 12540
- PAIR_PP_COEFFS 30274, 30274
- PAIR_PP_COEFFS 6392, 6392
- PAIR_PP_COEFFS 32138, 32138
- PAIR_MM_COEFFS 18204, 18204
- PAIR_PP_COEFFS 27246, 27246
- SECTION .text
- %if ARCH_X86_64
- %macro SUM_SUB 3
- psubw m%3, m%1, m%2
- paddw m%1, m%2
- SWAP %2, %3
- %endmacro
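- ; SUM_SUB leaves the sum in m%1 and (via the SWAP) the difference in m%2,
- ; i.e. (a, b) -> (a + b, a - b): the add/sub half of each butterfly stage.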
- ; butterfly operation
- %macro MUL_ADD_2X 6 ; dst1, dst2, src, round, coefs1, coefs2
- pmaddwd m%1, m%3, %5
- pmaddwd m%2, m%3, %6
- paddd m%1, %4
- paddd m%2, %4
- psrad m%1, 14
- psrad m%2, 14
- %endmacro
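- ; MUL_ADD_2X forms two pmaddwd dot products against interleaved cosine
- ; pairs, adds the rounding constant (pd_8192 = 1 << 13, kept in m8 by the
- ; callers) and shifts right by 14, i.e. ROUND_POWER_OF_TWO(x, 14) as in the
- ; C reference code.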
- %macro BUTTERFLY_4X 7 ; dst1, dst2, coef1, coef2, round, tmp1, tmp2
- punpckhwd m%6, m%2, m%1
- MUL_ADD_2X %7, %6, %6, %5, [pw_m%4_%3], [pw_%3_%4]
- punpcklwd m%2, m%1
- MUL_ADD_2X %1, %2, %2, %5, [pw_m%4_%3], [pw_%3_%4]
- packssdw m%1, m%7
- packssdw m%2, m%6
- %endmacro
- %macro BUTTERFLY_4Xmm 7 ; dst1, dst2, coef1, coef2, round, tmp1, tmp2
- punpckhwd m%6, m%2, m%1
- MUL_ADD_2X %7, %6, %6, %5, [pw_m%4_%3], [pw_m%3_m%4]
- punpcklwd m%2, m%1
- MUL_ADD_2X %1, %2, %2, %5, [pw_m%4_%3], [pw_m%3_m%4]
- packssdw m%1, m%7
- packssdw m%2, m%6
- %endmacro
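- ; With a = m<dst1> and b = m<dst2> on entry, BUTTERFLY_4X computes the
- ; rotation dst1 = (a*coef1 - b*coef2 + 8192) >> 14 and
- ; dst2 = (a*coef2 + b*coef1 + 8192) >> 14; BUTTERFLY_4Xmm produces the
- ; negated second result by using the pw_m<coef1>_m<coef2> row instead of
- ; pw_<coef1>_<coef2>.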
- ; matrix transpose
- %macro INTERLEAVE_2X 4
- punpckh%1 m%4, m%2, m%3
- punpckl%1 m%2, m%3
- SWAP %3, %4
- %endmacro
- %macro TRANSPOSE8X8 9
- INTERLEAVE_2X wd, %1, %2, %9
- INTERLEAVE_2X wd, %3, %4, %9
- INTERLEAVE_2X wd, %5, %6, %9
- INTERLEAVE_2X wd, %7, %8, %9
- INTERLEAVE_2X dq, %1, %3, %9
- INTERLEAVE_2X dq, %2, %4, %9
- INTERLEAVE_2X dq, %5, %7, %9
- INTERLEAVE_2X dq, %6, %8, %9
- INTERLEAVE_2X qdq, %1, %5, %9
- INTERLEAVE_2X qdq, %3, %7, %9
- INTERLEAVE_2X qdq, %2, %6, %9
- INTERLEAVE_2X qdq, %4, %8, %9
- SWAP %2, %5
- SWAP %4, %7
- %endmacro
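- ; TRANSPOSE8X8 transposes the 8x8 block of words held in registers %1-%8
- ; with three interleave passes (word, dword, qword granularity), using %9 as
- ; scratch; the trailing SWAPs restore ascending row order.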
- %macro IDCT8_1D 0
- SUM_SUB 0, 4, 9
- BUTTERFLY_4X 2, 6, 6270, 15137, m8, 9, 10
- pmulhrsw m0, m12
- pmulhrsw m4, m12
- BUTTERFLY_4X 1, 7, 3196, 16069, m8, 9, 10
- BUTTERFLY_4X 5, 3, 13623, 9102, m8, 9, 10
- SUM_SUB 1, 5, 9
- SUM_SUB 7, 3, 9
- SUM_SUB 0, 6, 9
- SUM_SUB 4, 2, 9
- SUM_SUB 3, 5, 9
- pmulhrsw m3, m12
- pmulhrsw m5, m12
- SUM_SUB 0, 7, 9
- SUM_SUB 4, 3, 9
- SUM_SUB 2, 5, 9
- SUM_SUB 6, 1, 9
- SWAP 3, 6
- SWAP 1, 4
- %endmacro
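- ; IDCT8_1D runs one 8-point IDCT pass on the rows in m0-m7. It assumes
- ; m8 = pd_8192 and m12 = pw_11585x2 have been loaded by the caller and uses
- ; m9/m10 as scratch.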
- ; This macro handles 8 pixels per line
- %macro ADD_STORE_8P_2X 5; src1, src2, tmp1, tmp2, zero
- paddw m%1, m11
- paddw m%2, m11
- psraw m%1, 5
- psraw m%2, 5
- movh m%3, [outputq]
- movh m%4, [outputq + strideq]
- punpcklbw m%3, m%5
- punpcklbw m%4, m%5
- paddw m%3, m%1
- paddw m%4, m%2
- packuswb m%3, m%5
- packuswb m%4, m%5
- movh [outputq], m%3
- movh [outputq + strideq], m%4
- %endmacro
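- ; Final rounding for the 8x8 transforms: bias by pw_16 (preloaded in m11),
- ; shift right by 5 (ROUND_POWER_OF_TWO(x, 5)), add the result to two rows of
- ; the 8-bit prediction and saturate back with packuswb.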
- INIT_XMM ssse3
- ; full inverse 8x8 2D-DCT transform
- cglobal idct8x8_64_add, 3, 5, 13, input, output, stride
- mova m8, [pd_8192]
- mova m11, [pw_16]
- mova m12, [pw_11585x2]
- lea r3, [2 * strideq]
- %if CONFIG_VP9_HIGHBITDEPTH
- mova m0, [inputq + 0]
- packssdw m0, [inputq + 16]
- mova m1, [inputq + 32]
- packssdw m1, [inputq + 48]
- mova m2, [inputq + 64]
- packssdw m2, [inputq + 80]
- mova m3, [inputq + 96]
- packssdw m3, [inputq + 112]
- mova m4, [inputq + 128]
- packssdw m4, [inputq + 144]
- mova m5, [inputq + 160]
- packssdw m5, [inputq + 176]
- mova m6, [inputq + 192]
- packssdw m6, [inputq + 208]
- mova m7, [inputq + 224]
- packssdw m7, [inputq + 240]
- %else
- mova m0, [inputq + 0]
- mova m1, [inputq + 16]
- mova m2, [inputq + 32]
- mova m3, [inputq + 48]
- mova m4, [inputq + 64]
- mova m5, [inputq + 80]
- mova m6, [inputq + 96]
- mova m7, [inputq + 112]
- %endif
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- IDCT8_1D
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- IDCT8_1D
- pxor m12, m12
- ADD_STORE_8P_2X 0, 1, 9, 10, 12
- lea outputq, [outputq + r3]
- ADD_STORE_8P_2X 2, 3, 9, 10, 12
- lea outputq, [outputq + r3]
- ADD_STORE_8P_2X 4, 5, 9, 10, 12
- lea outputq, [outputq + r3]
- ADD_STORE_8P_2X 6, 7, 9, 10, 12
- RET
- ; inverse 8x8 2D-DCT transform with only the top-left 4x4 coeffs non-zero
- cglobal idct8x8_12_add, 3, 5, 13, input, output, stride
- mova m8, [pd_8192]
- mova m11, [pw_16]
- mova m12, [pw_11585x2]
- lea r3, [2 * strideq]
- %if CONFIG_VP9_HIGHBITDEPTH
- mova m0, [inputq + 0]
- packssdw m0, [inputq + 16]
- mova m1, [inputq + 32]
- packssdw m1, [inputq + 48]
- mova m2, [inputq + 64]
- packssdw m2, [inputq + 80]
- mova m3, [inputq + 96]
- packssdw m3, [inputq + 112]
- %else
- mova m0, [inputq + 0]
- mova m1, [inputq + 16]
- mova m2, [inputq + 32]
- mova m3, [inputq + 48]
- %endif
- punpcklwd m0, m1
- punpcklwd m2, m3
- punpckhdq m9, m0, m2
- punpckldq m0, m2
- SWAP 2, 9
- ; m0 -> [0], [0]
- ; m1 -> [1], [1]
- ; m2 -> [2], [2]
- ; m3 -> [3], [3]
- punpckhqdq m10, m0, m0
- punpcklqdq m0, m0
- punpckhqdq m9, m2, m2
- punpcklqdq m2, m2
- SWAP 1, 10
- SWAP 3, 9
- pmulhrsw m0, m12
- pmulhrsw m2, [dpw_30274_12540]
- pmulhrsw m1, [dpw_6392_32138]
- pmulhrsw m3, [dpw_m18204_27246]
- SUM_SUB 0, 2, 9
- SUM_SUB 1, 3, 9
- punpcklqdq m9, m3, m3
- punpckhqdq m5, m3, m9
- SUM_SUB 3, 5, 9
- punpckhqdq m5, m3
- pmulhrsw m5, m12
- punpckhqdq m9, m1, m5
- punpcklqdq m1, m5
- SWAP 5, 9
- SUM_SUB 0, 5, 9
- SUM_SUB 2, 1, 9
- punpckhqdq m3, m0, m0
- punpckhqdq m4, m1, m1
- punpckhqdq m6, m5, m5
- punpckhqdq m7, m2, m2
- punpcklwd m0, m3
- punpcklwd m7, m2
- punpcklwd m1, m4
- punpcklwd m6, m5
- punpckhdq m4, m0, m7
- punpckldq m0, m7
- punpckhdq m10, m1, m6
- punpckldq m5, m1, m6
- punpckhqdq m1, m0, m5
- punpcklqdq m0, m5
- punpckhqdq m3, m4, m10
- punpcklqdq m2, m4, m10
- pmulhrsw m0, m12
- pmulhrsw m6, m2, [dpw_30274_30274]
- pmulhrsw m4, m2, [dpw_12540_12540]
- pmulhrsw m7, m1, [dpw_32138_32138]
- pmulhrsw m1, [dpw_6392_6392]
- pmulhrsw m5, m3, [dpw_m18204_m18204]
- pmulhrsw m3, [dpw_27246_27246]
- mova m2, m0
- SUM_SUB 0, 6, 9
- SUM_SUB 2, 4, 9
- SUM_SUB 1, 5, 9
- SUM_SUB 7, 3, 9
- SUM_SUB 3, 5, 9
- pmulhrsw m3, m12
- pmulhrsw m5, m12
- SUM_SUB 0, 7, 9
- SUM_SUB 2, 3, 9
- SUM_SUB 4, 5, 9
- SUM_SUB 6, 1, 9
- SWAP 3, 6
- SWAP 1, 2
- SWAP 2, 4
- pxor m12, m12
- ADD_STORE_8P_2X 0, 1, 9, 10, 12
- lea outputq, [outputq + r3]
- ADD_STORE_8P_2X 2, 3, 9, 10, 12
- lea outputq, [outputq + r3]
- ADD_STORE_8P_2X 4, 5, 9, 10, 12
- lea outputq, [outputq + r3]
- ADD_STORE_8P_2X 6, 7, 9, 10, 12
- RET
- %define idx0 16 * 0
- %define idx1 16 * 1
- %define idx2 16 * 2
- %define idx3 16 * 3
- %define idx4 16 * 4
- %define idx5 16 * 5
- %define idx6 16 * 6
- %define idx7 16 * 7
- %define idx8 16 * 0
- %define idx9 16 * 1
- %define idx10 16 * 2
- %define idx11 16 * 3
- %define idx12 16 * 4
- %define idx13 16 * 5
- %define idx14 16 * 6
- %define idx15 16 * 7
- %define idx16 16 * 0
- %define idx17 16 * 1
- %define idx18 16 * 2
- %define idx19 16 * 3
- %define idx20 16 * 4
- %define idx21 16 * 5
- %define idx22 16 * 6
- %define idx23 16 * 7
- %define idx24 16 * 0
- %define idx25 16 * 1
- %define idx26 16 * 2
- %define idx27 16 * 3
- %define idx28 16 * 4
- %define idx29 16 * 5
- %define idx30 16 * 6
- %define idx31 16 * 7
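- ; The idx8-idx31 offsets deliberately repeat 16*0 .. 16*7: each group of
- ; eight rows is addressed relative to its own base offset (%1-%4 in the
- ; IDCT32X32_* macros below).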
- ; FROM idct32x32_add_neon.asm
- ;
- ; Instead of doing the transforms stage by stage, it is done by loading
- ; some input values and doing as many stages as possible to minimize the
- ; storing/loading of intermediate results. To fit within registers, the
- ; final coefficients are cut into four blocks:
- ; BLOCK A: 16-19,28-31
- ; BLOCK B: 20-23,24-27
- ; BLOCK C: 8-11,12-15
- ; BLOCK D: 0-3,4-7
- ; Blocks A and C are straight calculation through the various stages. In
- ; block B, further calculations are performed using the results from
- ; block A. In block D, further calculations are performed using the results
- ; from block C and then the final calculations are done using results from
- ; block A and B which have been combined at the end of block B.
- ;
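- ; The IDCT32X32_34 macro below expects the eight transposed input rows in
- ; m0-m7 (it also spills the even ones to [r4] = rsp + transposed_in),
- ; m8 = pd_8192 and stp pointing at the output of the current pass; %1-%4 are
- ; the byte offsets at which output rows 0-7, 8-15, 16-23 and 24-31 land.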
- %macro IDCT32X32_34 4
- ; BLOCK A STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m11, m1
- pmulhrsw m1, [pw___804x2] ; stp1_16
- mova [r4 + 0], m0
- pmulhrsw m11, [pw_16364x2] ; stp2_31
- mova [r4 + 16 * 2], m2
- mova m12, m7
- pmulhrsw m7, [pw_15426x2] ; stp1_28
- mova [r4 + 16 * 4], m4
- pmulhrsw m12, [pw_m5520x2] ; stp2_19
- mova [r4 + 16 * 6], m6
- ; BLOCK A STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m2, m1 ; stp1_16
- mova m0, m11 ; stp1_31
- mova m4, m7 ; stp1_28
- mova m15, m12 ; stp1_19
- ; BLOCK A STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 0, 2, 3196, 16069, m8, 9, 10 ; stp1_17, stp1_30
- BUTTERFLY_4Xmm 4, 15, 3196, 16069, m8, 9, 10 ; stp1_29, stp1_18
- ; BLOCK A STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 1, 12, 9 ; stp2_16, stp2_19
- SUM_SUB 0, 15, 9 ; stp2_17, stp2_18
- SUM_SUB 11, 7, 9 ; stp2_31, stp2_28
- SUM_SUB 2, 4, 9 ; stp2_30, stp2_29
- ; BLOCK A STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 4, 15, 6270, 15137, m8, 9, 10 ; stp1_18, stp1_29
- BUTTERFLY_4X 7, 12, 6270, 15137, m8, 9, 10 ; stp1_19, stp1_28
- ; BLOCK B STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m6, m5
- pmulhrsw m5, [pw__3981x2] ; stp1_20
- mova [stp + %4 + idx28], m12
- mova [stp + %4 + idx29], m15
- pmulhrsw m6, [pw_15893x2] ; stp2_27
- mova [stp + %4 + idx30], m2
- mova m2, m3
- pmulhrsw m3, [pw_m2404x2] ; stp1_23
- mova [stp + %4 + idx31], m11
- pmulhrsw m2, [pw_16207x2] ; stp2_24
- ; BLOCK B STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m13, m5 ; stp1_20
- mova m14, m6 ; stp1_27
- mova m15, m3 ; stp1_23
- mova m11, m2 ; stp1_24
- ; BLOCK B STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 14, 13, 13623, 9102, m8, 9, 10 ; stp1_21, stp1_26
- BUTTERFLY_4Xmm 11, 15, 13623, 9102, m8, 9, 10 ; stp1_25, stp1_22
- ; BLOCK B STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 3, 5, 9 ; stp2_23, stp2_20
- SUM_SUB 15, 14, 9 ; stp2_22, stp2_21
- SUM_SUB 2, 6, 9 ; stp2_24, stp2_27
- SUM_SUB 11, 13, 9 ; stp2_25, stp2_26
- ; BLOCK B STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4Xmm 6, 5, 6270, 15137, m8, 9, 10 ; stp1_27, stp1_20
- BUTTERFLY_4Xmm 13, 14, 6270, 15137, m8, 9, 10 ; stp1_26, stp1_21
- ; BLOCK B STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 1, 3, 9 ; stp2_16, stp2_23
- SUM_SUB 0, 15, 9 ; stp2_17, stp2_22
- SUM_SUB 4, 14, 9 ; stp2_18, stp2_21
- SUM_SUB 7, 5, 9 ; stp2_19, stp2_20
- mova [stp + %3 + idx16], m1
- mova [stp + %3 + idx17], m0
- mova [stp + %3 + idx18], m4
- mova [stp + %3 + idx19], m7
- mova m4, [stp + %4 + idx28]
- mova m7, [stp + %4 + idx29]
- mova m10, [stp + %4 + idx30]
- mova m12, [stp + %4 + idx31]
- SUM_SUB 4, 6, 9 ; stp2_28, stp2_27
- SUM_SUB 7, 13, 9 ; stp2_29, stp2_26
- SUM_SUB 10, 11, 9 ; stp2_30, stp2_25
- SUM_SUB 12, 2, 9 ; stp2_31, stp2_24
- mova [stp + %4 + idx28], m4
- mova [stp + %4 + idx29], m7
- mova [stp + %4 + idx30], m10
- mova [stp + %4 + idx31], m12
- ; BLOCK B STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 6, 5, 9
- pmulhrsw m6, m10 ; stp1_27
- pmulhrsw m5, m10 ; stp1_20
- SUM_SUB 13, 14, 9
- pmulhrsw m13, m10 ; stp1_26
- pmulhrsw m14, m10 ; stp1_21
- SUM_SUB 11, 15, 9
- pmulhrsw m11, m10 ; stp1_25
- pmulhrsw m15, m10 ; stp1_22
- SUM_SUB 2, 3, 9
- pmulhrsw m2, m10 ; stp1_24
- pmulhrsw m3, m10 ; stp1_23
- %else
- BUTTERFLY_4X 6, 5, 11585, 11585, m8, 9, 10 ; stp1_20, stp1_27
- SWAP 6, 5
- BUTTERFLY_4X 13, 14, 11585, 11585, m8, 9, 10 ; stp1_21, stp1_26
- SWAP 13, 14
- BUTTERFLY_4X 11, 15, 11585, 11585, m8, 9, 10 ; stp1_22, stp1_25
- SWAP 11, 15
- BUTTERFLY_4X 2, 3, 11585, 11585, m8, 9, 10 ; stp1_23, stp1_24
- SWAP 2, 3
- %endif
- mova [stp + %4 + idx24], m2
- mova [stp + %4 + idx25], m11
- mova [stp + %4 + idx26], m13
- mova [stp + %4 + idx27], m6
- ; BLOCK C STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK C STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m0, [rsp + transposed_in + 16 * 2]
- mova m6, [rsp + transposed_in + 16 * 6]
- mova m1, m0
- pmulhrsw m0, [pw__1606x2] ; stp1_8
- mova [stp + %3 + idx20], m5
- mova [stp + %3 + idx21], m14
- pmulhrsw m1, [pw_16305x2] ; stp2_15
- mova [stp + %3 + idx22], m15
- mova m7, m6
- pmulhrsw m7, [pw_m4756x2] ; stp2_11
- mova [stp + %3 + idx23], m3
- pmulhrsw m6, [pw_15679x2] ; stp1_12
- ; BLOCK C STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m3, m0 ; stp1_8
- mova m2, m1 ; stp1_15
- ; BLOCK C STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 2, 3, 6270, 15137, m8, 9, 10 ; stp1_9, stp1_14
- mova m4, m7 ; stp1_11
- mova m5, m6 ; stp1_12
- BUTTERFLY_4Xmm 5, 4, 6270, 15137, m8, 9, 10 ; stp1_13, stp1_10
- ; BLOCK C STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 7, 9 ; stp1_8, stp1_11
- SUM_SUB 2, 4, 9 ; stp1_9, stp1_10
- SUM_SUB 1, 6, 9 ; stp1_15, stp1_12
- SUM_SUB 3, 5, 9 ; stp1_14, stp1_13
- ; BLOCK C STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 5, 4, 9
- pmulhrsw m5, m10 ; stp1_13
- pmulhrsw m4, m10 ; stp1_10
- SUM_SUB 6, 7, 9
- pmulhrsw m6, m10 ; stp1_12
- pmulhrsw m7, m10 ; stp1_11
- %else
- BUTTERFLY_4X 5, 4, 11585, 11585, m8, 9, 10 ; stp1_10, stp1_13
- SWAP 5, 4
- BUTTERFLY_4X 6, 7, 11585, 11585, m8, 9, 10 ; stp1_11, stp1_12
- SWAP 6, 7
- %endif
- ; BLOCK C STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova [stp + %2 + idx8], m0
- mova [stp + %2 + idx9], m2
- mova [stp + %2 + idx10], m4
- mova [stp + %2 + idx11], m7
- ; BLOCK D STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK D STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK D STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m11, [rsp + transposed_in + 16 * 4]
- mova m12, m11
- pmulhrsw m11, [pw__3196x2] ; stp1_4
- pmulhrsw m12, [pw_16069x2] ; stp1_7
- ; BLOCK D STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m0, [rsp + transposed_in + 16 * 0]
- mova m10, [pw_11585x2]
- pmulhrsw m0, m10 ; stp1_1
- mova m14, m11 ; stp1_4
- mova m13, m12 ; stp1_7
- ; BLOCK D STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- SUM_SUB 13, 14, 9
- pmulhrsw m13, m10 ; stp1_6
- pmulhrsw m14, m10 ; stp1_5
- %else
- BUTTERFLY_4X 13, 14, 11585, 11585, m8, 9, 10 ; stp1_5, stp1_6
- SWAP 13, 14
- %endif
- mova m7, m0 ; stp1_0 = stp1_1
- mova m4, m0 ; stp1_1
- mova m2, m7 ; stp1_0
- ; BLOCK D STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 12, 9 ; stp1_0, stp1_7
- SUM_SUB 7, 13, 9 ; stp1_1, stp1_6
- SUM_SUB 2, 14, 9 ; stp1_2, stp1_5
- SUM_SUB 4, 11, 9 ; stp1_3, stp1_4
- ; BLOCK D STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 1, 9 ; stp1_0, stp1_15
- SUM_SUB 7, 3, 9 ; stp1_1, stp1_14
- SUM_SUB 2, 5, 9 ; stp1_2, stp1_13
- SUM_SUB 4, 6, 9 ; stp1_3, stp1_12
- ; 0-3, 28-31 final stage
- mova m15, [stp + %4 + idx30]
- mova m10, [stp + %4 + idx31]
- SUM_SUB 0, 10, 9 ; stp1_0, stp1_31
- SUM_SUB 7, 15, 9 ; stp1_1, stp1_30
- mova [stp + %1 + idx0], m0
- mova [stp + %1 + idx1], m7
- mova [stp + %4 + idx30], m15
- mova [stp + %4 + idx31], m10
- mova m7, [stp + %4 + idx28]
- mova m0, [stp + %4 + idx29]
- SUM_SUB 2, 0, 9 ; stp1_2, stp1_29
- SUM_SUB 4, 7, 9 ; stp1_3, stp1_28
- mova [stp + %1 + idx2], m2
- mova [stp + %1 + idx3], m4
- mova [stp + %4 + idx28], m7
- mova [stp + %4 + idx29], m0
- ; 12-15, 16-19 final stage
- mova m0, [stp + %3 + idx16]
- mova m7, [stp + %3 + idx17]
- mova m2, [stp + %3 + idx18]
- mova m4, [stp + %3 + idx19]
- SUM_SUB 1, 0, 9 ; stp1_15, stp1_16
- SUM_SUB 3, 7, 9 ; stp1_14, stp1_17
- SUM_SUB 5, 2, 9 ; stp1_13, stp1_18
- SUM_SUB 6, 4, 9 ; stp1_12, stp1_19
- mova [stp + %2 + idx12], m6
- mova [stp + %2 + idx13], m5
- mova [stp + %2 + idx14], m3
- mova [stp + %2 + idx15], m1
- mova [stp + %3 + idx16], m0
- mova [stp + %3 + idx17], m7
- mova [stp + %3 + idx18], m2
- mova [stp + %3 + idx19], m4
- mova m4, [stp + %2 + idx8]
- mova m5, [stp + %2 + idx9]
- mova m6, [stp + %2 + idx10]
- mova m7, [stp + %2 + idx11]
- SUM_SUB 11, 7, 9 ; stp1_4, stp1_11
- SUM_SUB 14, 6, 9 ; stp1_5, stp1_10
- SUM_SUB 13, 5, 9 ; stp1_6, stp1_9
- SUM_SUB 12, 4, 9 ; stp1_7, stp1_8
- ; 4-7, 24-27 final stage
- mova m0, [stp + %4 + idx27]
- mova m1, [stp + %4 + idx26]
- mova m2, [stp + %4 + idx25]
- mova m3, [stp + %4 + idx24]
- SUM_SUB 11, 0, 9 ; stp1_4, stp1_27
- SUM_SUB 14, 1, 9 ; stp1_5, stp1_26
- SUM_SUB 13, 2, 9 ; stp1_6, stp1_25
- SUM_SUB 12, 3, 9 ; stp1_7, stp1_24
- mova [stp + %4 + idx27], m0
- mova [stp + %4 + idx26], m1
- mova [stp + %4 + idx25], m2
- mova [stp + %4 + idx24], m3
- mova [stp + %1 + idx4], m11
- mova [stp + %1 + idx5], m14
- mova [stp + %1 + idx6], m13
- mova [stp + %1 + idx7], m12
- ; 8-11, 20-23 final stage
- mova m0, [stp + %3 + idx20]
- mova m1, [stp + %3 + idx21]
- mova m2, [stp + %3 + idx22]
- mova m3, [stp + %3 + idx23]
- SUM_SUB 7, 0, 9 ; stp1_11, stp1_20
- SUM_SUB 6, 1, 9 ; stp1_10, stp1_21
- SUM_SUB 5, 2, 9 ; stp1_9, stp1_22
- SUM_SUB 4, 3, 9 ; stp1_8, stp1_23
- mova [stp + %2 + idx8], m4
- mova [stp + %2 + idx9], m5
- mova [stp + %2 + idx10], m6
- mova [stp + %2 + idx11], m7
- mova [stp + %3 + idx20], m0
- mova [stp + %3 + idx21], m1
- mova [stp + %3 + idx22], m2
- mova [stp + %3 + idx23], m3
- %endmacro
- %macro RECON_AND_STORE 1
- mova m11, [pw_32]
- lea stp, [rsp + %1]
- mov r6, 32
- pxor m8, m8
- %%recon_and_store:
- mova m0, [stp + 16 * 32 * 0]
- mova m1, [stp + 16 * 32 * 1]
- mova m2, [stp + 16 * 32 * 2]
- mova m3, [stp + 16 * 32 * 3]
- add stp, 16
- paddw m0, m11
- paddw m1, m11
- paddw m2, m11
- paddw m3, m11
- psraw m0, 6
- psraw m1, 6
- psraw m2, 6
- psraw m3, 6
- movh m4, [outputq + 0]
- movh m5, [outputq + 8]
- movh m6, [outputq + 16]
- movh m7, [outputq + 24]
- punpcklbw m4, m8
- punpcklbw m5, m8
- punpcklbw m6, m8
- punpcklbw m7, m8
- paddw m0, m4
- paddw m1, m5
- paddw m2, m6
- paddw m3, m7
- packuswb m0, m1
- packuswb m2, m3
- mova [outputq + 0], m0
- mova [outputq + 16], m2
- lea outputq, [outputq + strideq]
- dec r6
- jnz %%recon_and_store
- %endmacro
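- ; RECON_AND_STORE rounds the pass-two output with pw_32 and a shift of 6
- ; (ROUND_POWER_OF_TWO(x, 6)), adds it to one 32-pixel prediction row per
- ; iteration and writes the saturated result back; r6 counts the 32 rows.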
- %define i32x32_size 16*32*5
- %define pass_two_start 16*32*0
- %define transposed_in 16*32*4
- %define pass_one_start 16*32*0
- %define stp r8
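- ; Assumed stack layout (i32x32_size = 16*32*5 bytes): 16*32*4 bytes of
- ; pass-one/pass-two intermediate rows starting at pass_one_start, followed
- ; by a 16*32-byte scratch area at transposed_in for the transposed slice of
- ; input currently being processed; stp (r8) walks through it.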
- INIT_XMM ssse3
- cglobal idct32x32_34_add, 3, 11, 16, i32x32_size, input, output, stride
- mova m8, [pd_8192]
- lea stp, [rsp + pass_one_start]
- idct32x32_34:
- mov r3, inputq
- lea r4, [rsp + transposed_in]
- idct32x32_34_transpose:
- %if CONFIG_VP9_HIGHBITDEPTH
- mova m0, [r3 + 0]
- packssdw m0, [r3 + 16]
- mova m1, [r3 + 32 * 4]
- packssdw m1, [r3 + 32 * 4 + 16]
- mova m2, [r3 + 32 * 8]
- packssdw m2, [r3 + 32 * 8 + 16]
- mova m3, [r3 + 32 * 12]
- packssdw m3, [r3 + 32 * 12 + 16]
- mova m4, [r3 + 32 * 16]
- packssdw m4, [r3 + 32 * 16 + 16]
- mova m5, [r3 + 32 * 20]
- packssdw m5, [r3 + 32 * 20 + 16]
- mova m6, [r3 + 32 * 24]
- packssdw m6, [r3 + 32 * 24 + 16]
- mova m7, [r3 + 32 * 28]
- packssdw m7, [r3 + 32 * 28 + 16]
- %else
- mova m0, [r3 + 0]
- mova m1, [r3 + 16 * 4]
- mova m2, [r3 + 16 * 8]
- mova m3, [r3 + 16 * 12]
- mova m4, [r3 + 16 * 16]
- mova m5, [r3 + 16 * 20]
- mova m6, [r3 + 16 * 24]
- mova m7, [r3 + 16 * 28]
- %endif
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- IDCT32X32_34 16*0, 16*32, 16*64, 16*96
- lea stp, [stp + 16 * 8]
- mov r6, 4
- lea stp, [rsp + pass_one_start]
- lea r9, [rsp + pass_one_start]
- idct32x32_34_2:
- lea r4, [rsp + transposed_in]
- mov r3, r9
- idct32x32_34_transpose_2:
- mova m0, [r3 + 0]
- mova m1, [r3 + 16 * 1]
- mova m2, [r3 + 16 * 2]
- mova m3, [r3 + 16 * 3]
- mova m4, [r3 + 16 * 4]
- mova m5, [r3 + 16 * 5]
- mova m6, [r3 + 16 * 6]
- mova m7, [r3 + 16 * 7]
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- IDCT32X32_34 16*0, 16*8, 16*16, 16*24
- lea stp, [stp + 16 * 32]
- add r9, 16 * 32
- dec r6
- jnz idct32x32_34_2
- RECON_AND_STORE pass_two_start
- RET
- %macro IDCT32X32_135 4
- ; BLOCK A STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m1, [rsp + transposed_in + 16 * 1]
- mova m11, m1
- pmulhrsw m1, [pw___804x2] ; stp1_16
- pmulhrsw m11, [pw_16364x2] ; stp2_31
- mova m7, [rsp + transposed_in + 16 * 7]
- mova m12, m7
- pmulhrsw m7, [pw_15426x2] ; stp1_28
- pmulhrsw m12, [pw_m5520x2] ; stp2_19
- mova m3, [rsp + transposed_in + 16 * 9]
- mova m4, m3
- pmulhrsw m3, [pw__7005x2] ; stp1_18
- pmulhrsw m4, [pw_14811x2] ; stp2_29
- mova m0, [rsp + transposed_in + 16 * 15]
- mova m2, m0
- pmulhrsw m0, [pw_12140x2] ; stp1_30
- pmulhrsw m2, [pw_m11003x2] ; stp2_17
- ; BLOCK A STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 1, 2, 9 ; stp2_16, stp2_17
- SUM_SUB 12, 3, 9 ; stp2_19, stp2_18
- SUM_SUB 7, 4, 9 ; stp2_28, stp2_29
- SUM_SUB 11, 0, 9 ; stp2_31, stp2_30
- ; BLOCK A STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 0, 2, 3196, 16069, m8, 9, 10 ; stp1_17, stp1_30
- BUTTERFLY_4Xmm 4, 3, 3196, 16069, m8, 9, 10 ; stp1_29, stp1_18
- ; BLOCK A STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 1, 12, 9 ; stp2_16, stp2_19
- SUM_SUB 0, 3, 9 ; stp2_17, stp2_18
- SUM_SUB 11, 7, 9 ; stp2_31, stp2_28
- SUM_SUB 2, 4, 9 ; stp2_30, stp2_29
- ; BLOCK A STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 4, 3, 6270, 15137, m8, 9, 10 ; stp1_18, stp1_29
- BUTTERFLY_4X 7, 12, 6270, 15137, m8, 9, 10 ; stp1_19, stp1_28
- mova [stp + %3 + idx16], m1
- mova [stp + %3 + idx17], m0
- mova [stp + %3 + idx18], m4
- mova [stp + %3 + idx19], m7
- mova [stp + %4 + idx28], m12
- mova [stp + %4 + idx29], m3
- mova [stp + %4 + idx30], m2
- mova [stp + %4 + idx31], m11
- ; BLOCK B STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m2, [rsp + transposed_in + 16 * 3]
- mova m3, m2
- pmulhrsw m3, [pw_m2404x2] ; stp1_23
- pmulhrsw m2, [pw_16207x2] ; stp2_24
- mova m5, [rsp + transposed_in + 16 * 5]
- mova m6, m5
- pmulhrsw m5, [pw__3981x2] ; stp1_20
- pmulhrsw m6, [pw_15893x2] ; stp2_27
- mova m14, [rsp + transposed_in + 16 * 11]
- mova m13, m14
- pmulhrsw m13, [pw_m8423x2] ; stp1_21
- pmulhrsw m14, [pw_14053x2] ; stp2_26
- mova m0, [rsp + transposed_in + 16 * 13]
- mova m1, m0
- pmulhrsw m0, [pw__9760x2] ; stp1_22
- pmulhrsw m1, [pw_13160x2] ; stp2_25
- ; BLOCK B STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 5, 13, 9 ; stp2_20, stp2_21
- SUM_SUB 3, 0, 9 ; stp2_23, stp2_22
- SUM_SUB 2, 1, 9 ; stp2_24, stp2_25
- SUM_SUB 6, 14, 9 ; stp2_27, stp2_26
- ; BLOCK B STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 14, 13, 13623, 9102, m8, 9, 10 ; stp1_21, stp1_26
- BUTTERFLY_4Xmm 1, 0, 13623, 9102, m8, 9, 10 ; stp1_25, stp1_22
- ; BLOCK B STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 3, 5, 9 ; stp2_23, stp2_20
- SUM_SUB 0, 14, 9 ; stp2_22, stp2_21
- SUM_SUB 2, 6, 9 ; stp2_24, stp2_27
- SUM_SUB 1, 13, 9 ; stp2_25, stp2_26
- ; BLOCK B STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4Xmm 6, 5, 6270, 15137, m8, 9, 10 ; stp1_27, stp1_20
- BUTTERFLY_4Xmm 13, 14, 6270, 15137, m8, 9, 10 ; stp1_26, stp1_21
- ; BLOCK B STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m4, [stp + %3 + idx16]
- mova m7, [stp + %3 + idx17]
- mova m11, [stp + %3 + idx18]
- mova m12, [stp + %3 + idx19]
- SUM_SUB 4, 3, 9 ; stp2_16, stp2_23
- SUM_SUB 7, 0, 9 ; stp2_17, stp2_22
- SUM_SUB 11, 14, 9 ; stp2_18, stp2_21
- SUM_SUB 12, 5, 9 ; stp2_19, stp2_20
- mova [stp + %3 + idx16], m4
- mova [stp + %3 + idx17], m7
- mova [stp + %3 + idx18], m11
- mova [stp + %3 + idx19], m12
- mova m4, [stp + %4 + idx28]
- mova m7, [stp + %4 + idx29]
- mova m11, [stp + %4 + idx30]
- mova m12, [stp + %4 + idx31]
- SUM_SUB 4, 6, 9 ; stp2_28, stp2_27
- SUM_SUB 7, 13, 9 ; stp2_29, stp2_26
- SUM_SUB 11, 1, 9 ; stp2_30, stp2_25
- SUM_SUB 12, 2, 9 ; stp2_31, stp2_24
- mova [stp + %4 + idx28], m4
- mova [stp + %4 + idx29], m7
- mova [stp + %4 + idx30], m11
- mova [stp + %4 + idx31], m12
- ; BLOCK B STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 6, 5, 9
- pmulhrsw m6, m10 ; stp1_27
- pmulhrsw m5, m10 ; stp1_20
- SUM_SUB 13, 14, 9
- pmulhrsw m13, m10 ; stp1_26
- pmulhrsw m14, m10 ; stp1_21
- SUM_SUB 1, 0, 9
- pmulhrsw m1, m10 ; stp1_25
- pmulhrsw m0, m10 ; stp1_22
- SUM_SUB 2, 3, 9
- pmulhrsw m2, m10 ; stp1_24
- pmulhrsw m3, m10 ; stp1_23
- %else
- BUTTERFLY_4X 6, 5, 11585, 11585, m8, 9, 10 ; stp1_20, stp1_27
- SWAP 6, 5
- BUTTERFLY_4X 13, 14, 11585, 11585, m8, 9, 10 ; stp1_21, stp1_26
- SWAP 13, 14
- BUTTERFLY_4X 1, 0, 11585, 11585, m8, 9, 10 ; stp1_22, stp1_25
- SWAP 1, 0
- BUTTERFLY_4X 2, 3, 11585, 11585, m8, 9, 10 ; stp1_23, stp1_24
- SWAP 2, 3
- %endif
- mova [stp + %3 + idx20], m5
- mova [stp + %3 + idx21], m14
- mova [stp + %3 + idx22], m0
- mova [stp + %3 + idx23], m3
- mova [stp + %4 + idx24], m2
- mova [stp + %4 + idx25], m1
- mova [stp + %4 + idx26], m13
- mova [stp + %4 + idx27], m6
- ; BLOCK C STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK C STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m0, [rsp + transposed_in + 16 * 2]
- mova m1, m0
- pmulhrsw m0, [pw__1606x2] ; stp1_8
- pmulhrsw m1, [pw_16305x2] ; stp2_15
- mova m6, [rsp + transposed_in + 16 * 6]
- mova m7, m6
- pmulhrsw m7, [pw_m4756x2] ; stp2_11
- pmulhrsw m6, [pw_15679x2] ; stp1_12
- mova m4, [rsp + transposed_in + 16 * 10]
- mova m5, m4
- pmulhrsw m4, [pw__7723x2] ; stp1_10
- pmulhrsw m5, [pw_14449x2] ; stp2_13
- mova m2, [rsp + transposed_in + 16 * 14]
- mova m3, m2
- pmulhrsw m3, [pw_m10394x2] ; stp1_9
- pmulhrsw m2, [pw_12665x2] ; stp2_14
- ; BLOCK C STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 3, 9 ; stp1_8, stp1_9
- SUM_SUB 7, 4, 9 ; stp1_11, stp1_10
- SUM_SUB 6, 5, 9 ; stp1_12, stp1_13
- SUM_SUB 1, 2, 9 ; stp1_15, stp1_14
- ; BLOCK C STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 2, 3, 6270, 15137, m8, 9, 10 ; stp1_9, stp1_14
- BUTTERFLY_4Xmm 5, 4, 6270, 15137, m8, 9, 10 ; stp1_13, stp1_10
- ; BLOCK C STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 7, 9 ; stp1_8, stp1_11
- SUM_SUB 2, 4, 9 ; stp1_9, stp1_10
- SUM_SUB 1, 6, 9 ; stp1_15, stp1_12
- SUM_SUB 3, 5, 9 ; stp1_14, stp1_13
- ; BLOCK C STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 5, 4, 9
- pmulhrsw m5, m10 ; stp1_13
- pmulhrsw m4, m10 ; stp1_10
- SUM_SUB 6, 7, 9
- pmulhrsw m6, m10 ; stp1_12
- pmulhrsw m7, m10 ; stp1_11
- %else
- BUTTERFLY_4X 5, 4, 11585, 11585, m8, 9, 10 ; stp1_10, stp1_13
- SWAP 5, 4
- BUTTERFLY_4X 6, 7, 11585, 11585, m8, 9, 10 ; stp1_11, stp1_12
- SWAP 6, 7
- %endif
- ; BLOCK C STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova [stp + %2 + idx8], m0
- mova [stp + %2 + idx9], m2
- mova [stp + %2 + idx10], m4
- mova [stp + %2 + idx11], m7
- mova [stp + %2 + idx12], m6
- mova [stp + %2 + idx13], m5
- mova [stp + %2 + idx14], m3
- mova [stp + %2 + idx15], m1
- ; BLOCK D STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK D STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK D STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m11, [rsp + transposed_in + 16 * 4]
- mova m12, m11
- pmulhrsw m11, [pw__3196x2] ; stp1_4
- pmulhrsw m12, [pw_16069x2] ; stp1_7
- mova m13, [rsp + transposed_in + 16 * 12]
- mova m14, m13
- pmulhrsw m13, [pw_13623x2] ; stp1_6
- pmulhrsw m14, [pw_m9102x2] ; stp1_5
- ; BLOCK D STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m0, [rsp + transposed_in + 16 * 0]
- mova m2, [rsp + transposed_in + 16 * 8]
- pmulhrsw m0, [pw_11585x2] ; stp1_1
- mova m3, m2
- pmulhrsw m2, [pw__6270x2] ; stp1_2
- pmulhrsw m3, [pw_15137x2] ; stp1_3
- SUM_SUB 11, 14, 9 ; stp1_4, stp1_5
- SUM_SUB 12, 13, 9 ; stp1_7, stp1_6
- ; BLOCK D STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 13, 14, 9
- pmulhrsw m13, m10 ; stp1_6
- pmulhrsw m14, m10 ; stp1_5
- %else
- BUTTERFLY_4X 13, 14, 11585, 11585, m8, 9, 10 ; stp1_5, stp1_6
- SWAP 13, 14
- %endif
- mova m1, m0 ; stp1_0 = stp1_1
- SUM_SUB 0, 3, 9 ; stp1_0, stp1_3
- SUM_SUB 1, 2, 9 ; stp1_1, stp1_2
- ; BLOCK D STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 12, 9 ; stp1_0, stp1_7
- SUM_SUB 1, 13, 9 ; stp1_1, stp1_6
- SUM_SUB 2, 14, 9 ; stp1_2, stp1_5
- SUM_SUB 3, 11, 9 ; stp1_3, stp1_4
- ; BLOCK D STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m4, [stp + %2 + idx12]
- mova m5, [stp + %2 + idx13]
- mova m6, [stp + %2 + idx14]
- mova m7, [stp + %2 + idx15]
- SUM_SUB 0, 7, 9 ; stp1_0, stp1_15
- SUM_SUB 1, 6, 9 ; stp1_1, stp1_14
- SUM_SUB 2, 5, 9 ; stp1_2, stp1_13
- SUM_SUB 3, 4, 9 ; stp1_3, stp1_12
- ; 0-3, 28-31 final stage
- mova m10, [stp + %4 + idx31]
- mova m15, [stp + %4 + idx30]
- SUM_SUB 0, 10, 9 ; stp1_0, stp1_31
- SUM_SUB 1, 15, 9 ; stp1_1, stp1_30
- mova [stp + %1 + idx0], m0
- mova [stp + %1 + idx1], m1
- mova [stp + %4 + idx31], m10
- mova [stp + %4 + idx30], m15
- mova m0, [stp + %4 + idx29]
- mova m1, [stp + %4 + idx28]
- SUM_SUB 2, 0, 9 ; stp1_2, stp1_29
- SUM_SUB 3, 1, 9 ; stp1_3, stp1_28
- mova [stp + %1 + idx2], m2
- mova [stp + %1 + idx3], m3
- mova [stp + %4 + idx29], m0
- mova [stp + %4 + idx28], m1
- ; 12-15, 16-19 final stage
- mova m0, [stp + %3 + idx16]
- mova m1, [stp + %3 + idx17]
- mova m2, [stp + %3 + idx18]
- mova m3, [stp + %3 + idx19]
- SUM_SUB 7, 0, 9 ; stp1_15, stp1_16
- SUM_SUB 6, 1, 9 ; stp1_14, stp1_17
- SUM_SUB 5, 2, 9 ; stp1_13, stp1_18
- SUM_SUB 4, 3, 9 ; stp1_12, stp1_19
- mova [stp + %2 + idx12], m4
- mova [stp + %2 + idx13], m5
- mova [stp + %2 + idx14], m6
- mova [stp + %2 + idx15], m7
- mova [stp + %3 + idx16], m0
- mova [stp + %3 + idx17], m1
- mova [stp + %3 + idx18], m2
- mova [stp + %3 + idx19], m3
- mova m4, [stp + %2 + idx8]
- mova m5, [stp + %2 + idx9]
- mova m6, [stp + %2 + idx10]
- mova m7, [stp + %2 + idx11]
- SUM_SUB 11, 7, 9 ; stp1_4, stp1_11
- SUM_SUB 14, 6, 9 ; stp1_5, stp1_10
- SUM_SUB 13, 5, 9 ; stp1_6, stp1_9
- SUM_SUB 12, 4, 9 ; stp1_7, stp1_8
- ; 4-7, 24-27 final stage
- mova m3, [stp + %4 + idx24]
- mova m2, [stp + %4 + idx25]
- mova m1, [stp + %4 + idx26]
- mova m0, [stp + %4 + idx27]
- SUM_SUB 12, 3, 9 ; stp1_7, stp1_24
- SUM_SUB 13, 2, 9 ; stp1_6, stp1_25
- SUM_SUB 14, 1, 9 ; stp1_5, stp1_26
- SUM_SUB 11, 0, 9 ; stp1_4, stp1_27
- mova [stp + %4 + idx24], m3
- mova [stp + %4 + idx25], m2
- mova [stp + %4 + idx26], m1
- mova [stp + %4 + idx27], m0
- mova [stp + %1 + idx4], m11
- mova [stp + %1 + idx5], m14
- mova [stp + %1 + idx6], m13
- mova [stp + %1 + idx7], m12
- ; 8-11, 20-23 final stage
- mova m0, [stp + %3 + idx20]
- mova m1, [stp + %3 + idx21]
- mova m2, [stp + %3 + idx22]
- mova m3, [stp + %3 + idx23]
- SUM_SUB 7, 0, 9 ; stp1_11, stp1_20
- SUM_SUB 6, 1, 9 ; stp1_10, stp1_21
- SUM_SUB 5, 2, 9 ; stp1_9, stp1_22
- SUM_SUB 4, 3, 9 ; stp1_8, stp1_23
- mova [stp + %2 + idx8], m4
- mova [stp + %2 + idx9], m5
- mova [stp + %2 + idx10], m6
- mova [stp + %2 + idx11], m7
- mova [stp + %3 + idx20], m0
- mova [stp + %3 + idx21], m1
- mova [stp + %3 + idx22], m2
- mova [stp + %3 + idx23], m3
- %endmacro
- INIT_XMM ssse3
- cglobal idct32x32_135_add, 3, 11, 16, i32x32_size, input, output, stride
- mova m8, [pd_8192]
- mov r6, 2
- lea stp, [rsp + pass_one_start]
- idct32x32_135:
- mov r3, inputq
- lea r4, [rsp + transposed_in]
- mov r7, 2
- idct32x32_135_transpose:
- %if CONFIG_VP9_HIGHBITDEPTH
- mova m0, [r3 + 0]
- packssdw m0, [r3 + 16]
- mova m1, [r3 + 32 * 4]
- packssdw m1, [r3 + 32 * 4 + 16]
- mova m2, [r3 + 32 * 8]
- packssdw m2, [r3 + 32 * 8 + 16]
- mova m3, [r3 + 32 * 12]
- packssdw m3, [r3 + 32 * 12 + 16]
- mova m4, [r3 + 32 * 16]
- packssdw m4, [r3 + 32 * 16 + 16]
- mova m5, [r3 + 32 * 20]
- packssdw m5, [r3 + 32 * 20 + 16]
- mova m6, [r3 + 32 * 24]
- packssdw m6, [r3 + 32 * 24 + 16]
- mova m7, [r3 + 32 * 28]
- packssdw m7, [r3 + 32 * 28 + 16]
- %else
- mova m0, [r3 + 0]
- mova m1, [r3 + 16 * 4]
- mova m2, [r3 + 16 * 8]
- mova m3, [r3 + 16 * 12]
- mova m4, [r3 + 16 * 16]
- mova m5, [r3 + 16 * 20]
- mova m6, [r3 + 16 * 24]
- mova m7, [r3 + 16 * 28]
- %endif
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- mova [r4 + 0], m0
- mova [r4 + 16 * 1], m1
- mova [r4 + 16 * 2], m2
- mova [r4 + 16 * 3], m3
- mova [r4 + 16 * 4], m4
- mova [r4 + 16 * 5], m5
- mova [r4 + 16 * 6], m6
- mova [r4 + 16 * 7], m7
- %if CONFIG_VP9_HIGHBITDEPTH
- add r3, 32
- %else
- add r3, 16
- %endif
- add r4, 16 * 8
- dec r7
- jne idct32x32_135_transpose
- IDCT32X32_135 16*0, 16*32, 16*64, 16*96
- lea stp, [stp + 16 * 8]
- %if CONFIG_VP9_HIGHBITDEPTH
- lea inputq, [inputq + 32 * 32]
- %else
- lea inputq, [inputq + 16 * 32]
- %endif
- dec r6
- jnz idct32x32_135
- mov r6, 4
- lea stp, [rsp + pass_one_start]
- lea r9, [rsp + pass_one_start]
- idct32x32_135_2:
- lea r4, [rsp + transposed_in]
- mov r3, r9
- mov r7, 2
- idct32x32_135_transpose_2:
- mova m0, [r3 + 0]
- mova m1, [r3 + 16 * 1]
- mova m2, [r3 + 16 * 2]
- mova m3, [r3 + 16 * 3]
- mova m4, [r3 + 16 * 4]
- mova m5, [r3 + 16 * 5]
- mova m6, [r3 + 16 * 6]
- mova m7, [r3 + 16 * 7]
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- mova [r4 + 0], m0
- mova [r4 + 16 * 1], m1
- mova [r4 + 16 * 2], m2
- mova [r4 + 16 * 3], m3
- mova [r4 + 16 * 4], m4
- mova [r4 + 16 * 5], m5
- mova [r4 + 16 * 6], m6
- mova [r4 + 16 * 7], m7
- add r3, 16 * 8
- add r4, 16 * 8
- dec r7
- jne idct32x32_135_transpose_2
- IDCT32X32_135 16*0, 16*8, 16*16, 16*24
- lea stp, [stp + 16 * 32]
- add r9, 16 * 32
- dec r6
- jnz idct32x32_135_2
- RECON_AND_STORE pass_two_start
- RET
- %macro IDCT32X32_1024 4
- ; BLOCK A STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m1, [rsp + transposed_in + 16 * 1]
- mova m11, [rsp + transposed_in + 16 * 31]
- BUTTERFLY_4X 1, 11, 804, 16364, m8, 9, 10 ; stp1_16, stp1_31
- mova m0, [rsp + transposed_in + 16 * 15]
- mova m2, [rsp + transposed_in + 16 * 17]
- BUTTERFLY_4X 2, 0, 12140, 11003, m8, 9, 10 ; stp1_17, stp1_30
- mova m7, [rsp + transposed_in + 16 * 7]
- mova m12, [rsp + transposed_in + 16 * 25]
- BUTTERFLY_4X 12, 7, 15426, 5520, m8, 9, 10 ; stp1_19, stp1_28
- mova m3, [rsp + transposed_in + 16 * 9]
- mova m4, [rsp + transposed_in + 16 * 23]
- BUTTERFLY_4X 3, 4, 7005, 14811, m8, 9, 10 ; stp1_18, stp1_29
- ; BLOCK A STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 1, 2, 9 ; stp2_16, stp2_17
- SUM_SUB 12, 3, 9 ; stp2_19, stp2_18
- SUM_SUB 7, 4, 9 ; stp2_28, stp2_29
- SUM_SUB 11, 0, 9 ; stp2_31, stp2_30
- ; BLOCK A STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 0, 2, 3196, 16069, m8, 9, 10 ; stp1_17, stp1_30
- BUTTERFLY_4Xmm 4, 3, 3196, 16069, m8, 9, 10 ; stp1_29, stp1_18
- ; BLOCK A STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 1, 12, 9 ; stp2_16, stp2_19
- SUM_SUB 0, 3, 9 ; stp2_17, stp2_18
- SUM_SUB 11, 7, 9 ; stp2_31, stp2_28
- SUM_SUB 2, 4, 9 ; stp2_30, stp2_29
- ; BLOCK A STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 4, 3, 6270, 15137, m8, 9, 10 ; stp1_18, stp1_29
- BUTTERFLY_4X 7, 12, 6270, 15137, m8, 9, 10 ; stp1_19, stp1_28
- mova [stp + %3 + idx16], m1
- mova [stp + %3 + idx17], m0
- mova [stp + %3 + idx18], m4
- mova [stp + %3 + idx19], m7
- mova [stp + %4 + idx28], m12
- mova [stp + %4 + idx29], m3
- mova [stp + %4 + idx30], m2
- mova [stp + %4 + idx31], m11
- ; BLOCK B STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m5, [rsp + transposed_in + 16 * 5]
- mova m6, [rsp + transposed_in + 16 * 27]
- BUTTERFLY_4X 5, 6, 3981, 15893, m8, 9, 10 ; stp1_20, stp1_27
- mova m13, [rsp + transposed_in + 16 * 21]
- mova m14, [rsp + transposed_in + 16 * 11]
- BUTTERFLY_4X 13, 14, 14053, 8423, m8, 9, 10 ; stp1_21, stp1_26
- mova m0, [rsp + transposed_in + 16 * 13]
- mova m1, [rsp + transposed_in + 16 * 19]
- BUTTERFLY_4X 0, 1, 9760, 13160, m8, 9, 10 ; stp1_22, stp1_25
- mova m2, [rsp + transposed_in + 16 * 3]
- mova m3, [rsp + transposed_in + 16 * 29]
- BUTTERFLY_4X 3, 2, 16207, 2404, m8, 9, 10 ; stp1_23, stp1_24
- ; BLOCK B STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 5, 13, 9 ; stp2_20, stp2_21
- SUM_SUB 3, 0, 9 ; stp2_23, stp2_22
- SUM_SUB 2, 1, 9 ; stp2_24, stp2_25
- SUM_SUB 6, 14, 9 ; stp2_27, stp2_26
- ; BLOCK B STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 14, 13, 13623, 9102, m8, 9, 10 ; stp1_21, stp1_26
- BUTTERFLY_4Xmm 1, 0, 13623, 9102, m8, 9, 10 ; stp1_25, stp1_22
- ; BLOCK B STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 3, 5, 9 ; stp2_23, stp2_20
- SUM_SUB 0, 14, 9 ; stp2_22, stp2_21
- SUM_SUB 2, 6, 9 ; stp2_24, stp2_27
- SUM_SUB 1, 13, 9 ; stp2_25, stp2_26
- ; BLOCK B STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4Xmm 6, 5, 6270, 15137, m8, 9, 10 ; stp1_27, stp1_20
- BUTTERFLY_4Xmm 13, 14, 6270, 15137, m8, 9, 10 ; stp1_26, stp1_21
- ; BLOCK B STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m4, [stp + %3 + idx16]
- mova m7, [stp + %3 + idx17]
- mova m11, [stp + %3 + idx18]
- mova m12, [stp + %3 + idx19]
- SUM_SUB 4, 3, 9 ; stp2_16, stp2_23
- SUM_SUB 7, 0, 9 ; stp2_17, stp2_22
- SUM_SUB 11, 14, 9 ; stp2_18, stp2_21
- SUM_SUB 12, 5, 9 ; stp2_19, stp2_20
- mova [stp + %3 + idx16], m4
- mova [stp + %3 + idx17], m7
- mova [stp + %3 + idx18], m11
- mova [stp + %3 + idx19], m12
- mova m4, [stp + %4 + idx28]
- mova m7, [stp + %4 + idx29]
- mova m11, [stp + %4 + idx30]
- mova m12, [stp + %4 + idx31]
- SUM_SUB 4, 6, 9 ; stp2_28, stp2_27
- SUM_SUB 7, 13, 9 ; stp2_29, stp2_26
- SUM_SUB 11, 1, 9 ; stp2_30, stp2_25
- SUM_SUB 12, 2, 9 ; stp2_31, stp2_24
- mova [stp + %4 + idx28], m4
- mova [stp + %4 + idx29], m7
- mova [stp + %4 + idx30], m11
- mova [stp + %4 + idx31], m12
- ; BLOCK B STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 6, 5, 9
- pmulhrsw m6, m10 ; stp1_27
- pmulhrsw m5, m10 ; stp1_20
- SUM_SUB 13, 14, 9
- pmulhrsw m13, m10 ; stp1_26
- pmulhrsw m14, m10 ; stp1_21
- SUM_SUB 1, 0, 9
- pmulhrsw m1, m10 ; stp1_25
- pmulhrsw m0, m10 ; stp1_22
- SUM_SUB 2, 3, 9
- pmulhrsw m2, m10 ; stp1_24
- pmulhrsw m3, m10 ; stp1_23
- %else
- BUTTERFLY_4X 6, 5, 11585, 11585, m8, 9, 10 ; stp1_20, stp1_27
- SWAP 6, 5
- BUTTERFLY_4X 13, 14, 11585, 11585, m8, 9, 10 ; stp1_21, stp1_26
- SWAP 13, 14
- BUTTERFLY_4X 1, 0, 11585, 11585, m8, 9, 10 ; stp1_22, stp1_25
- SWAP 1, 0
- BUTTERFLY_4X 2, 3, 11585, 11585, m8, 9, 10 ; stp1_23, stp1_24
- SWAP 2, 3
- %endif
- mova [stp + %3 + idx20], m5
- mova [stp + %3 + idx21], m14
- mova [stp + %3 + idx22], m0
- mova [stp + %3 + idx23], m3
- mova [stp + %4 + idx24], m2
- mova [stp + %4 + idx25], m1
- mova [stp + %4 + idx26], m13
- mova [stp + %4 + idx27], m6
- ; BLOCK C STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK C STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m0, [rsp + transposed_in + 16 * 2]
- mova m1, [rsp + transposed_in + 16 * 30]
- BUTTERFLY_4X 0, 1, 1606, 16305, m8, 9, 10 ; stp1_8, stp1_15
- mova m2, [rsp + transposed_in + 16 * 14]
- mova m3, [rsp + transposed_in + 16 * 18]
- BUTTERFLY_4X 3, 2, 12665, 10394, m8, 9, 10 ; stp1_9, stp1_14
- mova m4, [rsp + transposed_in + 16 * 10]
- mova m5, [rsp + transposed_in + 16 * 22]
- BUTTERFLY_4X 4, 5, 7723, 14449, m8, 9, 10 ; stp1_10, stp1_13
- mova m6, [rsp + transposed_in + 16 * 6]
- mova m7, [rsp + transposed_in + 16 * 26]
- BUTTERFLY_4X 7, 6, 15679, 4756, m8, 9, 10 ; stp1_11, stp1_12
- ; BLOCK C STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 3, 9 ; stp1_8, stp1_9
- SUM_SUB 7, 4, 9 ; stp1_11, stp1_10
- SUM_SUB 6, 5, 9 ; stp1_12, stp1_13
- SUM_SUB 1, 2, 9 ; stp1_15, stp1_14
- ; BLOCK C STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- BUTTERFLY_4X 2, 3, 6270, 15137, m8, 9, 10 ; stp1_9, stp1_14
- BUTTERFLY_4Xmm 5, 4, 6270, 15137, m8, 9, 10 ; stp1_13, stp1_10
- ; BLOCK C STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 7, 9 ; stp1_8, stp1_11
- SUM_SUB 2, 4, 9 ; stp1_9, stp1_10
- SUM_SUB 1, 6, 9 ; stp1_15, stp1_12
- SUM_SUB 3, 5, 9 ; stp1_14, stp1_13
- ; BLOCK C STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 5, 4, 9
- pmulhrsw m5, m10 ; stp1_13
- pmulhrsw m4, m10 ; stp1_10
- SUM_SUB 6, 7, 9
- pmulhrsw m6, m10 ; stp1_12
- pmulhrsw m7, m10 ; stp1_11
- %else
- BUTTERFLY_4X 5, 4, 11585, 11585, m8, 9, 10 ; stp1_10, stp1_13
- SWAP 5, 4
- BUTTERFLY_4X 6, 7, 11585, 11585, m8, 9, 10 ; stp1_11, stp1_12
- SWAP 6, 7
- %endif
- ; BLOCK C STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova [stp + %2 + idx8], m0
- mova [stp + %2 + idx9], m2
- mova [stp + %2 + idx10], m4
- mova [stp + %2 + idx11], m7
- mova [stp + %2 + idx12], m6
- mova [stp + %2 + idx13], m5
- mova [stp + %2 + idx14], m3
- mova [stp + %2 + idx15], m1
- ; BLOCK D STAGE 1 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK D STAGE 2 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ;
- ; BLOCK D STAGE 3 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m11, [rsp + transposed_in + 16 * 4]
- mova m12, [rsp + transposed_in + 16 * 28]
- BUTTERFLY_4X 11, 12, 3196, 16069, m8, 9, 10 ; stp1_4, stp1_7
- mova m13, [rsp + transposed_in + 16 * 12]
- mova m14, [rsp + transposed_in + 16 * 20]
- BUTTERFLY_4X 14, 13, 13623, 9102, m8, 9, 10 ; stp1_5, stp1_6
- ; BLOCK D STAGE 4 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m0, [rsp + transposed_in + 16 * 0]
- mova m1, [rsp + transposed_in + 16 * 16]
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- mova m10, [pw_11585x2]
- SUM_SUB 0, 1, 9
- pmulhrsw m0, m10 ; stp1_1
- pmulhrsw m1, m10 ; stp1_0
- %else
- BUTTERFLY_4X 0, 1, 11585, 11585, m8, 9, 10 ; stp1_1, stp1_0
- SWAP 0, 1
- %endif
- mova m2, [rsp + transposed_in + 16 * 8]
- mova m3, [rsp + transposed_in + 16 * 24]
- BUTTERFLY_4X 2, 3, 6270, 15137, m8, 9, 10 ; stp1_2, stp1_3
- mova m10, [pw_11585x2]
- SUM_SUB 11, 14, 9 ; stp1_4, stp1_5
- SUM_SUB 12, 13, 9 ; stp1_7, stp1_6
- ; BLOCK D STAGE 5 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- %if 0 ; overflow occurs in SUM_SUB when using test streams
- SUM_SUB 13, 14, 9
- pmulhrsw m13, m10 ; stp1_6
- pmulhrsw m14, m10 ; stp1_5
- %else
- BUTTERFLY_4X 13, 14, 11585, 11585, m8, 9, 10 ; stp1_5, stp1_6
- SWAP 13, 14
- %endif
- SUM_SUB 0, 3, 9 ; stp1_0, stp1_3
- SUM_SUB 1, 2, 9 ; stp1_1, stp1_2
- ; BLOCK D STAGE 6 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- SUM_SUB 0, 12, 9 ; stp1_0, stp1_7
- SUM_SUB 1, 13, 9 ; stp1_1, stp1_6
- SUM_SUB 2, 14, 9 ; stp1_2, stp1_5
- SUM_SUB 3, 11, 9 ; stp1_3, stp1_4
- ; BLOCK D STAGE 7 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- mova m4, [stp + %2 + idx12]
- mova m5, [stp + %2 + idx13]
- mova m6, [stp + %2 + idx14]
- mova m7, [stp + %2 + idx15]
- SUM_SUB 0, 7, 9 ; stp1_0, stp1_15
- SUM_SUB 1, 6, 9 ; stp1_1, stp1_14
- SUM_SUB 2, 5, 9 ; stp1_2, stp1_13
- SUM_SUB 3, 4, 9 ; stp1_3, stp1_12
- ; 0-3, 28-31 final stage
- mova m10, [stp + %4 + idx31]
- mova m15, [stp + %4 + idx30]
- SUM_SUB 0, 10, 9 ; stp1_0, stp1_31
- SUM_SUB 1, 15, 9 ; stp1_1, stp1_30
- mova [stp + %1 + idx0], m0
- mova [stp + %1 + idx1], m1
- mova [stp + %4 + idx31], m10
- mova [stp + %4 + idx30], m15
- mova m0, [stp + %4 + idx29]
- mova m1, [stp + %4 + idx28]
- SUM_SUB 2, 0, 9 ; stp1_2, stp1_29
- SUM_SUB 3, 1, 9 ; stp1_3, stp1_28
- mova [stp + %1 + idx2], m2
- mova [stp + %1 + idx3], m3
- mova [stp + %4 + idx29], m0
- mova [stp + %4 + idx28], m1
- ; 12-15, 16-19 final stage
- mova m0, [stp + %3 + idx16]
- mova m1, [stp + %3 + idx17]
- mova m2, [stp + %3 + idx18]
- mova m3, [stp + %3 + idx19]
- SUM_SUB 7, 0, 9 ; stp1_15, stp1_16
- SUM_SUB 6, 1, 9 ; stp1_14, stp1_17
- SUM_SUB 5, 2, 9 ; stp1_13, stp1_18
- SUM_SUB 4, 3, 9 ; stp1_12, stp1_19
- mova [stp + %2 + idx12], m4
- mova [stp + %2 + idx13], m5
- mova [stp + %2 + idx14], m6
- mova [stp + %2 + idx15], m7
- mova [stp + %3 + idx16], m0
- mova [stp + %3 + idx17], m1
- mova [stp + %3 + idx18], m2
- mova [stp + %3 + idx19], m3
- mova m4, [stp + %2 + idx8]
- mova m5, [stp + %2 + idx9]
- mova m6, [stp + %2 + idx10]
- mova m7, [stp + %2 + idx11]
- SUM_SUB 11, 7, 9 ; stp1_4, stp1_11
- SUM_SUB 14, 6, 9 ; stp1_5, stp1_10
- SUM_SUB 13, 5, 9 ; stp1_6, stp1_9
- SUM_SUB 12, 4, 9 ; stp1_7, stp1_8
- ; 4-7, 24-27 final stage
- mova m3, [stp + %4 + idx24]
- mova m2, [stp + %4 + idx25]
- mova m1, [stp + %4 + idx26]
- mova m0, [stp + %4 + idx27]
- SUM_SUB 12, 3, 9 ; stp1_7, stp1_24
- SUM_SUB 13, 2, 9 ; stp1_6, stp1_25
- SUM_SUB 14, 1, 9 ; stp1_5, stp1_26
- SUM_SUB 11, 0, 9 ; stp1_4, stp1_27
- mova [stp + %4 + idx24], m3
- mova [stp + %4 + idx25], m2
- mova [stp + %4 + idx26], m1
- mova [stp + %4 + idx27], m0
- mova [stp + %1 + idx4], m11
- mova [stp + %1 + idx5], m14
- mova [stp + %1 + idx6], m13
- mova [stp + %1 + idx7], m12
- ; 8-11, 20-23 final stage
- mova m0, [stp + %3 + idx20]
- mova m1, [stp + %3 + idx21]
- mova m2, [stp + %3 + idx22]
- mova m3, [stp + %3 + idx23]
- SUM_SUB 7, 0, 9 ; stp1_11, stp1_20
- SUM_SUB 6, 1, 9 ; stp1_10, stp1_21
- SUM_SUB 5, 2, 9 ; stp1_9, stp1_22
- SUM_SUB 4, 3, 9 ; stp1_8, stp1_23
- mova [stp + %2 + idx8], m4
- mova [stp + %2 + idx9], m5
- mova [stp + %2 + idx10], m6
- mova [stp + %2 + idx11], m7
- mova [stp + %3 + idx20], m0
- mova [stp + %3 + idx21], m1
- mova [stp + %3 + idx22], m2
- mova [stp + %3 + idx23], m3
- %endmacro
- INIT_XMM ssse3
- cglobal idct32x32_1024_add, 3, 11, 16, i32x32_size, input, output, stride
- mova m8, [pd_8192]
- mov r6, 4
- lea stp, [rsp + pass_one_start]
- idct32x32_1024:
- mov r3, inputq
- lea r4, [rsp + transposed_in]
- mov r7, 4
- idct32x32_1024_transpose:
- %if CONFIG_VP9_HIGHBITDEPTH
- mova m0, [r3 + 0]
- packssdw m0, [r3 + 16]
- mova m1, [r3 + 32 * 4]
- packssdw m1, [r3 + 32 * 4 + 16]
- mova m2, [r3 + 32 * 8]
- packssdw m2, [r3 + 32 * 8 + 16]
- mova m3, [r3 + 32 * 12]
- packssdw m3, [r3 + 32 * 12 + 16]
- mova m4, [r3 + 32 * 16]
- packssdw m4, [r3 + 32 * 16 + 16]
- mova m5, [r3 + 32 * 20]
- packssdw m5, [r3 + 32 * 20 + 16]
- mova m6, [r3 + 32 * 24]
- packssdw m6, [r3 + 32 * 24 + 16]
- mova m7, [r3 + 32 * 28]
- packssdw m7, [r3 + 32 * 28 + 16]
- %else
- mova m0, [r3 + 0]
- mova m1, [r3 + 16 * 4]
- mova m2, [r3 + 16 * 8]
- mova m3, [r3 + 16 * 12]
- mova m4, [r3 + 16 * 16]
- mova m5, [r3 + 16 * 20]
- mova m6, [r3 + 16 * 24]
- mova m7, [r3 + 16 * 28]
- %endif
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- mova [r4 + 0], m0
- mova [r4 + 16 * 1], m1
- mova [r4 + 16 * 2], m2
- mova [r4 + 16 * 3], m3
- mova [r4 + 16 * 4], m4
- mova [r4 + 16 * 5], m5
- mova [r4 + 16 * 6], m6
- mova [r4 + 16 * 7], m7
- %if CONFIG_VP9_HIGHBITDEPTH
- add r3, 32
- %else
- add r3, 16
- %endif
- add r4, 16 * 8
- dec r7
- jne idct32x32_1024_transpose
- IDCT32X32_1024 16*0, 16*32, 16*64, 16*96
- lea stp, [stp + 16 * 8]
- %if CONFIG_VP9_HIGHBITDEPTH
- lea inputq, [inputq + 32 * 32]
- %else
- lea inputq, [inputq + 16 * 32]
- %endif
- dec r6
- jnz idct32x32_1024
- mov r6, 4
- lea stp, [rsp + pass_one_start]
- lea r9, [rsp + pass_one_start]
- idct32x32_1024_2:
- lea r4, [rsp + transposed_in]
- mov r3, r9
- mov r7, 4
- idct32x32_1024_transpose_2:
- mova m0, [r3 + 0]
- mova m1, [r3 + 16 * 1]
- mova m2, [r3 + 16 * 2]
- mova m3, [r3 + 16 * 3]
- mova m4, [r3 + 16 * 4]
- mova m5, [r3 + 16 * 5]
- mova m6, [r3 + 16 * 6]
- mova m7, [r3 + 16 * 7]
- TRANSPOSE8X8 0, 1, 2, 3, 4, 5, 6, 7, 9
- mova [r4 + 0], m0
- mova [r4 + 16 * 1], m1
- mova [r4 + 16 * 2], m2
- mova [r4 + 16 * 3], m3
- mova [r4 + 16 * 4], m4
- mova [r4 + 16 * 5], m5
- mova [r4 + 16 * 6], m6
- mova [r4 + 16 * 7], m7
- add r3, 16 * 8
- add r4, 16 * 8
- dec r7
- jne idct32x32_1024_transpose_2
- IDCT32X32_1024 16*0, 16*8, 16*16, 16*24
- lea stp, [stp + 16 * 32]
- add r9, 16 * 32
- dec r6
- jnz idct32x32_1024_2
- RECON_AND_STORE pass_two_start
- RET
- %endif
|