/*
Copyright (C) 1996-1997 Id Software, Inc.

This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.

See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
//
// d_draw16.s
// x86 assembly-language horizontal 8-bpp span-drawing code, with 16-pixel
// subdivision.
//

#include "asm_i386.h"
#include "quakeasm.h"
#include "asm_draw.h"
#include "d_ifacea.h"
#if id386

//----------------------------------------------------------------------
// 8-bpp horizontal span drawing code for polygons, with no transparency and
// 16-pixel subdivision.
//
// Assumes there is at least one span in pspans, and that every span
// contains at least one pixel
//----------------------------------------------------------------------

	.data
	.text

// Out-of-line, rarely-needed clamping code.  Each LClamp* stub clamps one
// texture coordinate to [4096, bbextents/bbextentt] and jumps back into the
// main flow at the matching LClampReentry* label.  Kept out of line so the
// common (no-clamp) path stays branch-fall-through.
LClampHigh0:
	movl	C(bbextents),%esi
	jmp	LClampReentry0
LClampHighOrLow0:
	jg	LClampHigh0
	xorl	%esi,%esi		// clamp low to 0
	jmp	LClampReentry0
LClampHigh1:
	movl	C(bbextentt),%edx
	jmp	LClampReentry1
LClampHighOrLow1:
	jg	LClampHigh1
	xorl	%edx,%edx		// clamp low to 0
	jmp	LClampReentry1
LClampLow2:
	movl	$4096,%ebp
	jmp	LClampReentry2
LClampHigh2:
	movl	C(bbextents),%ebp
	jmp	LClampReentry2
LClampLow3:
	movl	$4096,%ecx
	jmp	LClampReentry3
LClampHigh3:
	movl	C(bbextentt),%ecx
	jmp	LClampReentry3
LClampLow4:
	movl	$4096,%eax
	jmp	LClampReentry4
LClampHigh4:
	movl	C(bbextents),%eax
	jmp	LClampReentry4
LClampLow5:
	movl	$4096,%ebx
	jmp	LClampReentry5
LClampHigh5:
	movl	C(bbextentt),%ebx
	jmp	LClampReentry5
//-----------------------------------------------------------------------
// void D_DrawSpans16 (espan_t *pspans)
//
// cdecl entry point.  Draws a linked list of horizontal 8-bpp spans with
// perspective-correct texture mapping, recalculating 1/z only every 16
// pixels and interpolating linearly in between.
// Stack arg offset is 4 (return address) + 16 (four saved registers).
//-----------------------------------------------------------------------
#define pspans	4+16

	.align 4
.globl C(D_DrawSpans16)
C(D_DrawSpans16):
	pushl	%ebp			// preserve caller's stack frame
	pushl	%edi
	pushl	%esi			// preserve register variables
	pushl	%ebx

//
// set up scaled-by-16 steps, for 16-long segments; also set up cacheblock
// and span list pointers
//
// TODO: any overlap from rearranging?
	flds	C(d_sdivzstepu)
	fmuls	fp_16
	movl	C(cacheblock),%edx
	flds	C(d_tdivzstepu)
	fmuls	fp_16
	movl	pspans(%esp),%ebx	// point to the first span descriptor
	flds	C(d_zistepu)
	fmuls	fp_16
	movl	%edx,pbase		// pbase = cacheblock
	fstps	zi16stepu
	fstps	tdivz16stepu
	fstps	sdivz16stepu
LSpanLoop:
//
// set up the initial s/z, t/z, and 1/z on the FP stack, and generate the
// initial s and t values
//
// FIXME: pipeline FILD?
	fildl	espan_t_v(%ebx)
	fildl	espan_t_u(%ebx)

	fld	%st(1)			// dv | du | dv
	fmuls	C(d_sdivzstepv)		// dv*d_sdivzstepv | du | dv
	fld	%st(1)			// du | dv*d_sdivzstepv | du | dv
	fmuls	C(d_sdivzstepu)		// du*d_sdivzstepu | dv*d_sdivzstepv | du | dv
	fld	%st(2)			// du | du*d_sdivzstepu | dv*d_sdivzstepv | du | dv
	fmuls	C(d_tdivzstepu)		// du*d_tdivzstepu | du*d_sdivzstepu |
					//  dv*d_sdivzstepv | du | dv
	fxch	%st(1)			// du*d_sdivzstepu | du*d_tdivzstepu |
					//  dv*d_sdivzstepv | du | dv
	faddp	%st(0),%st(2)		// du*d_tdivzstepu |
					//  du*d_sdivzstepu + dv*d_sdivzstepv | du | dv
	fxch	%st(1)			// du*d_sdivzstepu + dv*d_sdivzstepv |
					//  du*d_tdivzstepu | du | dv
	fld	%st(3)			// dv | du*d_sdivzstepu + dv*d_sdivzstepv |
					//  du*d_tdivzstepu | du | dv
	fmuls	C(d_tdivzstepv)		// dv*d_tdivzstepv |
					//  du*d_sdivzstepu + dv*d_sdivzstepv |
					//  du*d_tdivzstepu | du | dv
	fxch	%st(1)			// du*d_sdivzstepu + dv*d_sdivzstepv |
					//  dv*d_tdivzstepv | du*d_tdivzstepu | du | dv
	fadds	C(d_sdivzorigin)	// sdivz = d_sdivzorigin + dv*d_sdivzstepv +
					//  du*d_sdivzstepu; stays in %st(2) at end
	fxch	%st(4)			// dv | dv*d_tdivzstepv | du*d_tdivzstepu | du |
					//  s/z
	fmuls	C(d_zistepv)		// dv*d_zistepv | dv*d_tdivzstepv |
					//  du*d_tdivzstepu | du | s/z
	fxch	%st(1)			// dv*d_tdivzstepv | dv*d_zistepv |
					//  du*d_tdivzstepu | du | s/z
	faddp	%st(0),%st(2)		// dv*d_zistepv |
					//  dv*d_tdivzstepv + du*d_tdivzstepu | du | s/z
	fxch	%st(2)			// du | dv*d_tdivzstepv + du*d_tdivzstepu |
					//  dv*d_zistepv | s/z
	fmuls	C(d_zistepu)		// du*d_zistepu |
					//  dv*d_tdivzstepv + du*d_tdivzstepu |
					//  dv*d_zistepv | s/z
	fxch	%st(1)			// dv*d_tdivzstepv + du*d_tdivzstepu |
					//  du*d_zistepu | dv*d_zistepv | s/z
	fadds	C(d_tdivzorigin)	// tdivz = d_tdivzorigin + dv*d_tdivzstepv +
					//  du*d_tdivzstepu; stays in %st(1) at end
	fxch	%st(2)			// dv*d_zistepv | du*d_zistepu | t/z | s/z
	faddp	%st(0),%st(1)		// dv*d_zistepv + du*d_zistepu | t/z | s/z
	flds	fp_64k			// fp_64k | dv*d_zistepv + du*d_zistepu | t/z | s/z
	fxch	%st(1)			// dv*d_zistepv + du*d_zistepu | fp_64k | t/z | s/z
	fadds	C(d_ziorigin)		// zi = d_ziorigin + dv*d_zistepv +
					//  du*d_zistepu; stays in %st(0) at end
					// 1/z | fp_64k | t/z | s/z
//
// calculate and clamp s & t
//
	fdivr	%st(0),%st(1)		// 1/z | z*64k | t/z | s/z

//
// point %edi to the first pixel in the span
//
	movl	C(d_viewbuffer),%ecx
	movl	espan_t_v(%ebx),%eax
	movl	%ebx,pspantemp		// preserve spans pointer

	movl	C(tadjust),%edx
	movl	C(sadjust),%esi
	movl	C(d_scantable)(,%eax,4),%edi	// v * screenwidth
	addl	%ecx,%edi
	movl	espan_t_u(%ebx),%ecx
	addl	%ecx,%edi		// pdest = &pdestspan[scans->u];
	movl	espan_t_count(%ebx),%ecx

//
// now start the FDIV for the end of the span
//
	cmpl	$16,%ecx
	ja	LSetupNotLast1

	decl	%ecx
	jz	LCleanup1		// if only one pixel, no need to start an FDIV
	movl	%ecx,spancountminus1

// finish up the s and t calcs
	fxch	%st(1)			// z*64k | 1/z | t/z | s/z

	fld	%st(0)			// z*64k | z*64k | 1/z | t/z | s/z
	fmul	%st(4),%st(0)		// s | z*64k | 1/z | t/z | s/z
	fxch	%st(1)			// z*64k | s | 1/z | t/z | s/z
	fmul	%st(3),%st(0)		// t | s | 1/z | t/z | s/z
	fxch	%st(1)			// s | t | 1/z | t/z | s/z
	fistpl	s			// 1/z | t | t/z | s/z
	fistpl	t			// 1/z | t/z | s/z

	fildl	spancountminus1

	flds	C(d_tdivzstepu)		// C(d_tdivzstepu) | spancountminus1
	flds	C(d_zistepu)		// C(d_zistepu) | C(d_tdivzstepu) | spancountminus1
	fmul	%st(2),%st(0)		// C(d_zistepu)*scm1 | C(d_tdivzstepu) | scm1
	fxch	%st(1)			// C(d_tdivzstepu) | C(d_zistepu)*scm1 | scm1
	fmul	%st(2),%st(0)		// C(d_tdivzstepu)*scm1 | C(d_zistepu)*scm1 | scm1
	fxch	%st(2)			// scm1 | C(d_zistepu)*scm1 | C(d_tdivzstepu)*scm1
	fmuls	C(d_sdivzstepu)		// C(d_sdivzstepu)*scm1 | C(d_zistepu)*scm1 |
					//  C(d_tdivzstepu)*scm1
	fxch	%st(1)			// C(d_zistepu)*scm1 | C(d_sdivzstepu)*scm1 |
					//  C(d_tdivzstepu)*scm1
	faddp	%st(0),%st(3)		// C(d_sdivzstepu)*scm1 | C(d_tdivzstepu)*scm1
	fxch	%st(1)			// C(d_tdivzstepu)*scm1 | C(d_sdivzstepu)*scm1
	faddp	%st(0),%st(3)		// C(d_sdivzstepu)*scm1
	faddp	%st(0),%st(3)

	flds	fp_64k
	fdiv	%st(1),%st(0)		// this is what we've gone to all this trouble to
					//  overlap
	jmp	LFDIVInFlight1

LCleanup1:
// finish up the s and t calcs
	fxch	%st(1)			// z*64k | 1/z | t/z | s/z

	fld	%st(0)			// z*64k | z*64k | 1/z | t/z | s/z
	fmul	%st(4),%st(0)		// s | z*64k | 1/z | t/z | s/z
	fxch	%st(1)			// z*64k | s | 1/z | t/z | s/z
	fmul	%st(3),%st(0)		// t | s | 1/z | t/z | s/z
	fxch	%st(1)			// s | t | 1/z | t/z | s/z
	fistpl	s			// 1/z | t | t/z | s/z
	fistpl	t			// 1/z | t/z | s/z
	jmp	LFDIVInFlight1
- .align 4
- LSetupNotLast1:
- // finish up the s and t calcs
- fxch %st(1) // z*64k | 1/z | t/z | s/z
- fld %st(0) // z*64k | z*64k | 1/z | t/z | s/z
- fmul %st(4),%st(0) // s | z*64k | 1/z | t/z | s/z
- fxch %st(1) // z*64k | s | 1/z | t/z | s/z
- fmul %st(3),%st(0) // t | s | 1/z | t/z | s/z
- fxch %st(1) // s | t | 1/z | t/z | s/z
- fistpl s // 1/z | t | t/z | s/z
- fistpl t // 1/z | t/z | s/z
- fadds zi16stepu
- fxch %st(2)
- fadds sdivz16stepu
- fxch %st(2)
- flds tdivz16stepu
- faddp %st(0),%st(2)
- flds fp_64k
- fdiv %st(1),%st(0) // z = 1/1/z
- // this is what we've gone to all this trouble to
- // overlap
LFDIVInFlight1:

	addl	s,%esi
	addl	t,%edx
	movl	C(bbextents),%ebx
	movl	C(bbextentt),%ebp
	cmpl	%ebx,%esi
	ja	LClampHighOrLow0
LClampReentry0:
	movl	%esi,s
	movl	pbase,%ebx
	shll	$16,%esi
	cmpl	%ebp,%edx
	movl	%esi,sfracf
	ja	LClampHighOrLow1
LClampReentry1:
	movl	%edx,t
	movl	s,%esi			// sfrac = scans->sfrac;
	shll	$16,%edx
	movl	t,%eax			// tfrac = scans->tfrac;
	sarl	$16,%esi
	movl	%edx,tfracf

//
// calculate the texture starting address
//
	sarl	$16,%eax
	movl	C(cachewidth),%edx
	imull	%edx,%eax		// (tfrac >> 16) * cachewidth
	addl	%ebx,%esi
	addl	%eax,%esi		// psource = pbase + (sfrac >> 16) +
					//           ((tfrac >> 16) * cachewidth);

//
// determine whether last span or not
//
	cmpl	$16,%ecx
	jna	LLastSegment

//
// not the last segment; do full 16-wide segment
//
LNotLastSegment:

//
// advance s/z, t/z, and 1/z, and calculate s & t at end of span and steps to
// get there
//

// pick up after the FDIV that was left in flight previously

	fld	%st(0)			// duplicate it
	fmul	%st(4),%st(0)		// s = s/z * z
	fxch	%st(1)
	fmul	%st(3),%st(0)		// t = t/z * z
	fxch	%st(1)
	fistpl	snext
	fistpl	tnext
	movl	snext,%eax
	movl	tnext,%edx

	movb	(%esi),%bl		// get first source texel
	subl	$16,%ecx		// count off this segments' pixels
	movl	C(sadjust),%ebp
	movl	%ecx,counttemp		// remember count of remaining pixels

	movl	C(tadjust),%ecx
	movb	%bl,(%edi)		// store first dest pixel

	addl	%eax,%ebp
	addl	%edx,%ecx

	movl	C(bbextents),%eax
	movl	C(bbextentt),%edx

	cmpl	$4096,%ebp
	jl	LClampLow2
	cmpl	%eax,%ebp
	ja	LClampHigh2
LClampReentry2:

	cmpl	$4096,%ecx
	jl	LClampLow3
	cmpl	%edx,%ecx
	ja	LClampHigh3
LClampReentry3:

	movl	%ebp,snext
	movl	%ecx,tnext

	subl	s,%ebp
	subl	t,%ecx

//
// set up advancetable
//
	movl	%ecx,%eax
	movl	%ebp,%edx
	sarl	$20,%eax		// tstep: (delta >> 16) / 16 = per-pixel whole step
	jz	LZero
	sarl	$20,%edx		// sstep: (delta >> 16) / 16 = per-pixel whole step
	movl	C(cachewidth),%ebx
	imull	%ebx,%eax
	jmp	LSetUp1

LZero:
	sarl	$20,%edx		// sstep: (delta >> 16) / 16 = per-pixel whole step
	movl	C(cachewidth),%ebx

LSetUp1:

	addl	%edx,%eax		// add in sstep
					// (tstep >> 16) * cachewidth + (sstep >> 16);
	movl	tfracf,%edx
	movl	%eax,advancetable+4	// advance base in t
	addl	%ebx,%eax		// ((tstep >> 16) + 1) * cachewidth +
					//  (sstep >> 16);
	shll	$12,%ebp		// left-justify sstep fractional part
	movl	sfracf,%ebx
	shll	$12,%ecx		// left-justify tstep fractional part
	movl	%eax,advancetable	// advance extra in t

	movl	%ecx,tstep
	addl	%ecx,%edx		// advance tfrac fractional part by tstep frac

	sbbl	%ecx,%ecx		// turn tstep carry into -1 (0 if none)
	addl	%ebp,%ebx		// advance sfrac fractional part by sstep frac
	adcl	advancetable+4(,%ecx,4),%esi	// point to next source texel

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	(%esi),%al
	addl	%ebp,%ebx
	movb	%al,1(%edi)
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,2(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,3(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,4(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,5(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,6(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,7(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

//
// start FDIV for end of next segment in flight, so it can overlap
//
	movl	counttemp,%ecx
	cmpl	$16,%ecx		// more than one segment after this?
	ja	LSetupNotLast2		// yes

	decl	%ecx
	jz	LFDIVInFlight2		// if only one pixel, no need to start an FDIV
	movl	%ecx,spancountminus1
	fildl	spancountminus1

	flds	C(d_zistepu)		// C(d_zistepu) | spancountminus1
	fmul	%st(1),%st(0)		// C(d_zistepu)*scm1 | scm1
	flds	C(d_tdivzstepu)		// C(d_tdivzstepu) | C(d_zistepu)*scm1 | scm1
	fmul	%st(2),%st(0)		// C(d_tdivzstepu)*scm1 | C(d_zistepu)*scm1 | scm1
	fxch	%st(1)			// C(d_zistepu)*scm1 | C(d_tdivzstepu)*scm1 | scm1
	faddp	%st(0),%st(3)		// C(d_tdivzstepu)*scm1 | scm1
	fxch	%st(1)			// scm1 | C(d_tdivzstepu)*scm1
	fmuls	C(d_sdivzstepu)		// C(d_sdivzstepu)*scm1 | C(d_tdivzstepu)*scm1
	fxch	%st(1)			// C(d_tdivzstepu)*scm1 | C(d_sdivzstepu)*scm1
	faddp	%st(0),%st(3)		// C(d_sdivzstepu)*scm1
	flds	fp_64k			// 64k | C(d_sdivzstepu)*scm1
	fxch	%st(1)			// C(d_sdivzstepu)*scm1 | 64k
	faddp	%st(0),%st(4)		// 64k
	fdiv	%st(1),%st(0)		// this is what we've gone to all this trouble to
					//  overlap
	jmp	LFDIVInFlight2
- .align 4
- LSetupNotLast2:
- fadds zi16stepu
- fxch %st(2)
- fadds sdivz16stepu
- fxch %st(2)
- flds tdivz16stepu
- faddp %st(0),%st(2)
- flds fp_64k
- fdiv %st(1),%st(0) // z = 1/1/z
- // this is what we've gone to all this trouble to
- // overlap
LFDIVInFlight2:
	movl	%ecx,counttemp

	// pixels 8..15 of the 16-wide segment, same carry-chain stepping as 1..7
	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,8(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,9(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,10(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,11(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,12(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,13(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,14(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi

	addl	$16,%edi
	movl	%edx,tfracf
	movl	snext,%edx
	movl	%ebx,sfracf
	movl	tnext,%ebx
	movl	%edx,s
	movl	%ebx,t

	movl	counttemp,%ecx		// retrieve count

//
// determine whether last span or not
//
	cmpl	$16,%ecx		// are there multiple segments remaining?
	movb	%al,-1(%edi)		// store 16th pixel of this segment
	ja	LNotLastSegment		// yes
//
// last segment of scan
//
LLastSegment:

//
// advance s/z, t/z, and 1/z, and calculate s & t at end of span and steps to
// get there. The number of pixels left is variable, and we want to land on the
// last pixel, not step one past it, so we can't run into arithmetic problems
//
	testl	%ecx,%ecx
	jz	LNoSteps		// just draw the last pixel and we're done

// pick up after the FDIV that was left in flight previously

	fld	%st(0)			// duplicate it
	fmul	%st(4),%st(0)		// s = s/z * z
	fxch	%st(1)
	fmul	%st(3),%st(0)		// t = t/z * z
	fxch	%st(1)
	fistpl	snext
	fistpl	tnext

	movb	(%esi),%al		// load first texel in segment
	movl	C(tadjust),%ebx
	movb	%al,(%edi)		// store first pixel in segment
	movl	C(sadjust),%eax

	addl	snext,%eax
	addl	tnext,%ebx

	movl	C(bbextents),%ebp
	movl	C(bbextentt),%edx

	cmpl	$4096,%eax
	jl	LClampLow4
	cmpl	%ebp,%eax
	ja	LClampHigh4
LClampReentry4:
	movl	%eax,snext

	cmpl	$4096,%ebx
	jl	LClampLow5
	cmpl	%edx,%ebx
	ja	LClampHigh5
LClampReentry5:

	cmpl	$1,%ecx			// don't bother
	je	LOnlyOneStep		// if two pixels in segment, there's only one step,
					//  of the segment length
	subl	s,%eax
	subl	t,%ebx

	addl	%eax,%eax		// convert to 15.17 format so multiply by 1.31
	addl	%ebx,%ebx		//  reciprocal yields 16.48

	imull	reciprocal_table_16-8(,%ecx,4)	// sstep = (snext - s) /
						//  (spancount-1)
	movl	%edx,%ebp

	movl	%ebx,%eax
	imull	reciprocal_table_16-8(,%ecx,4)	// tstep = (tnext - t) /
						//  (spancount-1)
LSetEntryvec:
//
// set up advancetable
//
	movl	entryvec_table_16(,%ecx,4),%ebx
	movl	%edx,%eax
	movl	%ebx,jumptemp		// entry point into code for RET later
	movl	%ebp,%ecx
	sarl	$16,%edx		// tstep >>= 16;
	movl	C(cachewidth),%ebx
	sarl	$16,%ecx		// sstep >>= 16;
	imull	%ebx,%edx

	addl	%ecx,%edx		// add in sstep
					// (tstep >> 16) * cachewidth + (sstep >> 16);
	movl	tfracf,%ecx
	movl	%edx,advancetable+4	// advance base in t
	addl	%ebx,%edx		// ((tstep >> 16) + 1) * cachewidth +
					//  (sstep >> 16);
	shll	$16,%ebp		// left-justify sstep fractional part
	movl	sfracf,%ebx
	shll	$16,%eax		// left-justify tstep fractional part
	movl	%edx,advancetable	// advance extra in t
	movl	%eax,tstep

	movl	%ecx,%edx
	addl	%eax,%edx
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi

	jmp	*jumptemp		// jump to the number-of-pixels handler

//----------------------------------------

LNoSteps:
	movb	(%esi),%al		// load first texel in segment
	subl	$15,%edi		// adjust for hardwired offset
	jmp	LEndSpan

LOnlyOneStep:
	subl	s,%eax
	subl	t,%ebx
	movl	%eax,%ebp
	movl	%ebx,%edx
	jmp	LSetEntryvec
//----------------------------------------
// Variable-length tail handlers.  entryvec_table_16 dispatches here by pixel
// count; each EntryN_16 pre-adjusts %edi so the shared unrolled store chain
// below (with hardwired 1..15(%edi) offsets) lands on the right pixels, does
// the first carry-chain texel step, then falls into the chain at LEntryN_16.
//----------------------------------------

.globl	Entry2_16, Entry3_16, Entry4_16, Entry5_16
.globl	Entry6_16, Entry7_16, Entry8_16, Entry9_16
.globl	Entry10_16, Entry11_16, Entry12_16, Entry13_16
.globl	Entry14_16, Entry15_16, Entry16_16

Entry2_16:
	subl	$14,%edi		// adjust for hardwired offsets
	movb	(%esi),%al
	jmp	LEntry2_16

//----------------------------------------

Entry3_16:
	subl	$13,%edi		// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	jmp	LEntry3_16

//----------------------------------------

Entry4_16:
	subl	$12,%edi		// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry4_16

//----------------------------------------

Entry5_16:
	subl	$11,%edi		// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry5_16

//----------------------------------------

Entry6_16:
	subl	$10,%edi		// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry6_16

//----------------------------------------

Entry7_16:
	subl	$9,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry7_16

//----------------------------------------

Entry8_16:
	subl	$8,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry8_16

//----------------------------------------

Entry9_16:
	subl	$7,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry9_16

//----------------------------------------

Entry10_16:
	subl	$6,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry10_16

//----------------------------------------

Entry11_16:
	subl	$5,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry11_16

//----------------------------------------

Entry12_16:
	subl	$4,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry12_16

//----------------------------------------

Entry13_16:
	subl	$3,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry13_16

//----------------------------------------

Entry14_16:
	subl	$2,%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry14_16

//----------------------------------------

Entry15_16:
	decl	%edi			// adjust for hardwired offsets
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
	jmp	LEntry15_16

//----------------------------------------

Entry16_16:
	addl	%eax,%edx
	movb	(%esi),%al
	sbbl	%ecx,%ecx
	addl	%ebp,%ebx
	adcl	advancetable+4(,%ecx,4),%esi

	addl	tstep,%edx
	sbbl	%ecx,%ecx
	movb	%al,1(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
// shared unrolled store chain; EntryN_16 stubs fall in at LEntryN_16
LEntry15_16:
	sbbl	%ecx,%ecx
	movb	%al,2(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry14_16:
	sbbl	%ecx,%ecx
	movb	%al,3(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry13_16:
	sbbl	%ecx,%ecx
	movb	%al,4(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry12_16:
	sbbl	%ecx,%ecx
	movb	%al,5(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry11_16:
	sbbl	%ecx,%ecx
	movb	%al,6(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry10_16:
	sbbl	%ecx,%ecx
	movb	%al,7(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry9_16:
	sbbl	%ecx,%ecx
	movb	%al,8(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry8_16:
	sbbl	%ecx,%ecx
	movb	%al,9(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry7_16:
	sbbl	%ecx,%ecx
	movb	%al,10(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry6_16:
	sbbl	%ecx,%ecx
	movb	%al,11(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry5_16:
	sbbl	%ecx,%ecx
	movb	%al,12(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
	addl	tstep,%edx
LEntry4_16:
	sbbl	%ecx,%ecx
	movb	%al,13(%edi)
	addl	%ebp,%ebx
	movb	(%esi),%al
	adcl	advancetable+4(,%ecx,4),%esi
LEntry3_16:
	movb	%al,14(%edi)
	movb	(%esi),%al
LEntry2_16:

LEndSpan:

//
// clear s/z, t/z, 1/z from FP stack
//
	fstp	%st(0)
	fstp	%st(0)
	fstp	%st(0)

	movl	pspantemp,%ebx		// restore spans pointer
	movl	espan_t_pnext(%ebx),%ebx	// point to next span
	testl	%ebx,%ebx		// any more spans?
	movb	%al,15(%edi)		// store last pixel of span
	jnz	LSpanLoop		// more spans

	popl	%ebx			// restore register variables
	popl	%esi
	popl	%edi
	popl	%ebp			// restore the caller's stack frame
	ret

#endif	// id386