/**************************************************************************/
/*  rendering_device.cpp                                                  */
/**************************************************************************/
/*                         This file is part of:                          */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#include "rendering_device.h"
#include "rendering_device.compat.inc"

#include "rendering_device_binds.h"
#include "shader_include_db.h"

#include "core/config/project_settings.h"
#include "core/io/dir_access.h"

// TODO: Thread safety
// - Roll back thread safe attribute for RID_Owner members after the read-only/atomic update scheme is implemented.

#define FORCE_SEPARATE_PRESENT_QUEUE 0
#define PRINT_FRAMEBUFFER_FORMAT 0

#define ERR_RENDER_THREAD_MSG String("This function (") + String(__func__) + String(") can only be called from the render thread. ")
#define ERR_RENDER_THREAD_GUARD() ERR_FAIL_COND_MSG(render_thread_id != Thread::get_caller_id(), ERR_RENDER_THREAD_MSG);
#define ERR_RENDER_THREAD_GUARD_V(m_ret) ERR_FAIL_COND_V_MSG(render_thread_id != Thread::get_caller_id(), (m_ret), ERR_RENDER_THREAD_MSG);
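// Usage note: one of these guards goes at the top of every method that must only run on the
// render thread; the _V variant is for methods that return a value (see buffer_copy() below,
// which uses ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE)).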
/**************************/
/**** HELPER FUNCTIONS ****/
/**************************/

static String _get_device_vendor_name(const RenderingContextDriver::Device &p_device) {
	switch (p_device.vendor) {
		case RenderingContextDriver::VENDOR_AMD:
			return "AMD";
		case RenderingContextDriver::VENDOR_IMGTEC:
			return "ImgTec";
		case RenderingContextDriver::VENDOR_APPLE:
			return "Apple";
		case RenderingContextDriver::VENDOR_NVIDIA:
			return "NVIDIA";
		case RenderingContextDriver::VENDOR_ARM:
			return "ARM";
		case RenderingContextDriver::VENDOR_MICROSOFT:
			return "Microsoft";
		case RenderingContextDriver::VENDOR_QUALCOMM:
			return "Qualcomm";
		case RenderingContextDriver::VENDOR_INTEL:
			return "Intel";
		default:
			return "Unknown";
	}
}

static String _get_device_type_name(const RenderingContextDriver::Device &p_device) {
	switch (p_device.type) {
		case RenderingContextDriver::DEVICE_TYPE_INTEGRATED_GPU:
			return "Integrated";
		case RenderingContextDriver::DEVICE_TYPE_DISCRETE_GPU:
			return "Discrete";
		case RenderingContextDriver::DEVICE_TYPE_VIRTUAL_GPU:
			return "Virtual";
		case RenderingContextDriver::DEVICE_TYPE_CPU:
			return "CPU";
		case RenderingContextDriver::DEVICE_TYPE_OTHER:
		default:
			return "Other";
	}
}

static uint32_t _get_device_type_score(const RenderingContextDriver::Device &p_device) {
	static const bool prefer_integrated = OS::get_singleton()->get_user_prefers_integrated_gpu();
	switch (p_device.type) {
		case RenderingContextDriver::DEVICE_TYPE_INTEGRATED_GPU:
			return prefer_integrated ? 5 : 4;
		case RenderingContextDriver::DEVICE_TYPE_DISCRETE_GPU:
			return prefer_integrated ? 4 : 5;
		case RenderingContextDriver::DEVICE_TYPE_VIRTUAL_GPU:
			return 3;
		case RenderingContextDriver::DEVICE_TYPE_CPU:
			return 2;
		case RenderingContextDriver::DEVICE_TYPE_OTHER:
		default:
			return 1;
	}
}
/**************************/
/**** RENDERING DEVICE ****/
/**************************/

// When true, the command graph will attempt to reorder the rendering commands submitted by the user based on the dependencies detected from
// the commands automatically. This should improve rendering performance in most scenarios at the cost of some extra CPU overhead.
//
// This behavior can be disabled if it's suspected that the graph is not detecting dependencies correctly and more control over the order of
// the commands is desired (e.g. debugging).
#define RENDER_GRAPH_REORDER 1

// Synchronization barriers are issued between the graph's levels only with the necessary amount of detail to achieve the correct result. If
// it's suspected that the graph is not doing this correctly, full barriers can be issued instead that will block all types of operations
// between the synchronization levels. This setting will have a very negative impact on performance when enabled, so it's only intended for
// debugging purposes.
#define RENDER_GRAPH_FULL_BARRIERS 0

// The command graph can automatically issue secondary command buffers and record them on background threads when they reach an arbitrary
// size threshold. This can be very beneficial towards reducing the time the main thread takes to record all the rendering commands. However,
// this setting is not enabled by default as it's been shown to cause some strange issues with certain IHVs that have yet to be understood.
#define SECONDARY_COMMAND_BUFFERS_PER_FRAME 0

RenderingDevice *RenderingDevice::singleton = nullptr;

RenderingDevice *RenderingDevice::get_singleton() {
	return singleton;
}

RenderingDevice::ShaderCompileToSPIRVFunction RenderingDevice::compile_to_spirv_function = nullptr;
RenderingDevice::ShaderCacheFunction RenderingDevice::cache_function = nullptr;
RenderingDevice::ShaderSPIRVGetCacheKeyFunction RenderingDevice::get_spirv_cache_key_function = nullptr;

/***************************/
/**** ID INFRASTRUCTURE ****/
/***************************/
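// Resources are linked through two maps: dependency_map[X] holds every RID that depends on X,
// and reverse_dependency_map[Y] holds everything Y depends on. Together they let free()
// cascade: freeing a resource first frees all of its dependents, then unregisters itself from
// the resources it depended on.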
void RenderingDevice::_add_dependency(RID p_id, RID p_depends_on) {
	_THREAD_SAFE_METHOD_

	HashSet<RID> *set = dependency_map.getptr(p_depends_on);
	if (set == nullptr) {
		set = &dependency_map.insert(p_depends_on, HashSet<RID>())->value;
	}
	set->insert(p_id);

	set = reverse_dependency_map.getptr(p_id);
	if (set == nullptr) {
		set = &reverse_dependency_map.insert(p_id, HashSet<RID>())->value;
	}
	set->insert(p_depends_on);
}

void RenderingDevice::_free_dependencies(RID p_id) {
	_THREAD_SAFE_METHOD_

	// Direct dependencies must be freed.
	HashMap<RID, HashSet<RID>>::Iterator E = dependency_map.find(p_id);
	if (E) {
		while (E->value.size()) {
			free(*E->value.begin());
		}
		dependency_map.remove(E);
	}

	// Reverse dependencies must be unreferenced.
	E = reverse_dependency_map.find(p_id);
	if (E) {
		for (const RID &F : E->value) {
			HashMap<RID, HashSet<RID>>::Iterator G = dependency_map.find(F);
			ERR_CONTINUE(!G);
			ERR_CONTINUE(!G->value.has(p_id));
			G->value.erase(p_id);
		}
		reverse_dependency_map.remove(E);
	}
}
/*******************************/
/**** SHADER INFRASTRUCTURE ****/
/*******************************/
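// RenderingDevice does not compile GLSL to SPIR-V itself; a shader compiler module (glslang
// in stock Godot builds) registers these function pointers at startup.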
void RenderingDevice::shader_set_compile_to_spirv_function(ShaderCompileToSPIRVFunction p_function) {
	compile_to_spirv_function = p_function;
}

void RenderingDevice::shader_set_spirv_cache_function(ShaderCacheFunction p_function) {
	cache_function = p_function;
}

void RenderingDevice::shader_set_get_cache_key_function(ShaderSPIRVGetCacheKeyFunction p_function) {
	get_spirv_cache_key_function = p_function;
}
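// Compilation consults the cache first (when p_allow_cache is set and a cache function is
// registered), then resolves #include directives through ShaderIncludeDB before handing the
// source to the registered compiler.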
Vector<uint8_t> RenderingDevice::shader_compile_spirv_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language, String *r_error, bool p_allow_cache) {
	if (p_allow_cache && cache_function) {
		Vector<uint8_t> cache = cache_function(p_stage, p_source_code, p_language);
		if (cache.size()) {
			return cache;
		}
	}

	ERR_FAIL_NULL_V(compile_to_spirv_function, Vector<uint8_t>());

	return compile_to_spirv_function(p_stage, ShaderIncludeDB::parse_include_files(p_source_code), p_language, r_error, this);
}

String RenderingDevice::shader_get_spirv_cache_key() const {
	if (get_spirv_cache_key_function) {
		return get_spirv_cache_key_function(this);
	}
	return String();
}

RID RenderingDevice::shader_create_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name) {
	Vector<uint8_t> bytecode = shader_compile_binary_from_spirv(p_spirv, p_shader_name);
	ERR_FAIL_COND_V(bytecode.is_empty(), RID());
	return shader_create_from_bytecode(bytecode);
}
/***************************/
/**** BUFFER MANAGEMENT ****/
/***************************/
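// Each buffer type lives in its own RID owner but shares the Buffer struct; this helper
// resolves a RID against each owner in turn. Texture buffers are currently not resolvable
// here (see the FIXME below).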
RenderingDevice::Buffer *RenderingDevice::_get_buffer_from_owner(RID p_buffer) {
	Buffer *buffer = nullptr;
	if (vertex_buffer_owner.owns(p_buffer)) {
		buffer = vertex_buffer_owner.get_or_null(p_buffer);
	} else if (index_buffer_owner.owns(p_buffer)) {
		buffer = index_buffer_owner.get_or_null(p_buffer);
	} else if (uniform_buffer_owner.owns(p_buffer)) {
		buffer = uniform_buffer_owner.get_or_null(p_buffer);
	} else if (texture_buffer_owner.owns(p_buffer)) {
		DEV_ASSERT(false && "FIXME: Broken.");
		//buffer = texture_buffer_owner.get_or_null(p_buffer)->buffer;
	} else if (storage_buffer_owner.owns(p_buffer)) {
		buffer = storage_buffer_owner.get_or_null(p_buffer);
	}
	return buffer;
}
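// Initial buffer data goes through a transfer worker: the bytes are copied into the worker's
// CPU-visible staging buffer and a buffer-to-buffer copy is recorded on the worker's command
// buffer. The GPU-side transfer only happens once that command buffer is submitted, which is
// why the worker's operation counter is tracked on the buffer.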
Error RenderingDevice::_buffer_initialize(Buffer *p_buffer, const uint8_t *p_data, size_t p_data_size, uint32_t p_required_align) {
	uint32_t transfer_worker_offset;
	TransferWorker *transfer_worker = _acquire_transfer_worker(p_data_size, p_required_align, transfer_worker_offset);
	p_buffer->transfer_worker_index = transfer_worker->index;

	{
		MutexLock lock(transfer_worker->operations_mutex);
		p_buffer->transfer_worker_operation = ++transfer_worker->operations_counter;
	}

	// Copy to the worker's staging buffer.
	uint8_t *data_ptr = driver->buffer_map(transfer_worker->staging_buffer);
	ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);

	memcpy(data_ptr + transfer_worker_offset, p_data, p_data_size);
	driver->buffer_unmap(transfer_worker->staging_buffer);

	// Copy from the staging buffer to the real buffer.
	RDD::BufferCopyRegion region;
	region.src_offset = transfer_worker_offset;
	region.dst_offset = 0;
	region.size = p_data_size;
	driver->command_copy_buffer(transfer_worker->command_buffer, transfer_worker->staging_buffer, p_buffer->driver_id, region);

	_release_transfer_worker(transfer_worker);

	return OK;
}
Error RenderingDevice::_insert_staging_block(StagingBuffers &p_staging_buffers) {
	StagingBufferBlock block;

	block.driver_id = driver->buffer_create(p_staging_buffers.block_size, p_staging_buffers.usage_bits, RDD::MEMORY_ALLOCATION_TYPE_CPU);
	ERR_FAIL_COND_V(!block.driver_id, ERR_CANT_CREATE);

	block.frame_used = 0;
	block.fill_amount = 0;

	p_staging_buffers.blocks.insert(p_staging_buffers.current, block);
	return OK;
}
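// Allocation strategy for the staging ring, as implemented below: sub-allocate from the
// current block if it was already claimed this frame and still has room (or can fit at least
// one aligned chunk when p_can_segment is set). Otherwise walk the ring, reusing blocks old
// enough to have been fully processed, growing the pool while it is under max_size, and as a
// last resort reporting a required action so the caller stalls or flushes frames to make room.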
Error RenderingDevice::_staging_buffer_allocate(StagingBuffers &p_staging_buffers, uint32_t p_amount, uint32_t p_required_align, uint32_t &r_alloc_offset, uint32_t &r_alloc_size, StagingRequiredAction &r_required_action, bool p_can_segment) {
	// Determine a block to use.

	r_alloc_size = p_amount;
	r_required_action = STAGING_REQUIRED_ACTION_NONE;

	while (true) {
		r_alloc_offset = 0;

		// See if we can use the current block.
		if (p_staging_buffers.blocks[p_staging_buffers.current].frame_used == frames_drawn) {
			// We used this block this frame, let's see if there is still room.

			uint32_t write_from = p_staging_buffers.blocks[p_staging_buffers.current].fill_amount;

			{
				uint32_t align_remainder = write_from % p_required_align;
				if (align_remainder != 0) {
					write_from += p_required_align - align_remainder;
				}
			}

			int32_t available_bytes = int32_t(p_staging_buffers.block_size) - int32_t(write_from);

			if ((int32_t)p_amount < available_bytes) {
				// All is good, everything will fit.
				r_alloc_offset = write_from;
			} else if (p_can_segment && available_bytes >= (int32_t)p_required_align) {
				// Not everything will fit, but at least we can fit an aligned chunk.
				// Update what needs to be written to.
				r_alloc_offset = write_from;
				r_alloc_size = available_bytes - (available_bytes % p_required_align);
			} else {
				// Can't fit it into this buffer.
				// Will need to try the next buffer.

				p_staging_buffers.current = (p_staging_buffers.current + 1) % p_staging_buffers.blocks.size();

				// Before doing anything, though, let's check that we didn't manage to fill all blocks.
				// Possible in a single frame.
				if (p_staging_buffers.blocks[p_staging_buffers.current].frame_used == frames_drawn) {
					// We did. Let's see if we can insert a new block.
					if ((uint64_t)p_staging_buffers.blocks.size() * p_staging_buffers.block_size < p_staging_buffers.max_size) {
						// We can, so we are safe.
						Error err = _insert_staging_block(p_staging_buffers);
						if (err) {
							return err;
						}
						// Claim for this frame.
						p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
					} else {
						// Worst case scenario: all the staging buffers belong to this frame
						// and this frame is not even done.
						// If this is the main thread, it means the user is likely loading a lot of resources at once.
						// Otherwise, the thread should just be blocked until the next frame (currently unimplemented).
						r_required_action = STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL;
					}
				} else {
					// Not from the current frame, so continue and try again.
					continue;
				}
			}
		} else if (p_staging_buffers.blocks[p_staging_buffers.current].frame_used <= frames_drawn - frames.size()) {
			// This is an old block, which was already processed, so let's reuse it.
			p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
			p_staging_buffers.blocks.write[p_staging_buffers.current].fill_amount = 0;
		} else {
			// This block may still be in use, so let's not touch it unless we have to. Can we create a new one?
			if ((uint64_t)p_staging_buffers.blocks.size() * p_staging_buffers.block_size < p_staging_buffers.max_size) {
				// We are still allowed to create a new block, so let's do that and insert it for the current position.
				Error err = _insert_staging_block(p_staging_buffers);
				if (err) {
					return err;
				}
				// Claim for this frame.
				p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
			} else {
				// We are out of room and we can't create more.
				// Let's flush older frames.
				// The logic here is that if a game is loading a lot of data from the main thread, it will need to be stalled anyway.
				// If loading from a separate thread, we can block that thread until the next frame when more room is made (not currently implemented, though).
				r_required_action = STAGING_REQUIRED_ACTION_STALL_PREVIOUS;
			}
		}

		// All was good, break.
		break;
	}

	p_staging_buffers.used = true;

	return OK;
}
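// Companion to _staging_buffer_allocate(): performs whatever stall or flush the allocator
// requested, then resets the blocks that are safe to recycle and claims the current block
// for this frame.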
void RenderingDevice::_staging_buffer_execute_required_action(StagingBuffers &p_staging_buffers, StagingRequiredAction p_required_action) {
	switch (p_required_action) {
		case STAGING_REQUIRED_ACTION_NONE: {
			// Do nothing.
		} break;
		case STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL: {
			_flush_and_stall_for_all_frames();

			// Clear the whole staging buffer.
			for (int i = 0; i < p_staging_buffers.blocks.size(); i++) {
				p_staging_buffers.blocks.write[i].frame_used = 0;
				p_staging_buffers.blocks.write[i].fill_amount = 0;
			}

			// Claim for current frame.
			p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
		} break;
		case STAGING_REQUIRED_ACTION_STALL_PREVIOUS: {
			_stall_for_previous_frames();

			for (int i = 0; i < p_staging_buffers.blocks.size(); i++) {
				// Clear all blocks but the ones from this frame.
				int block_idx = (i + p_staging_buffers.current) % p_staging_buffers.blocks.size();
				if (p_staging_buffers.blocks[block_idx].frame_used == frames_drawn) {
					break; // Ok, we reached something from this frame, abort.
				}

				p_staging_buffers.blocks.write[block_idx].frame_used = 0;
				p_staging_buffers.blocks.write[block_idx].fill_amount = 0;
			}

			// Claim for current frame.
			p_staging_buffers.blocks.write[p_staging_buffers.current].frame_used = frames_drawn;
		} break;
		default: {
			DEV_ASSERT(false && "Unknown required action.");
		} break;
	}
}
Error RenderingDevice::buffer_copy(RID p_src_buffer, RID p_dst_buffer, uint32_t p_src_offset, uint32_t p_dst_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
			"Copying buffers is forbidden during creation of a draw list");
	ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
			"Copying buffers is forbidden during creation of a compute list");

	Buffer *src_buffer = _get_buffer_from_owner(p_src_buffer);
	if (!src_buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Source buffer argument is not a valid buffer of any type.");
	}

	Buffer *dst_buffer = _get_buffer_from_owner(p_dst_buffer);
	if (!dst_buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Destination buffer argument is not a valid buffer of any type.");
	}

	// Validate the copy's dimensions for both buffers.
	ERR_FAIL_COND_V_MSG((p_size + p_src_offset) > src_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the source buffer.");
	ERR_FAIL_COND_V_MSG((p_size + p_dst_offset) > dst_buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the destination buffer.");

	_check_transfer_worker_buffer(src_buffer);
	_check_transfer_worker_buffer(dst_buffer);

	// Perform the copy.
	RDD::BufferCopyRegion region;
	region.src_offset = p_src_offset;
	region.dst_offset = p_dst_offset;
	region.size = p_size;

	if (_buffer_make_mutable(dst_buffer, p_dst_buffer)) {
		// The destination buffer must be mutable to be used as a copy destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_buffer_copy(src_buffer->driver_id, src_buffer->draw_tracker, dst_buffer->driver_id, dst_buffer->draw_tracker, region);

	return OK;
}
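// buffer_update() streams data through the upload staging ring in chunks: each iteration
// allocates staging space, copies one chunk into it, and records a copy region. The
// accumulated regions are submitted to the draw graph as a single buffer update, flushed
// early only when the allocator demands a full flush-and-stall (which would otherwise
// invalidate the pending copies).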
Error RenderingDevice::buffer_update(RID p_buffer, uint32_t p_offset, uint32_t p_size, const void *p_data) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	copy_bytes_count += p_size;

	ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a draw list");
	ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a compute list");

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	ERR_FAIL_NULL_V_MSG(buffer, ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
	ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER, "Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");

	_check_transfer_worker_buffer(buffer);

	// Submitting may get chunked for various reasons, so convert this to a task.
	size_t to_submit = p_size;
	size_t submit_from = 0;

	thread_local LocalVector<RDG::RecordedBufferCopy> command_buffer_copies_vector;
	command_buffer_copies_vector.clear();

	const uint8_t *src_data = reinterpret_cast<const uint8_t *>(p_data);
	const uint32_t required_align = 32;
	while (to_submit > 0) {
		uint32_t block_write_offset;
		uint32_t block_write_amount;
		StagingRequiredAction required_action;

		Error err = _staging_buffer_allocate(upload_staging_buffers, MIN(to_submit, upload_staging_buffers.block_size), required_align, block_write_offset, block_write_amount, required_action);
		if (err) {
			return err;
		}

		if (!command_buffer_copies_vector.is_empty() && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL) {
			if (_buffer_make_mutable(buffer, p_buffer)) {
				// The buffer must be mutable to be used as a copy destination.
				draw_graph.add_synchronization();
			}

			draw_graph.add_buffer_update(buffer->driver_id, buffer->draw_tracker, command_buffer_copies_vector);
			command_buffer_copies_vector.clear();
		}

		_staging_buffer_execute_required_action(upload_staging_buffers, required_action);

		// Map staging buffer (it's CPU and coherent).
		uint8_t *data_ptr = driver->buffer_map(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);
		ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);

		// Copy to staging buffer.
		memcpy(data_ptr + block_write_offset, src_data + submit_from, block_write_amount);

		// Unmap.
		driver->buffer_unmap(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);

		// Insert a command to copy this.
		RDD::BufferCopyRegion region;
		region.src_offset = block_write_offset;
		region.dst_offset = submit_from + p_offset;
		region.size = block_write_amount;

		RDG::RecordedBufferCopy buffer_copy;
		buffer_copy.source = upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id;
		buffer_copy.region = region;
		command_buffer_copies_vector.push_back(buffer_copy);

		upload_staging_buffers.blocks.write[upload_staging_buffers.current].fill_amount = block_write_offset + block_write_amount;

		to_submit -= block_write_amount;
		submit_from += block_write_amount;
	}

	if (!command_buffer_copies_vector.is_empty()) {
		if (_buffer_make_mutable(buffer, p_buffer)) {
			// The buffer must be mutable to be used as a copy destination.
			draw_graph.add_synchronization();
		}

		draw_graph.add_buffer_update(buffer->driver_id, buffer->draw_tracker, command_buffer_copies_vector);
	}

	gpu_copy_count++;

	return OK;
}
String RenderingDevice::get_perf_report() const {
	return perf_report_text;
}

void RenderingDevice::update_perf_report() {
	perf_report_text = "";
	perf_report_text += " gpu:" + String::num_int64(gpu_copy_count);
	perf_report_text += " bytes:" + String::num_int64(copy_bytes_count);
	perf_report_text += " lazily alloc:" + String::num_int64(driver->get_lazily_memory_used());

	gpu_copy_count = 0;
	copy_bytes_count = 0;
}
Error RenderingDevice::buffer_clear(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	ERR_FAIL_COND_V_MSG((p_size % 4) != 0, ERR_INVALID_PARAMETER,
			"Size must be a multiple of four.");
	ERR_FAIL_COND_V_MSG(draw_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a draw list.");
	ERR_FAIL_COND_V_MSG(compute_list, ERR_INVALID_PARAMETER,
			"Updating buffers is forbidden during creation of a compute list.");

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (!buffer) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer argument is not a valid buffer of any type.");
	}

	ERR_FAIL_COND_V_MSG(p_offset + p_size > buffer->size, ERR_INVALID_PARAMETER,
			"Attempted to write buffer (" + itos((p_offset + p_size) - buffer->size) + " bytes) past the end.");

	_check_transfer_worker_buffer(buffer);

	if (_buffer_make_mutable(buffer, p_buffer)) {
		// The destination buffer must be mutable to be used as a clear destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_buffer_clear(buffer->driver_id, buffer->draw_tracker, p_offset, p_size);
	return OK;
}
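// Usage sketch (illustrative only; `rd` is a hypothetical RenderingDevice pointer):
// zero out the first 1024 bytes of a storage buffer outside of any draw/compute list.
//
//     Error err = rd->buffer_clear(my_storage_buffer, 0, 1024);
//     ERR_FAIL_COND_V(err != OK, err);
//
// Note the size must be a multiple of 4, matching the validation above.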
Vector<uint8_t> RenderingDevice::buffer_get_data(RID p_buffer, uint32_t p_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(Vector<uint8_t>());

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (!buffer) {
		ERR_FAIL_V_MSG(Vector<uint8_t>(), "Buffer is either invalid or this type of buffer can't be retrieved.");
	}

	// Size of buffer to retrieve.
	if (!p_size) {
		p_size = buffer->size;
	} else {
		ERR_FAIL_COND_V_MSG(p_size + p_offset > buffer->size, Vector<uint8_t>(),
				"Size is larger than the buffer.");
	}

	_check_transfer_worker_buffer(buffer);

	RDD::BufferID tmp_buffer = driver->buffer_create(buffer->size, RDD::BUFFER_USAGE_TRANSFER_TO_BIT, RDD::MEMORY_ALLOCATION_TYPE_CPU);
	ERR_FAIL_COND_V(!tmp_buffer, Vector<uint8_t>());

	RDD::BufferCopyRegion region;
	region.src_offset = p_offset;
	region.size = p_size;
	draw_graph.add_buffer_get_data(buffer->driver_id, buffer->draw_tracker, tmp_buffer, region);

	// Flush everything so memory can be safely mapped.
	_flush_and_stall_for_all_frames();

	uint8_t *buffer_mem = driver->buffer_map(tmp_buffer);
	ERR_FAIL_NULL_V(buffer_mem, Vector<uint8_t>());

	Vector<uint8_t> buffer_data;
	{
		buffer_data.resize(p_size);
		uint8_t *w = buffer_data.ptrw();
		memcpy(w, buffer_mem, p_size);
	}

	driver->buffer_unmap(tmp_buffer);
	driver->buffer_free(tmp_buffer);

	return buffer_data;
}
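// Note: buffer_get_data() is a synchronous readback. The call above flushes and
// stalls for all frames before mapping, so it is convenient for tools and debugging
// but expensive on a hot path; buffer_get_data_async() below avoids the stall by
// recording the copies into the current frame's download requests instead.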
Error RenderingDevice::buffer_get_data_async(RID p_buffer, const Callable &p_callback, uint32_t p_offset, uint32_t p_size) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Buffer *buffer = _get_buffer_from_owner(p_buffer);
	if (buffer == nullptr) {
		ERR_FAIL_V_MSG(ERR_INVALID_PARAMETER, "Buffer is either invalid or this type of buffer can't be retrieved.");
	}

	if (p_size == 0) {
		p_size = buffer->size;
	}

	ERR_FAIL_COND_V_MSG(p_size + p_offset > buffer->size, ERR_INVALID_PARAMETER, "Size is larger than the buffer.");
	ERR_FAIL_COND_V_MSG(!p_callback.is_valid(), ERR_INVALID_PARAMETER, "Callback must be valid.");

	_check_transfer_worker_buffer(buffer);

	BufferGetDataRequest get_data_request;
	uint32_t flushed_copies = 0;
	get_data_request.callback = p_callback;
	get_data_request.frame_local_index = frames[frame].download_buffer_copy_regions.size();
	get_data_request.size = p_size;

	const uint32_t required_align = 32;
	uint32_t block_write_offset;
	uint32_t block_write_amount;
	StagingRequiredAction required_action;
	uint32_t to_submit = p_size;
	uint32_t submit_from = 0;
	while (to_submit > 0) {
		Error err = _staging_buffer_allocate(download_staging_buffers, MIN(to_submit, download_staging_buffers.block_size), required_align, block_write_offset, block_write_amount, required_action);
		if (err) {
			return err;
		}

		if ((get_data_request.frame_local_count > 0) && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL) {
			if (_buffer_make_mutable(buffer, p_buffer)) {
				// The buffer must be mutable to be used as a copy source.
				draw_graph.add_synchronization();
			}

			for (uint32_t i = flushed_copies; i < get_data_request.frame_local_count; i++) {
				uint32_t local_index = get_data_request.frame_local_index + i;
				draw_graph.add_buffer_get_data(buffer->driver_id, buffer->draw_tracker, frames[frame].download_buffer_staging_buffers[local_index], frames[frame].download_buffer_copy_regions[local_index]);
			}

			flushed_copies = get_data_request.frame_local_count;
		}

		_staging_buffer_execute_required_action(download_staging_buffers, required_action);

		RDD::BufferCopyRegion region;
		region.src_offset = submit_from + p_offset;
		region.dst_offset = block_write_offset;
		region.size = block_write_amount;

		frames[frame].download_buffer_staging_buffers.push_back(download_staging_buffers.blocks[download_staging_buffers.current].driver_id);
		frames[frame].download_buffer_copy_regions.push_back(region);
		get_data_request.frame_local_count++;

		download_staging_buffers.blocks.write[download_staging_buffers.current].fill_amount = block_write_offset + block_write_amount;

		to_submit -= block_write_amount;
		submit_from += block_write_amount;
	}

	if (get_data_request.frame_local_count > 0) {
		if (_buffer_make_mutable(buffer, p_buffer)) {
			// The buffer must be mutable to be used as a copy source.
			draw_graph.add_synchronization();
		}

		for (uint32_t i = flushed_copies; i < get_data_request.frame_local_count; i++) {
			uint32_t local_index = get_data_request.frame_local_index + i;
			draw_graph.add_buffer_get_data(buffer->driver_id, buffer->draw_tracker, frames[frame].download_buffer_staging_buffers[local_index], frames[frame].download_buffer_copy_regions[local_index]);
		}

		frames[frame].download_buffer_get_data_requests.push_back(get_data_request);
	}

	return OK;
}
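// Usage sketch (illustrative only; names and the callback signature are assumptions,
// not confirmed by this file): request an async readback and receive the bytes on a
// Callable once the frame's download requests are processed.
//
//     Callable cb = callable_mp(this, &MyClass::_on_buffer_data);
//     Error err = rd->buffer_get_data_async(my_buffer, cb);
//
// The staging copies recorded above are resolved later by the frame's download
// request processing, which invokes the stored callback with the gathered data.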
RID RenderingDevice::storage_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, BitField<StorageBufferUsage> p_usage) {
	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());

	Buffer buffer;
	buffer.size = p_size_bytes;
	buffer.usage = (RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_STORAGE_BIT);
	if (p_usage.has_flag(STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT)) {
		buffer.usage.set_flag(RDD::BUFFER_USAGE_INDIRECT_BIT);
	}
	buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!buffer.driver_id, RID());

	// Storage buffers are assumed to be mutable.
	buffer.draw_tracker = RDG::resource_tracker_create();
	buffer.draw_tracker->buffer_driver_id = buffer.driver_id;

	if (p_data.size()) {
		_buffer_initialize(&buffer, p_data.ptr(), p_data.size());
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = storage_buffer_owner.make_rid(buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}
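// Usage sketch (illustrative only; `rd` is a hypothetical RenderingDevice pointer):
// create a GPU storage buffer with initial data.
//
//     Vector<uint8_t> init;
//     init.resize(256); // Must match p_size_bytes when initial data is provided.
//     RID ssbo = rd->storage_buffer_create(init.size(), init);
//
// Passing STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT additionally sets
// RDD::BUFFER_USAGE_INDIRECT_BIT so the buffer can drive indirect dispatches.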
RID RenderingDevice::texture_buffer_create(uint32_t p_size_elements, DataFormat p_format, const Vector<uint8_t> &p_data) {
	uint32_t element_size = get_format_vertex_size(p_format);
	ERR_FAIL_COND_V_MSG(element_size == 0, RID(), "Format requested is not supported for texture buffers.");
	uint64_t size_bytes = uint64_t(element_size) * p_size_elements;

	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != size_bytes, RID());

	Buffer texture_buffer;
	texture_buffer.size = size_bytes;
	BitField<RDD::BufferUsageBits> usage = (RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_TEXEL_BIT);
	texture_buffer.driver_id = driver->buffer_create(size_bytes, usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!texture_buffer.driver_id, RID());

	// Texture buffers are assumed to be immutable unless they don't have initial data.
	if (p_data.is_empty()) {
		texture_buffer.draw_tracker = RDG::resource_tracker_create();
		texture_buffer.draw_tracker->buffer_driver_id = texture_buffer.driver_id;
	}

	bool ok = driver->buffer_set_texel_format(texture_buffer.driver_id, p_format);
	if (!ok) {
		driver->buffer_free(texture_buffer.driver_id);
		ERR_FAIL_V(RID());
	}

	if (p_data.size()) {
		_buffer_initialize(&texture_buffer, p_data.ptr(), p_data.size());
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += size_bytes;
	_THREAD_SAFE_UNLOCK_

	RID id = texture_buffer_owner.make_rid(texture_buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}
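// Note (illustrative summary): unlike storage buffers, texel buffers only get a
// draw tracker when created without initial data, and the texel format set above
// is what shaders use to interpret the buffer contents when it is bound as a
// texture buffer uniform.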
/*****************/
/**** TEXTURE ****/
/*****************/
RID RenderingDevice::texture_create(const TextureFormat &p_format, const TextureView &p_view, const Vector<Vector<uint8_t>> &p_data) {
	// Some adjustments will happen.
	TextureFormat format = p_format;

	if (format.shareable_formats.size()) {
		ERR_FAIL_COND_V_MSG(!format.shareable_formats.has(format.format), RID(),
				"If supplied a list of shareable formats, the current format must be present in the list.");
		ERR_FAIL_COND_V_MSG(p_view.format_override != DATA_FORMAT_MAX && !format.shareable_formats.has(p_view.format_override), RID(),
				"If supplied a list of shareable formats, the current view format override must be present in the list.");
	}

	ERR_FAIL_INDEX_V(format.texture_type, RDD::TEXTURE_TYPE_MAX, RID());

	ERR_FAIL_COND_V_MSG(format.width < 1, RID(), "Width must be equal or greater than 1 for all textures.");

	if (format.texture_type != TEXTURE_TYPE_1D && format.texture_type != TEXTURE_TYPE_1D_ARRAY) {
		ERR_FAIL_COND_V_MSG(format.height < 1, RID(), "Height must be equal or greater than 1 for 2D and 3D textures.");
	}

	if (format.texture_type == TEXTURE_TYPE_3D) {
		ERR_FAIL_COND_V_MSG(format.depth < 1, RID(), "Depth must be equal or greater than 1 for 3D textures.");
	}

	ERR_FAIL_COND_V(format.mipmaps < 1, RID());

	if (format.texture_type == TEXTURE_TYPE_1D_ARRAY || format.texture_type == TEXTURE_TYPE_2D_ARRAY || format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || format.texture_type == TEXTURE_TYPE_CUBE) {
		ERR_FAIL_COND_V_MSG(format.array_layers < 1, RID(),
				"Number of layers must be equal or greater than 1 for arrays and cubemaps.");
		ERR_FAIL_COND_V_MSG((format.texture_type == TEXTURE_TYPE_CUBE_ARRAY || format.texture_type == TEXTURE_TYPE_CUBE) && (format.array_layers % 6) != 0, RID(),
				"Cubemap and cubemap array textures must provide a layer number that is a multiple of 6.");
		ERR_FAIL_COND_V_MSG(format.array_layers > driver->limit_get(LIMIT_MAX_TEXTURE_ARRAY_LAYERS), RID(), "Number of layers exceeds device maximum.");
	} else {
		format.array_layers = 1;
	}

	ERR_FAIL_INDEX_V(format.samples, TEXTURE_SAMPLES_MAX, RID());

	ERR_FAIL_COND_V_MSG(format.usage_bits == 0, RID(), "No usage bits specified (at least one is needed).");

	format.height = format.texture_type != TEXTURE_TYPE_1D && format.texture_type != TEXTURE_TYPE_1D_ARRAY ? format.height : 1;
	format.depth = format.texture_type == TEXTURE_TYPE_3D ? format.depth : 1;

	uint64_t size_max = 0;
	switch (format.texture_type) {
		case TEXTURE_TYPE_1D:
		case TEXTURE_TYPE_1D_ARRAY:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_1D);
			break;
		case TEXTURE_TYPE_2D:
		case TEXTURE_TYPE_2D_ARRAY:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_2D);
			break;
		case TEXTURE_TYPE_CUBE:
		case TEXTURE_TYPE_CUBE_ARRAY:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_CUBE);
			break;
		case TEXTURE_TYPE_3D:
			size_max = driver->limit_get(LIMIT_MAX_TEXTURE_SIZE_3D);
			break;
		case TEXTURE_TYPE_MAX:
			break;
	}
	ERR_FAIL_COND_V_MSG(format.width > size_max || format.height > size_max || format.depth > size_max, RID(), "Texture dimensions exceed device maximum.");

	uint32_t required_mipmaps = get_image_required_mipmaps(format.width, format.height, format.depth);
	ERR_FAIL_COND_V_MSG(required_mipmaps < format.mipmaps, RID(),
			"Too many mipmaps requested for texture format and dimensions (" + itos(format.mipmaps) + "), maximum allowed: (" + itos(required_mipmaps) + ").");

	uint32_t forced_usage_bits = 0;
	if (p_data.size()) {
		ERR_FAIL_COND_V_MSG(p_data.size() != (int)format.array_layers, RID(),
				"Default supplied data for image format is of invalid length (" + itos(p_data.size()) + "), should be (" + itos(format.array_layers) + ").");

		for (uint32_t i = 0; i < format.array_layers; i++) {
			uint32_t required_size = get_image_format_required_size(format.format, format.width, format.height, format.depth, format.mipmaps);
			ERR_FAIL_COND_V_MSG((uint32_t)p_data[i].size() != required_size, RID(),
					"Data for slice index " + itos(i) + " (mapped to layer " + itos(i) + ") differs in size (supplied: " + itos(p_data[i].size()) + ") from what is required by the format (" + itos(required_size) + ").");
		}

		ERR_FAIL_COND_V_MSG(format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, RID(),
				"Textures created as depth attachments can't be initialized with data directly. Use RenderingDevice::texture_update() instead.");

		if (!(format.usage_bits & TEXTURE_USAGE_CAN_UPDATE_BIT)) {
			forced_usage_bits = TEXTURE_USAGE_CAN_UPDATE_BIT;
		}
	}

	{
		// Validate that this image is supported for the intended use.
		bool cpu_readable = (format.usage_bits & RDD::TEXTURE_USAGE_CPU_READ_BIT);
		BitField<RDD::TextureUsageBits> supported_usage = driver->texture_get_usages_supported_by_format(format.format, cpu_readable);

		String format_text = "'" + String(FORMAT_NAMES[format.format]) + "'";

		if ((format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_SAMPLING_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as sampling texture.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_COLOR_ATTACHMENT_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as color attachment.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as depth-stencil attachment.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_STORAGE_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_STORAGE_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as storage image.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_STORAGE_ATOMIC_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_STORAGE_ATOMIC_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as atomic storage image.");
		}
		if ((format.usage_bits & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && !supported_usage.has_flag(TEXTURE_USAGE_VRS_ATTACHMENT_BIT)) {
			ERR_FAIL_V_MSG(RID(), "Format " + format_text + " does not support usage as VRS attachment.");
		}
	}

	// Transfer and validate view info.
	RDD::TextureView tv;
	if (p_view.format_override == DATA_FORMAT_MAX) {
		tv.format = format.format;
	} else {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
		tv.format = p_view.format_override;
	}
	ERR_FAIL_INDEX_V(p_view.swizzle_r, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_g, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_b, TEXTURE_SWIZZLE_MAX, RID());
	ERR_FAIL_INDEX_V(p_view.swizzle_a, TEXTURE_SWIZZLE_MAX, RID());
	tv.swizzle_r = p_view.swizzle_r;
	tv.swizzle_g = p_view.swizzle_g;
	tv.swizzle_b = p_view.swizzle_b;
	tv.swizzle_a = p_view.swizzle_a;

	// Create.
	Texture texture;
	format.usage_bits |= forced_usage_bits;
	texture.driver_id = driver->texture_create(format, tv);
	ERR_FAIL_COND_V(!texture.driver_id, RID());
	texture.type = format.texture_type;
	texture.format = format.format;
	texture.width = format.width;
	texture.height = format.height;
	texture.depth = format.depth;
	texture.layers = format.array_layers;
	texture.mipmaps = format.mipmaps;
	texture.base_mipmap = 0;
	texture.base_layer = 0;
	texture.is_resolve_buffer = format.is_resolve_buffer;
	texture.is_discardable = format.is_discardable;
	texture.usage_flags = format.usage_bits & ~forced_usage_bits;
	texture.samples = format.samples;
	texture.allowed_shared_formats = format.shareable_formats;
	texture.has_initial_data = !p_data.is_empty();

	if ((format.usage_bits & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		if (format_has_stencil(format.format)) {
			texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}
	} else {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
	}

	texture.bound = false;

	// Textures are only assumed to be immutable if they have initial data and none of the other bits that indicate write usage are enabled.
	bool texture_mutable_by_default = texture.usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_STORAGE_BIT | TEXTURE_USAGE_STORAGE_ATOMIC_BIT | TEXTURE_USAGE_VRS_ATTACHMENT_BIT);
	if (p_data.is_empty() || texture_mutable_by_default) {
		_texture_make_mutable(&texture, RID());
	}

	texture_memory += driver->texture_get_allocation_size(texture.driver_id);

	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	if (p_data.size()) {
		// Iterate over the adjusted layer count so the indices always match the data validated above.
		for (uint32_t i = 0; i < format.array_layers; i++) {
			_texture_initialize(id, i, p_data[i]);
		}

		if (texture.draw_tracker != nullptr) {
			// The draw tracker can assume the texture will be in the copy destination state.
			texture.draw_tracker->usage = RDG::RESOURCE_USAGE_COPY_TO;
		}
	}

	return id;
}
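// Usage sketch (illustrative only; `rd` and `my_pixels` are hypothetical): create a
// small sampled 2D texture with one layer of initial data. Field names match the
// TextureFormat used above; unset fields keep their defaults (2D, 1 mip, 1 layer).
//
//     TextureFormat tf;
//     tf.format = DATA_FORMAT_R8G8B8A8_UNORM;
//     tf.width = 64;
//     tf.height = 64;
//     tf.usage_bits = TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_CAN_UPDATE_BIT;
//     Vector<Vector<uint8_t>> data; // One entry per array layer, mips tightly packed.
//     data.push_back(my_pixels);
//     RID tex = rd->texture_create(tf, TextureView(), data);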
RID RenderingDevice::texture_create_shared(const TextureView &p_view, RID p_with_texture) {
	Texture *src_texture = texture_owner.get_or_null(p_with_texture);
	ERR_FAIL_NULL_V(src_texture, RID());

	if (src_texture->owner.is_valid()) { // Ahh, this is a share. The RenderingDeviceDriver needs the actual owner.
		p_with_texture = src_texture->owner;
		src_texture = texture_owner.get_or_null(src_texture->owner);
		ERR_FAIL_NULL_V(src_texture, RID()); // This is a bug.
	}

	// Create view.
	Texture texture = *src_texture;
	texture.shared_fallback = nullptr;

	RDD::TextureView tv;
	bool create_shared = true;
	bool raw_reinterpretation = false;
	if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
		tv.format = texture.format;
	} else {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
		ERR_FAIL_COND_V_MSG(!texture.allowed_shared_formats.has(p_view.format_override), RID(),
				"Format override is not in the list of allowed shareable formats for the original texture.");
		tv.format = p_view.format_override;
		create_shared = driver->texture_can_make_shared_with_format(texture.driver_id, p_view.format_override, raw_reinterpretation);
	}
	tv.swizzle_r = p_view.swizzle_r;
	tv.swizzle_g = p_view.swizzle_g;
	tv.swizzle_b = p_view.swizzle_b;
	tv.swizzle_a = p_view.swizzle_a;

	if (create_shared) {
		texture.driver_id = driver->texture_create_shared(texture.driver_id, tv);
	} else {
		// The regular view will use the same format as the main texture.
		RDD::TextureView regular_view = tv;
		regular_view.format = src_texture->format;
		texture.driver_id = driver->texture_create_shared(texture.driver_id, regular_view);

		// Create the independent texture for the alias.
		RDD::TextureFormat alias_format = texture.texture_format();
		alias_format.format = tv.format;
		alias_format.usage_bits = TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_CAN_COPY_TO_BIT;

		_texture_check_shared_fallback(src_texture);
		_texture_check_shared_fallback(&texture);

		texture.shared_fallback->texture = driver->texture_create(alias_format, tv);
		texture.shared_fallback->raw_reinterpretation = raw_reinterpretation;
		texture_memory += driver->texture_get_allocation_size(texture.shared_fallback->texture);

		RDG::ResourceTracker *tracker = RDG::resource_tracker_create();
		tracker->texture_driver_id = texture.shared_fallback->texture;
		tracker->texture_size = Size2i(texture.width, texture.height);
		tracker->texture_subresources = texture.barrier_range();
		tracker->texture_usage = alias_format.usage_bits;
		tracker->is_discardable = texture.is_discardable;
		tracker->reference_count = 1;
		texture.shared_fallback->texture_tracker = tracker;
		texture.shared_fallback->revision = 0;

		if (raw_reinterpretation && src_texture->shared_fallback->buffer.id == 0) {
			// For shared textures of the same size, we create the buffer on the main texture if it doesn't have it already.
			_texture_create_reinterpret_buffer(src_texture);
		}
	}

	ERR_FAIL_COND_V(!texture.driver_id, RID());

	texture.slice_trackers.clear();

	if (texture.draw_tracker != nullptr) {
		texture.draw_tracker->reference_count++;
	}

	texture.owner = p_with_texture;
	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	_add_dependency(id, p_with_texture);

	return id;
}
RID RenderingDevice::texture_create_from_extension(TextureType p_type, DataFormat p_format, TextureSamples p_samples, BitField<RenderingDevice::TextureUsageBits> p_usage, uint64_t p_image, uint64_t p_width, uint64_t p_height, uint64_t p_depth, uint64_t p_layers) {
	// This method creates a texture object using a VkImage created by an extension, module or other external source (OpenXR uses this).
	Texture texture;
	texture.type = p_type;
	texture.format = p_format;
	texture.samples = p_samples;
	texture.width = p_width;
	texture.height = p_height;
	texture.depth = p_depth;
	texture.layers = p_layers;
	texture.mipmaps = 1;
	texture.usage_flags = p_usage;
	texture.base_mipmap = 0;
	texture.base_layer = 0;
	texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_UNORM);
	texture.allowed_shared_formats.push_back(RD::DATA_FORMAT_R8G8B8A8_SRGB);

	if (p_usage.has_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		/*if (format_has_stencil(p_format.format)) {
			texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}*/
	} else {
		texture.read_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
		texture.barrier_aspect_flags.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
	}

	texture.driver_id = driver->texture_create_from_extension(p_image, p_type, p_format, p_layers, (texture.usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT));
	ERR_FAIL_COND_V(!texture.driver_id, RID());

	_texture_make_mutable(&texture, RID());

	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}
RID RenderingDevice::texture_create_shared_from_slice(const TextureView &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps, TextureSliceType p_slice_type, uint32_t p_layers) {
	Texture *src_texture = texture_owner.get_or_null(p_with_texture);
	ERR_FAIL_NULL_V(src_texture, RID());

	if (src_texture->owner.is_valid()) { // Ahh, this is a share. The RenderingDeviceDriver needs the actual owner.
		p_with_texture = src_texture->owner;
		src_texture = texture_owner.get_or_null(src_texture->owner);
		ERR_FAIL_NULL_V(src_texture, RID()); // This is a bug.
	}

	ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_CUBEMAP && (src_texture->type != TEXTURE_TYPE_CUBE && src_texture->type != TEXTURE_TYPE_CUBE_ARRAY), RID(),
			"Can only create a cubemap slice from a cubemap or cubemap array mipmap.");

	ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_3D && src_texture->type != TEXTURE_TYPE_3D, RID(),
			"Can only create a 3D slice from a 3D texture.");

	ERR_FAIL_COND_V_MSG(p_slice_type == TEXTURE_SLICE_2D_ARRAY && (src_texture->type != TEXTURE_TYPE_2D_ARRAY), RID(),
			"Can only create an array slice from a 2D array mipmap.");

	// Create view.
	ERR_FAIL_UNSIGNED_INDEX_V(p_mipmap, src_texture->mipmaps, RID());
	ERR_FAIL_COND_V(p_mipmap + p_mipmaps > src_texture->mipmaps, RID());
	ERR_FAIL_UNSIGNED_INDEX_V(p_layer, src_texture->layers, RID());

	int slice_layers = 1;
	if (p_layers != 0) {
		ERR_FAIL_COND_V_MSG(p_layers > 1 && p_slice_type != TEXTURE_SLICE_2D_ARRAY, RID(), "Layer slicing is only supported for 2D arrays.");
		ERR_FAIL_COND_V_MSG(p_layer + p_layers > src_texture->layers, RID(), "Layer slice is out of bounds.");
		slice_layers = p_layers;
	} else if (p_slice_type == TEXTURE_SLICE_2D_ARRAY) {
		ERR_FAIL_COND_V_MSG(p_layer != 0, RID(), "Layer must be 0 when obtaining a 2D array mipmap slice.");
		slice_layers = src_texture->layers;
	} else if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
		slice_layers = 6;
	}

	Texture texture = *src_texture;
	texture.shared_fallback = nullptr;

	get_image_format_required_size(texture.format, texture.width, texture.height, texture.depth, p_mipmap + 1, &texture.width, &texture.height);
	texture.mipmaps = p_mipmaps;
	texture.layers = slice_layers;
	texture.base_mipmap = p_mipmap;
	texture.base_layer = p_layer;

	if (p_slice_type == TEXTURE_SLICE_2D) {
		texture.type = TEXTURE_TYPE_2D;
	} else if (p_slice_type == TEXTURE_SLICE_3D) {
		texture.type = TEXTURE_TYPE_3D;
	}

	RDD::TextureView tv;
	bool create_shared = true;
	bool raw_reinterpretation = false;
	if (p_view.format_override == DATA_FORMAT_MAX || p_view.format_override == texture.format) {
		tv.format = texture.format;
	} else {
		ERR_FAIL_INDEX_V(p_view.format_override, DATA_FORMAT_MAX, RID());
		ERR_FAIL_COND_V_MSG(!texture.allowed_shared_formats.has(p_view.format_override), RID(),
				"Format override is not in the list of allowed shareable formats for the original texture.");
		tv.format = p_view.format_override;
		create_shared = driver->texture_can_make_shared_with_format(texture.driver_id, p_view.format_override, raw_reinterpretation);
	}
	tv.swizzle_r = p_view.swizzle_r;
	tv.swizzle_g = p_view.swizzle_g;
	tv.swizzle_b = p_view.swizzle_b;
	tv.swizzle_a = p_view.swizzle_a;

	if (p_slice_type == TEXTURE_SLICE_CUBEMAP) {
		ERR_FAIL_COND_V_MSG(p_layer >= src_texture->layers, RID(),
				"Specified layer is invalid for cubemap.");
		ERR_FAIL_COND_V_MSG((p_layer % 6) != 0, RID(),
				"Specified layer must be a multiple of 6.");
	}

	if (create_shared) {
		texture.driver_id = driver->texture_create_shared_from_slice(src_texture->driver_id, tv, p_slice_type, p_layer, slice_layers, p_mipmap, p_mipmaps);
	} else {
		// The regular view will use the same format as the main texture.
		RDD::TextureView regular_view = tv;
		regular_view.format = src_texture->format;
		texture.driver_id = driver->texture_create_shared_from_slice(src_texture->driver_id, regular_view, p_slice_type, p_layer, slice_layers, p_mipmap, p_mipmaps);

		// Create the independent texture for the slice.
		RDD::TextureSubresourceRange slice_range = texture.barrier_range();
		slice_range.base_mipmap = 0;
		slice_range.base_layer = 0;

		RDD::TextureFormat slice_format = texture.texture_format();
		slice_format.width = MAX(texture.width >> p_mipmap, 1U);
		slice_format.height = MAX(texture.height >> p_mipmap, 1U);
		slice_format.depth = MAX(texture.depth >> p_mipmap, 1U);
		slice_format.format = tv.format;
		slice_format.usage_bits = TEXTURE_USAGE_SAMPLING_BIT | TEXTURE_USAGE_CAN_COPY_TO_BIT;

		_texture_check_shared_fallback(src_texture);
		_texture_check_shared_fallback(&texture);

		texture.shared_fallback->texture = driver->texture_create(slice_format, tv);
		texture.shared_fallback->raw_reinterpretation = raw_reinterpretation;
		texture_memory += driver->texture_get_allocation_size(texture.shared_fallback->texture);

		RDG::ResourceTracker *tracker = RDG::resource_tracker_create();
		tracker->texture_driver_id = texture.shared_fallback->texture;
		tracker->texture_size = Size2i(texture.width, texture.height);
		tracker->texture_subresources = slice_range;
		tracker->texture_usage = slice_format.usage_bits;
		tracker->is_discardable = slice_format.is_discardable;
		tracker->reference_count = 1;
		texture.shared_fallback->texture_tracker = tracker;
		texture.shared_fallback->revision = 0;

		if (raw_reinterpretation && src_texture->shared_fallback->buffer.id == 0) {
			// For shared texture slices, we create the buffer on the slice if the source texture has no reinterpretation buffer.
			_texture_create_reinterpret_buffer(&texture);
		}
	}

	ERR_FAIL_COND_V(!texture.driver_id, RID());

	const Rect2i slice_rect(p_mipmap, p_layer, p_mipmaps, slice_layers);
	texture.owner = p_with_texture;
	texture.slice_type = p_slice_type;
	texture.slice_rect = slice_rect;

	// If the parent is mutable, make the slice mutable by default.
	if (src_texture->draw_tracker != nullptr) {
		texture.draw_tracker = nullptr;
		_texture_make_mutable(&texture, RID());
	}

	RID id = texture_owner.make_rid(texture);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	_add_dependency(id, p_with_texture);

	return id;
}
static _ALWAYS_INLINE_ void _copy_region(uint8_t const *__restrict p_src, uint8_t *__restrict p_dst, uint32_t p_src_x, uint32_t p_src_y, uint32_t p_src_w, uint32_t p_src_h, uint32_t p_src_full_w, uint32_t p_dst_pitch, uint32_t p_unit_size) {
	uint32_t src_offset = (p_src_y * p_src_full_w + p_src_x) * p_unit_size;
	uint32_t dst_offset = 0;

	for (uint32_t y = p_src_h; y > 0; y--) {
		uint8_t const *__restrict src = p_src + src_offset;
		uint8_t *__restrict dst = p_dst + dst_offset;
		for (uint32_t x = p_src_w * p_unit_size; x > 0; x--) {
			*dst = *src;
			src++;
			dst++;
		}
		src_offset += p_src_full_w * p_unit_size;
		dst_offset += p_dst_pitch;
	}
}
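// Worked example (illustrative): copying a 2x2 region at (1, 1) out of a 4x4
// RGBA8 source (p_unit_size = 4). The first source offset is
// (1 * 4 + 1) * 4 = 20 bytes; each subsequent row advances by
// p_src_full_w * p_unit_size = 16 bytes on the read side and by p_dst_pitch on
// the write side, which lets destination rows be padded to the driver's pitch.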
static _ALWAYS_INLINE_ void _copy_region_block_or_regular(const uint8_t *p_read_ptr, uint8_t *p_write_ptr, uint32_t p_x, uint32_t p_y, uint32_t p_width, uint32_t p_region_w, uint32_t p_region_h, uint32_t p_block_w, uint32_t p_block_h, uint32_t p_dst_pitch, uint32_t p_pixel_size, uint32_t p_block_size) {
	if (p_block_w != 1 || p_block_h != 1) {
		// Block format.
		uint32_t xb = p_x / p_block_w;
		uint32_t yb = p_y / p_block_h;
		uint32_t wb = p_width / p_block_w;
		uint32_t region_wb = p_region_w / p_block_w;
		uint32_t region_hb = p_region_h / p_block_h;
		_copy_region(p_read_ptr, p_write_ptr, xb, yb, region_wb, region_hb, wb, p_dst_pitch, p_block_size);
	} else {
		// Regular format.
		_copy_region(p_read_ptr, p_write_ptr, p_x, p_y, p_region_w, p_region_h, p_width, p_dst_pitch, p_pixel_size);
	}
}
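// Worked example (illustrative): for a BC1/DXT1-style compressed format, block
// dimensions are 4x4 and the block byte size is 8. A 64x64 region at (16, 0)
// becomes xb = 4, yb = 0, and a 16x16 copy of 8-byte block units, which is why
// callers validate that region offsets and sizes are multiples of the block size.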
uint32_t RenderingDevice::_texture_layer_count(Texture *p_texture) const {
	switch (p_texture->type) {
		case TEXTURE_TYPE_CUBE:
		case TEXTURE_TYPE_CUBE_ARRAY:
			return p_texture->layers * 6;
		default:
			return p_texture->layers;
	}
}
uint32_t RenderingDevice::_texture_alignment(Texture *p_texture) const {
	uint32_t alignment = get_compressed_image_format_block_byte_size(p_texture->format);
	if (alignment == 1) {
		alignment = get_image_format_pixel_size(p_texture->format);
	}

	return STEPIFY(alignment, driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT));
}
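// Worked example (illustrative): for an uncompressed RGBA8 texture the block byte
// size is 1, so the pixel size (4 bytes) is used instead; with a hypothetical
// driver transfer alignment of 16, STEPIFY(4, 16) rounds up to a 16-byte
// staging alignment.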
Error RenderingDevice::_texture_initialize(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data) {
	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(texture, ERR_INVALID_PARAMETER);

	if (texture->owner != RID()) {
		p_texture = texture->owner;
		texture = texture_owner.get_or_null(texture->owner);
		ERR_FAIL_NULL_V(texture, ERR_BUG); // This is a bug.
	}

	uint32_t layer_count = _texture_layer_count(texture);
	ERR_FAIL_COND_V(p_layer >= layer_count, ERR_INVALID_PARAMETER);

	uint32_t width, height;
	uint32_t tight_mip_size = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, texture->mipmaps, &width, &height);
	uint32_t required_size = tight_mip_size;
	uint32_t required_align = _texture_alignment(texture);

	ERR_FAIL_COND_V_MSG(required_size != (uint32_t)p_data.size(), ERR_INVALID_PARAMETER,
			"Required size for texture update (" + itos(required_size) + ") does not match data supplied size (" + itos(p_data.size()) + ").");

	uint32_t block_w, block_h;
	get_compressed_image_format_block_dimensions(texture->format, block_w, block_h);

	uint32_t pixel_size = get_image_format_pixel_size(texture->format);
	uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(texture->format);
	uint32_t block_size = get_compressed_image_format_block_byte_size(texture->format);

	// The algorithm operates in two passes: one to figure out the total size the staging buffer must allocate, and another where the copy is actually performed.
	uint32_t staging_worker_offset = 0;
	uint32_t staging_local_offset = 0;
	TransferWorker *transfer_worker = nullptr;
	const uint8_t *read_ptr = p_data.ptr();
	uint8_t *write_ptr = nullptr;
	const RDD::TextureLayout copy_dst_layout = driver->api_trait_get(RDD::API_TRAIT_USE_GENERAL_IN_COPY_QUEUES) ? RDD::TEXTURE_LAYOUT_GENERAL : RDD::TEXTURE_LAYOUT_COPY_DST_OPTIMAL;
	for (uint32_t pass = 0; pass < 2; pass++) {
		const bool copy_pass = (pass == 1);
		if (copy_pass) {
			transfer_worker = _acquire_transfer_worker(staging_local_offset, required_align, staging_worker_offset);
			texture->transfer_worker_index = transfer_worker->index;

			{
				MutexLock lock(transfer_worker->operations_mutex);
				texture->transfer_worker_operation = ++transfer_worker->operations_counter;
			}

			staging_local_offset = 0;

			write_ptr = driver->buffer_map(transfer_worker->staging_buffer);
			ERR_FAIL_NULL_V(write_ptr, ERR_CANT_CREATE);

			if (driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
				// Transition the texture to the optimal layout.
				RDD::TextureBarrier tb;
				tb.texture = texture->driver_id;
				tb.dst_access = RDD::BARRIER_ACCESS_COPY_WRITE_BIT;
				tb.prev_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
				tb.next_layout = copy_dst_layout;
				tb.subresources.aspect = texture->barrier_aspect_flags;
				tb.subresources.mipmap_count = texture->mipmaps;
				tb.subresources.base_layer = p_layer;
				tb.subresources.layer_count = 1;
				driver->command_pipeline_barrier(transfer_worker->command_buffer, RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, RDD::PIPELINE_STAGE_COPY_BIT, {}, {}, tb);
			}
		}

		uint32_t mipmap_offset = 0;
		uint32_t logic_width = texture->width;
		uint32_t logic_height = texture->height;
		for (uint32_t mm_i = 0; mm_i < texture->mipmaps; mm_i++) {
			uint32_t depth = 0;
			uint32_t image_total = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, mm_i + 1, &width, &height, &depth);

			const uint8_t *read_ptr_mipmap = read_ptr + mipmap_offset;
			tight_mip_size = image_total - mipmap_offset;

			for (uint32_t z = 0; z < depth; z++) {
				if (required_align > 0) {
					uint32_t align_offset = staging_local_offset % required_align;
					if (align_offset != 0) {
						staging_local_offset += required_align - align_offset;
					}
				}

				uint32_t pitch = (width * pixel_size * block_w) >> pixel_rshift;
				uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
				pitch = STEPIFY(pitch, pitch_step);
				uint32_t to_allocate = pitch * height;
				to_allocate >>= pixel_rshift;

				if (copy_pass) {
					const uint8_t *read_ptr_mipmap_layer = read_ptr_mipmap + (tight_mip_size / depth) * z;
					uint64_t staging_buffer_offset = staging_worker_offset + staging_local_offset;
					uint8_t *write_ptr_mipmap_layer = write_ptr + staging_buffer_offset;
					_copy_region_block_or_regular(read_ptr_mipmap_layer, write_ptr_mipmap_layer, 0, 0, width, width, height, block_w, block_h, pitch, pixel_size, block_size);

					RDD::BufferTextureCopyRegion copy_region;
					copy_region.buffer_offset = staging_buffer_offset;
					copy_region.texture_subresources.aspect = texture->read_aspect_flags;
					copy_region.texture_subresources.mipmap = mm_i;
					copy_region.texture_subresources.base_layer = p_layer;
					copy_region.texture_subresources.layer_count = 1;
					copy_region.texture_offset = Vector3i(0, 0, z);
					copy_region.texture_region_size = Vector3i(logic_width, logic_height, 1);
					driver->command_copy_buffer_to_texture(transfer_worker->command_buffer, transfer_worker->staging_buffer, texture->driver_id, copy_dst_layout, copy_region);
				}

				staging_local_offset += to_allocate;
			}

			mipmap_offset = image_total;
			logic_width = MAX(1u, logic_width >> 1);
			logic_height = MAX(1u, logic_height >> 1);
		}

		if (copy_pass) {
			driver->buffer_unmap(transfer_worker->staging_buffer);

			// If the texture does not have a tracker, it means it must be transitioned to the sampling state.
			if (texture->draw_tracker == nullptr && driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
				RDD::TextureBarrier tb;
				tb.texture = texture->driver_id;
				tb.src_access = RDD::BARRIER_ACCESS_COPY_WRITE_BIT;
				tb.prev_layout = copy_dst_layout;
				tb.next_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				tb.subresources.aspect = texture->barrier_aspect_flags;
				tb.subresources.mipmap_count = texture->mipmaps;
				tb.subresources.base_layer = p_layer;
				tb.subresources.layer_count = 1;
				transfer_worker->texture_barriers.push_back(tb);
			}

			_release_transfer_worker(transfer_worker);
		}
	}

	return OK;
}
Error RenderingDevice::texture_update(RID p_texture, uint32_t p_layer, const Vector<uint8_t> &p_data) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	ERR_FAIL_COND_V_MSG(draw_list || compute_list, ERR_INVALID_PARAMETER, "Updating textures is forbidden during creation of a draw or compute list.");

	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(texture, ERR_INVALID_PARAMETER);

	if (texture->owner != RID()) {
		p_texture = texture->owner;
		texture = texture_owner.get_or_null(texture->owner);
		ERR_FAIL_NULL_V(texture, ERR_BUG); // This is a bug.
	}

	ERR_FAIL_COND_V_MSG(texture->bound, ERR_CANT_ACQUIRE_RESOURCE,
			"Texture can't be updated while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to update this texture.");
	ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_CAN_UPDATE_BIT), ERR_INVALID_PARAMETER, "Texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_UPDATE_BIT` to be set to be updatable.");

	uint32_t layer_count = _texture_layer_count(texture);
	ERR_FAIL_COND_V(p_layer >= layer_count, ERR_INVALID_PARAMETER);

	uint32_t width, height;
	uint32_t tight_mip_size = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, texture->mipmaps, &width, &height);
	uint32_t required_size = tight_mip_size;
	uint32_t required_align = _texture_alignment(texture);

	ERR_FAIL_COND_V_MSG(required_size != (uint32_t)p_data.size(), ERR_INVALID_PARAMETER,
			"Required size for texture update (" + itos(required_size) + ") does not match data supplied size (" + itos(p_data.size()) + ").");

	_check_transfer_worker_texture(texture);

	uint32_t block_w, block_h;
	get_compressed_image_format_block_dimensions(texture->format, block_w, block_h);

	uint32_t pixel_size = get_image_format_pixel_size(texture->format);
	uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(texture->format);
	uint32_t block_size = get_compressed_image_format_block_byte_size(texture->format);
	uint32_t region_size = texture_upload_region_size_px;

	const uint8_t *read_ptr = p_data.ptr();

	thread_local LocalVector<RDG::RecordedBufferToTextureCopy> command_buffer_to_texture_copies_vector;
	command_buffer_to_texture_copies_vector.clear();

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_texture, texture, true);

	uint32_t mipmap_offset = 0;

	uint32_t logic_width = texture->width;
	uint32_t logic_height = texture->height;

	for (uint32_t mm_i = 0; mm_i < texture->mipmaps; mm_i++) {
		uint32_t depth = 0;
		uint32_t image_total = get_image_format_required_size(texture->format, texture->width, texture->height, texture->depth, mm_i + 1, &width, &height, &depth);

		const uint8_t *read_ptr_mipmap = read_ptr + mipmap_offset;
		tight_mip_size = image_total - mipmap_offset;

		for (uint32_t z = 0; z < depth; z++) {
			const uint8_t *read_ptr_mipmap_layer = read_ptr_mipmap + (tight_mip_size / depth) * z;
			for (uint32_t y = 0; y < height; y += region_size) {
				for (uint32_t x = 0; x < width; x += region_size) {
					uint32_t region_w = MIN(region_size, width - x);
					uint32_t region_h = MIN(region_size, height - y);

					uint32_t region_logic_w = MIN(region_size, logic_width - x);
					uint32_t region_logic_h = MIN(region_size, logic_height - y);

					uint32_t region_pitch = (region_w * pixel_size * block_w) >> pixel_rshift;
					uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
					region_pitch = STEPIFY(region_pitch, pitch_step);
					uint32_t to_allocate = region_pitch * region_h;

					uint32_t alloc_offset = 0, alloc_size = 0;
					StagingRequiredAction required_action;
					Error err = _staging_buffer_allocate(upload_staging_buffers, to_allocate, required_align, alloc_offset, alloc_size, required_action, false);
					ERR_FAIL_COND_V(err, ERR_CANT_CREATE);

					if (!command_buffer_to_texture_copies_vector.is_empty() && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL) {
						if (_texture_make_mutable(texture, p_texture)) {
							// The texture must be mutable to be used as a copy destination.
							draw_graph.add_synchronization();
						}

						// If the staging buffer requires flushing everything, we submit the command early and clear the current vector.
						draw_graph.add_texture_update(texture->driver_id, texture->draw_tracker, command_buffer_to_texture_copies_vector);
						command_buffer_to_texture_copies_vector.clear();
					}

					_staging_buffer_execute_required_action(upload_staging_buffers, required_action);

					uint8_t *write_ptr;

					{ // Map.
						uint8_t *data_ptr = driver->buffer_map(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);
						ERR_FAIL_NULL_V(data_ptr, ERR_CANT_CREATE);
						write_ptr = data_ptr;
						write_ptr += alloc_offset;
					}

					ERR_FAIL_COND_V(region_w % block_w, ERR_BUG);
					ERR_FAIL_COND_V(region_h % block_h, ERR_BUG);

					_copy_region_block_or_regular(read_ptr_mipmap_layer, write_ptr, x, y, width, region_w, region_h, block_w, block_h, region_pitch, pixel_size, block_size);

					{ // Unmap.
						driver->buffer_unmap(upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id);
					}

					RDD::BufferTextureCopyRegion copy_region;
					copy_region.buffer_offset = alloc_offset;
					copy_region.texture_subresources.aspect = texture->read_aspect_flags;
					copy_region.texture_subresources.mipmap = mm_i;
					copy_region.texture_subresources.base_layer = p_layer;
					copy_region.texture_subresources.layer_count = 1;
					copy_region.texture_offset = Vector3i(x, y, z);
					copy_region.texture_region_size = Vector3i(region_logic_w, region_logic_h, 1);

					RDG::RecordedBufferToTextureCopy buffer_to_texture_copy;
					buffer_to_texture_copy.from_buffer = upload_staging_buffers.blocks[upload_staging_buffers.current].driver_id;
					buffer_to_texture_copy.region = copy_region;
					command_buffer_to_texture_copies_vector.push_back(buffer_to_texture_copy);

					upload_staging_buffers.blocks.write[upload_staging_buffers.current].fill_amount = alloc_offset + alloc_size;
				}
			}
		}

		mipmap_offset = image_total;
		logic_width = MAX(1u, logic_width >> 1);
		logic_height = MAX(1u, logic_height >> 1);
	}

	if (_texture_make_mutable(texture, p_texture)) {
		// The texture must be mutable to be used as a copy destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_update(texture->driver_id, texture->draw_tracker, command_buffer_to_texture_copies_vector);

	return OK;
}
void RenderingDevice::_texture_check_shared_fallback(Texture *p_texture) {
	if (p_texture->shared_fallback == nullptr) {
		p_texture->shared_fallback = memnew(Texture::SharedFallback);
	}
}

void RenderingDevice::_texture_update_shared_fallback(RID p_texture_rid, Texture *p_texture, bool p_for_writing) {
	if (p_texture->shared_fallback == nullptr) {
		// This texture does not use any of the shared texture fallbacks.
		return;
	}

	if (p_texture->owner.is_valid()) {
		Texture *owner_texture = texture_owner.get_or_null(p_texture->owner);
		ERR_FAIL_NULL(owner_texture);
		if (p_for_writing) {
			// Only the main texture is used for writing when using the shared fallback.
			owner_texture->shared_fallback->revision++;
		} else if (p_texture->shared_fallback->revision != owner_texture->shared_fallback->revision) {
			// Copy the contents of the main texture into the shared texture fallback slice. Update the revision.
			_texture_copy_shared(p_texture->owner, owner_texture, p_texture_rid, p_texture);
			p_texture->shared_fallback->revision = owner_texture->shared_fallback->revision;
		}
	} else if (p_for_writing) {
		// Increment the revision of the texture so shared texture fallback slices must be updated.
		p_texture->shared_fallback->revision++;
	}
}
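// Illustrative summary (not part of the engine source): the shared fallback uses a
// monotonically increasing revision counter as a cheap dirty flag. Writes bump the
// owner's revision; reads on a slice compare revisions and only copy when stale:
//
//     owner.revision == slice.revision  ->  slice is up to date, no copy needed.
//     owner.revision != slice.revision  ->  copy owner to slice, then sync revisions.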
void RenderingDevice::_texture_free_shared_fallback(Texture *p_texture) {
	if (p_texture->shared_fallback != nullptr) {
		if (p_texture->shared_fallback->texture_tracker != nullptr) {
			RDG::resource_tracker_free(p_texture->shared_fallback->texture_tracker);
		}

		if (p_texture->shared_fallback->buffer_tracker != nullptr) {
			RDG::resource_tracker_free(p_texture->shared_fallback->buffer_tracker);
		}

		if (p_texture->shared_fallback->texture.id != 0) {
			texture_memory -= driver->texture_get_allocation_size(p_texture->shared_fallback->texture);
			driver->texture_free(p_texture->shared_fallback->texture);
		}

		if (p_texture->shared_fallback->buffer.id != 0) {
			buffer_memory -= driver->buffer_get_allocation_size(p_texture->shared_fallback->buffer);
			driver->buffer_free(p_texture->shared_fallback->buffer);
		}

		memdelete(p_texture->shared_fallback);
		p_texture->shared_fallback = nullptr;
	}
}
  1343. void RenderingDevice::_texture_copy_shared(RID p_src_texture_rid, Texture *p_src_texture, RID p_dst_texture_rid, Texture *p_dst_texture) {
  1344. // The only type of copying allowed is from the main texture to the slice texture, as slice textures are not allowed to be used for writing when using this fallback.
  1345. DEV_ASSERT(p_src_texture != nullptr);
  1346. DEV_ASSERT(p_dst_texture != nullptr);
  1347. DEV_ASSERT(p_src_texture->owner.is_null());
  1348. DEV_ASSERT(p_dst_texture->owner == p_src_texture_rid);
  1349. bool src_made_mutable = _texture_make_mutable(p_src_texture, p_src_texture_rid);
  1350. bool dst_made_mutable = _texture_make_mutable(p_dst_texture, p_dst_texture_rid);
  1351. if (src_made_mutable || dst_made_mutable) {
  1352. draw_graph.add_synchronization();
  1353. }
  1354. if (p_dst_texture->shared_fallback->raw_reinterpretation) {
  1355. // If one of the textures is a main texture and they have a reinterpret buffer, we prefer using that as it's guaranteed to be big enough to hold
  1356. // anything and it's how the shared textures that don't use slices are created.
  1357. bool src_has_buffer = p_src_texture->shared_fallback->buffer.id != 0;
  1358. bool dst_has_buffer = p_dst_texture->shared_fallback->buffer.id != 0;
  1359. bool from_src = p_src_texture->owner.is_null() && src_has_buffer;
  1360. bool from_dst = p_dst_texture->owner.is_null() && dst_has_buffer;
  1361. if (!from_src && !from_dst) {
  1362. // If neither texture passed the condition, we just pick whichever texture has a reinterpretation buffer.
  1363. from_src = src_has_buffer;
  1364. from_dst = dst_has_buffer;
  1365. }
  1366. // Pick the buffer and tracker to use from the right texture.
  1367. RDD::BufferID shared_buffer;
  1368. RDG::ResourceTracker *shared_buffer_tracker = nullptr;
  1369. if (from_src) {
  1370. shared_buffer = p_src_texture->shared_fallback->buffer;
  1371. shared_buffer_tracker = p_src_texture->shared_fallback->buffer_tracker;
  1372. } else if (from_dst) {
  1373. shared_buffer = p_dst_texture->shared_fallback->buffer;
  1374. shared_buffer_tracker = p_dst_texture->shared_fallback->buffer_tracker;
  1375. } else {
  1376. DEV_ASSERT(false && "This path should not be reachable.");
  1377. }
  1378. // FIXME: When using reinterpretation buffers, the only texture aspect supported is color. Depth or stencil contents won't get copied.
  1379. RDD::BufferTextureCopyRegion get_data_region;
  1380. RDG::RecordedBufferToTextureCopy update_copy;
  1381. RDD::TextureCopyableLayout first_copyable_layout;
  1382. RDD::TextureCopyableLayout copyable_layout;
  1383. RDD::TextureSubresource texture_subresource;
  1384. texture_subresource.aspect = RDD::TEXTURE_ASPECT_COLOR;
  1385. texture_subresource.layer = 0;
  1386. texture_subresource.mipmap = 0;
  1387. driver->texture_get_copyable_layout(p_dst_texture->shared_fallback->texture, texture_subresource, &first_copyable_layout);
  1388. // Copying each mipmap from main texture to a buffer and then to the slice texture.
  1389. thread_local LocalVector<RDD::BufferTextureCopyRegion> get_data_vector;
  1390. thread_local LocalVector<RDG::RecordedBufferToTextureCopy> update_vector;
  1391. get_data_vector.clear();
  1392. update_vector.clear();
  1393. for (uint32_t i = 0; i < p_dst_texture->mipmaps; i++) {
  1394. driver->texture_get_copyable_layout(p_dst_texture->shared_fallback->texture, texture_subresource, &copyable_layout);
  1395. uint32_t mipmap = p_dst_texture->base_mipmap + i;
  1396. get_data_region.buffer_offset = copyable_layout.offset - first_copyable_layout.offset;
  1397. get_data_region.texture_subresources.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
  1398. get_data_region.texture_subresources.base_layer = p_dst_texture->base_layer;
  1399. get_data_region.texture_subresources.mipmap = mipmap;
  1400. get_data_region.texture_subresources.layer_count = p_dst_texture->layers;
  1401. get_data_region.texture_region_size.x = MAX(1U, p_src_texture->width >> mipmap);
  1402. get_data_region.texture_region_size.y = MAX(1U, p_src_texture->height >> mipmap);
  1403. get_data_region.texture_region_size.z = MAX(1U, p_src_texture->depth >> mipmap);
  1404. get_data_vector.push_back(get_data_region);
  1405. update_copy.from_buffer = shared_buffer;
  1406. update_copy.region.buffer_offset = get_data_region.buffer_offset;
  1407. update_copy.region.texture_subresources.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
  1408. update_copy.region.texture_subresources.base_layer = texture_subresource.layer;
  1409. update_copy.region.texture_subresources.mipmap = texture_subresource.mipmap;
  1410. update_copy.region.texture_subresources.layer_count = get_data_region.texture_subresources.layer_count;
  1411. update_copy.region.texture_region_size.x = get_data_region.texture_region_size.x;
  1412. update_copy.region.texture_region_size.y = get_data_region.texture_region_size.y;
  1413. update_copy.region.texture_region_size.z = get_data_region.texture_region_size.z;
  1414. update_vector.push_back(update_copy);
  1415. texture_subresource.mipmap++;
  1416. }
  1417. draw_graph.add_texture_get_data(p_src_texture->driver_id, p_src_texture->draw_tracker, shared_buffer, get_data_vector, shared_buffer_tracker);
  1418. draw_graph.add_texture_update(p_dst_texture->shared_fallback->texture, p_dst_texture->shared_fallback->texture_tracker, update_vector, shared_buffer_tracker);
	} else {
		// Raw reinterpretation is not required. Use a regular texture copy.
		RDD::TextureCopyRegion copy_region;
		copy_region.src_subresources.aspect = p_src_texture->read_aspect_flags;
		copy_region.src_subresources.base_layer = p_dst_texture->base_layer;
		copy_region.src_subresources.layer_count = p_dst_texture->layers;
		copy_region.dst_subresources.aspect = p_dst_texture->read_aspect_flags;
		copy_region.dst_subresources.base_layer = 0;
		copy_region.dst_subresources.layer_count = copy_region.src_subresources.layer_count;

		// Copying each mipmap from the main texture to the slice texture.
		thread_local LocalVector<RDD::TextureCopyRegion> region_vector;
		region_vector.clear();
		for (uint32_t i = 0; i < p_dst_texture->mipmaps; i++) {
			uint32_t mipmap = p_dst_texture->base_mipmap + i;
			copy_region.src_subresources.mipmap = mipmap;
			copy_region.dst_subresources.mipmap = i;
			copy_region.size.x = MAX(1U, p_src_texture->width >> mipmap);
			copy_region.size.y = MAX(1U, p_src_texture->height >> mipmap);
			copy_region.size.z = MAX(1U, p_src_texture->depth >> mipmap);
			region_vector.push_back(copy_region);
		}

		draw_graph.add_texture_copy(p_src_texture->driver_id, p_src_texture->draw_tracker, p_dst_texture->shared_fallback->texture, p_dst_texture->shared_fallback->texture_tracker, region_vector);
	}
}

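// Sizing sketch for the reinterpret buffer below (illustrative numbers, not taken from any
// particular driver): for a 256x256 RGBA8 texture (4 bytes per pixel) with a row pitch step
// of 256 bytes, row_pitch = STEPIFY(256 * 4, 256) = 1024 bytes; the allocation then covers
// every padded row of every slice, rounded up to the driver's transfer alignment.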
void RenderingDevice::_texture_create_reinterpret_buffer(Texture *p_texture) {
	uint64_t row_pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
	uint64_t transfer_alignment = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT);
	uint32_t pixel_bytes = get_image_format_pixel_size(p_texture->format);
	uint32_t row_pitch = STEPIFY(p_texture->width * pixel_bytes, row_pitch_step);
	uint64_t buffer_size = STEPIFY(pixel_bytes * row_pitch * p_texture->height * p_texture->depth, transfer_alignment);
	p_texture->shared_fallback->buffer = driver->buffer_create(buffer_size, RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	buffer_memory += driver->buffer_get_allocation_size(p_texture->shared_fallback->buffer);

	RDG::ResourceTracker *tracker = RDG::resource_tracker_create();
	tracker->buffer_driver_id = p_texture->shared_fallback->buffer;
	p_texture->shared_fallback->buffer_tracker = tracker;
}

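// Reads back a CPU-mappable texture by mapping each subresource directly. Rows in the mapped
// memory are padded to the driver's row pitch, while the returned vector is tightly packed,
// so the loop below copies row by row (block row by block row for compressed formats).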
Vector<uint8_t> RenderingDevice::_texture_get_data(Texture *tex, uint32_t p_layer, bool p_2d) {
	uint32_t width, height, depth;
	uint32_t tight_mip_size = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, tex->mipmaps, &width, &height, &depth);

	Vector<uint8_t> image_data;
	image_data.resize(tight_mip_size);

	uint32_t blockw, blockh;
	get_compressed_image_format_block_dimensions(tex->format, blockw, blockh);
	uint32_t block_size = get_compressed_image_format_block_byte_size(tex->format);
	uint32_t pixel_size = get_image_format_pixel_size(tex->format);

	{
		uint8_t *w = image_data.ptrw();

		uint32_t mipmap_offset = 0;
		for (uint32_t mm_i = 0; mm_i < tex->mipmaps; mm_i++) {
			uint32_t image_total = get_image_format_required_size(tex->format, tex->width, tex->height, p_2d ? 1 : tex->depth, mm_i + 1, &width, &height, &depth);

			uint8_t *write_ptr_mipmap = w + mipmap_offset;
			tight_mip_size = image_total - mipmap_offset;

			RDD::TextureSubresource subres;
			subres.aspect = RDD::TEXTURE_ASPECT_COLOR;
			subres.layer = p_layer;
			subres.mipmap = mm_i;
			RDD::TextureCopyableLayout layout;
			driver->texture_get_copyable_layout(tex->driver_id, subres, &layout);

			uint8_t *img_mem = driver->texture_map(tex->driver_id, subres);
			ERR_FAIL_NULL_V(img_mem, Vector<uint8_t>());

			for (uint32_t z = 0; z < depth; z++) {
				uint8_t *write_ptr = write_ptr_mipmap + z * tight_mip_size / depth;
				const uint8_t *slice_read_ptr = img_mem + z * layout.depth_pitch;

				if (block_size > 1) {
					// Compressed.
					uint32_t line_width = (block_size * (width / blockw));
					for (uint32_t y = 0; y < height / blockh; y++) {
						const uint8_t *rptr = slice_read_ptr + y * layout.row_pitch;
						uint8_t *wptr = write_ptr + y * line_width;
						memcpy(wptr, rptr, line_width);
					}
				} else {
					// Uncompressed.
					for (uint32_t y = 0; y < height; y++) {
						const uint8_t *rptr = slice_read_ptr + y * layout.row_pitch;
						uint8_t *wptr = write_ptr + y * pixel_size * width;
						memcpy(wptr, rptr, (uint64_t)pixel_size * width);
					}
				}
			}

			driver->texture_unmap(tex->driver_id);

			mipmap_offset = image_total;
		}
	}

	return image_data;
}

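// Usage sketch (hypothetical caller code; `rd`, `tex_rid` and the format are assumptions,
// and this assumes a simple RGBA8 texture with a single mipmap):
//   Vector<uint8_t> data = rd->texture_get_data(tex_rid, 0);
//   Ref<Image> img = Image::create_from_data(width, height, false, Image::FORMAT_RGBA8, data);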
Vector<uint8_t> RenderingDevice::texture_get_data(RID p_texture, uint32_t p_layer) {
	ERR_RENDER_THREAD_GUARD_V(Vector<uint8_t>());

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, Vector<uint8_t>());

	ERR_FAIL_COND_V_MSG(tex->bound, Vector<uint8_t>(),
			"Texture can't be retrieved while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to retrieve this texture.");
	ERR_FAIL_COND_V_MSG(!(tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), Vector<uint8_t>(),
			"Texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be retrieved.");
	ERR_FAIL_COND_V(p_layer >= tex->layers, Vector<uint8_t>());

	_check_transfer_worker_texture(tex);

	if (tex->usage_flags & TEXTURE_USAGE_CPU_READ_BIT) {
		// Does not need anything fancy, map and read.
		return _texture_get_data(tex, p_layer);
	} else {
		LocalVector<RDD::TextureCopyableLayout> mip_layouts;
		uint32_t work_mip_alignment = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT);
		uint32_t work_buffer_size = 0;
		mip_layouts.resize(tex->mipmaps);
		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			RDD::TextureSubresource subres;
			subres.aspect = RDD::TEXTURE_ASPECT_COLOR;
			subres.layer = p_layer;
			subres.mipmap = i;
			driver->texture_get_copyable_layout(tex->driver_id, subres, &mip_layouts[i]);

			// Assuming layers are tightly packed. If this is not true on some driver, we must modify the copy algorithm.
			DEV_ASSERT(mip_layouts[i].layer_pitch == mip_layouts[i].size / tex->layers);

			work_buffer_size = STEPIFY(work_buffer_size, work_mip_alignment) + mip_layouts[i].size;
		}

		RDD::BufferID tmp_buffer = driver->buffer_create(work_buffer_size, RDD::BUFFER_USAGE_TRANSFER_TO_BIT, RDD::MEMORY_ALLOCATION_TYPE_CPU);
		ERR_FAIL_COND_V(!tmp_buffer, Vector<uint8_t>());

		thread_local LocalVector<RDD::BufferTextureCopyRegion> command_buffer_texture_copy_regions_vector;
		command_buffer_texture_copy_regions_vector.clear();

		uint32_t w = tex->width;
		uint32_t h = tex->height;
		uint32_t d = tex->depth;
		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			RDD::BufferTextureCopyRegion copy_region;
			copy_region.buffer_offset = mip_layouts[i].offset;
			copy_region.texture_subresources.aspect = tex->read_aspect_flags;
			copy_region.texture_subresources.mipmap = i;
			copy_region.texture_subresources.base_layer = p_layer;
			copy_region.texture_subresources.layer_count = 1;
			copy_region.texture_region_size.x = w;
			copy_region.texture_region_size.y = h;
			copy_region.texture_region_size.z = d;
			command_buffer_texture_copy_regions_vector.push_back(copy_region);

			w = MAX(1u, w >> 1);
			h = MAX(1u, h >> 1);
			d = MAX(1u, d >> 1);
		}

		if (_texture_make_mutable(tex, p_texture)) {
			// The texture must be mutable to be used as a copy source due to layout transitions.
			draw_graph.add_synchronization();
		}

		draw_graph.add_texture_get_data(tex->driver_id, tex->draw_tracker, tmp_buffer, command_buffer_texture_copy_regions_vector);

		// Flush everything so memory can be safely mapped.
		_flush_and_stall_for_all_frames();

		const uint8_t *read_ptr = driver->buffer_map(tmp_buffer);
		ERR_FAIL_NULL_V(read_ptr, Vector<uint8_t>());

		uint32_t block_w = 0;
		uint32_t block_h = 0;
		get_compressed_image_format_block_dimensions(tex->format, block_w, block_h);

		Vector<uint8_t> buffer_data;
		uint32_t tight_buffer_size = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, tex->mipmaps);
		buffer_data.resize(tight_buffer_size);

		uint8_t *write_ptr = buffer_data.ptrw();

		w = tex->width;
		h = tex->height;
		d = tex->depth;
		for (uint32_t i = 0; i < tex->mipmaps; i++) {
			uint32_t width = 0, height = 0, depth = 0;
			uint32_t tight_mip_size = get_image_format_required_size(tex->format, w, h, d, 1, &width, &height, &depth);
			uint32_t tight_row_pitch = tight_mip_size / ((height / block_h) * depth);

			// Copy row-by-row to erase padding due to alignments.
			const uint8_t *rp = read_ptr;
			uint8_t *wp = write_ptr;
			for (uint32_t row = h * d / block_h; row != 0; row--) {
				memcpy(wp, rp, tight_row_pitch);
				rp += mip_layouts[i].row_pitch;
				wp += tight_row_pitch;
			}

			w = MAX(block_w, w >> 1);
			h = MAX(block_h, h >> 1);
			d = MAX(1u, d >> 1);
			read_ptr += mip_layouts[i].size;
			write_ptr += tight_mip_size;
		}

		driver->buffer_unmap(tmp_buffer);
		driver->buffer_free(tmp_buffer);

		return buffer_data;
	}
}

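// Usage sketch for the asynchronous path below (hypothetical caller; the receiver method is
// an assumption). The engine invokes the callback with the downloaded data on a later frame:
//   Error err = rd->texture_get_data_async(tex_rid, 0, callable_mp(this, &MyClass::_on_texture_data));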
Error RenderingDevice::texture_get_data_async(RID p_texture, uint32_t p_layer, const Callable &p_callback) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(tex->bound, ERR_INVALID_PARAMETER, "Texture can't be retrieved while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to retrieve this texture.");
	ERR_FAIL_COND_V_MSG(!(tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER, "Texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be retrieved.");
	ERR_FAIL_COND_V(p_layer >= tex->layers, ERR_INVALID_PARAMETER);

	_check_transfer_worker_texture(tex);

	thread_local LocalVector<RDD::TextureCopyableLayout> mip_layouts;
	mip_layouts.resize(tex->mipmaps);
	for (uint32_t i = 0; i < tex->mipmaps; i++) {
		RDD::TextureSubresource subres;
		subres.aspect = RDD::TEXTURE_ASPECT_COLOR;
		subres.layer = p_layer;
		subres.mipmap = i;
		driver->texture_get_copyable_layout(tex->driver_id, subres, &mip_layouts[i]);

		// Assuming layers are tightly packed. If this is not true on some driver, we must modify the copy algorithm.
		DEV_ASSERT(mip_layouts[i].layer_pitch == mip_layouts[i].size / tex->layers);
	}

	ERR_FAIL_COND_V(mip_layouts.is_empty(), ERR_INVALID_PARAMETER);

	if (_texture_make_mutable(tex, p_texture)) {
		// The texture must be mutable to be used as a copy source due to layout transitions.
		draw_graph.add_synchronization();
	}

	TextureGetDataRequest get_data_request;
	get_data_request.callback = p_callback;
	get_data_request.frame_local_index = frames[frame].download_buffer_texture_copy_regions.size();
	get_data_request.width = tex->width;
	get_data_request.height = tex->height;
	get_data_request.depth = tex->depth;
	get_data_request.format = tex->format;
	get_data_request.mipmaps = tex->mipmaps;

	uint32_t block_w, block_h;
	get_compressed_image_format_block_dimensions(tex->format, block_w, block_h);

	uint32_t pixel_size = get_image_format_pixel_size(tex->format);
	uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(tex->format);
	uint32_t w, h, d;
	uint32_t required_align = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_TRANSFER_ALIGNMENT);
	uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
	uint32_t region_size = texture_download_region_size_px;
	uint32_t logic_w = tex->width;
	uint32_t logic_h = tex->height;
	uint32_t mipmap_offset = 0;
	uint32_t block_write_offset;
	uint32_t block_write_amount;
	StagingRequiredAction required_action;
	uint32_t flushed_copies = 0;
	for (uint32_t i = 0; i < tex->mipmaps; i++) {
		uint32_t image_total = get_image_format_required_size(tex->format, tex->width, tex->height, tex->depth, i + 1, &w, &h, &d);
		uint32_t tight_mip_size = image_total - mipmap_offset;
		for (uint32_t z = 0; z < d; z++) {
			for (uint32_t y = 0; y < h; y += region_size) {
				for (uint32_t x = 0; x < w; x += region_size) {
					uint32_t region_w = MIN(region_size, w - x);
					uint32_t region_h = MIN(region_size, h - y);
					ERR_FAIL_COND_V(region_w % block_w, ERR_BUG);
					ERR_FAIL_COND_V(region_h % block_h, ERR_BUG);

					uint32_t region_logic_w = MIN(region_size, logic_w - x);
					uint32_t region_logic_h = MIN(region_size, logic_h - y);
					uint32_t region_pitch = (region_w * pixel_size * block_w) >> pixel_rshift;
					region_pitch = STEPIFY(region_pitch, pitch_step);
					uint32_t to_allocate = region_pitch * region_h;

					Error err = _staging_buffer_allocate(download_staging_buffers, to_allocate, required_align, block_write_offset, block_write_amount, required_action, false);
					ERR_FAIL_COND_V(err, ERR_CANT_CREATE);

					if ((get_data_request.frame_local_count > 0) && required_action == STAGING_REQUIRED_ACTION_FLUSH_AND_STALL_ALL) {
						for (uint32_t j = flushed_copies; j < get_data_request.frame_local_count; j++) {
							uint32_t local_index = get_data_request.frame_local_index + j;
							draw_graph.add_texture_get_data(tex->driver_id, tex->draw_tracker, frames[frame].download_texture_staging_buffers[local_index], frames[frame].download_buffer_texture_copy_regions[local_index]);
						}

						flushed_copies = get_data_request.frame_local_count;
					}

					_staging_buffer_execute_required_action(download_staging_buffers, required_action);

					RDD::BufferTextureCopyRegion copy_region;
					copy_region.buffer_offset = block_write_offset;
					copy_region.texture_subresources.aspect = tex->read_aspect_flags;
					copy_region.texture_subresources.mipmap = i;
					copy_region.texture_subresources.base_layer = p_layer;
					copy_region.texture_subresources.layer_count = 1;
					copy_region.texture_offset = Vector3i(x, y, z);
					copy_region.texture_region_size = Vector3i(region_logic_w, region_logic_h, 1);

					frames[frame].download_texture_staging_buffers.push_back(download_staging_buffers.blocks[download_staging_buffers.current].driver_id);
					frames[frame].download_buffer_texture_copy_regions.push_back(copy_region);
					frames[frame].download_texture_mipmap_offsets.push_back(mipmap_offset + (tight_mip_size / d) * z);
					get_data_request.frame_local_count++;

					download_staging_buffers.blocks.write[download_staging_buffers.current].fill_amount = block_write_offset + block_write_amount;
				}
			}
		}

		mipmap_offset = image_total;
		logic_w = MAX(1u, logic_w >> 1);
		logic_h = MAX(1u, logic_h >> 1);
	}

	if (get_data_request.frame_local_count > 0) {
		for (uint32_t i = flushed_copies; i < get_data_request.frame_local_count; i++) {
			uint32_t local_index = get_data_request.frame_local_index + i;
			draw_graph.add_texture_get_data(tex->driver_id, tex->draw_tracker, frames[frame].download_texture_staging_buffers[local_index], frames[frame].download_buffer_texture_copy_regions[local_index]);
		}

		flushed_copies = get_data_request.frame_local_count;
		frames[frame].download_texture_get_data_requests.push_back(get_data_request);
	}

	return OK;
}

bool RenderingDevice::texture_is_shared(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(false);

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, false);
	return tex->owner.is_valid();
}

bool RenderingDevice::texture_is_valid(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(false);

	return texture_owner.owns(p_texture);
}

RD::TextureFormat RenderingDevice::texture_get_format(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(TextureFormat());

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, TextureFormat());

	TextureFormat tf;
	tf.format = tex->format;
	tf.width = tex->width;
	tf.height = tex->height;
	tf.depth = tex->depth;
	tf.array_layers = tex->layers;
	tf.mipmaps = tex->mipmaps;
	tf.texture_type = tex->type;
	tf.samples = tex->samples;
	tf.usage_bits = tex->usage_flags;
	tf.shareable_formats = tex->allowed_shared_formats;
	tf.is_resolve_buffer = tex->is_resolve_buffer;
	tf.is_discardable = tex->is_discardable;

	return tf;
}

Size2i RenderingDevice::texture_size(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(Size2i());

	Texture *tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(tex, Size2i());
	return Size2i(tex->width, tex->height);
}

#ifndef DISABLE_DEPRECATED
uint64_t RenderingDevice::texture_get_native_handle(RID p_texture) {
	return get_driver_resource(DRIVER_RESOURCE_TEXTURE, p_texture);
}
#endif

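// Usage sketch (hypothetical RIDs and dimensions): copy the full base mip of layer 0 from
// one texture to another with the same aspect:
//   rd->texture_copy(src_rid, dst_rid, Vector3(0, 0, 0), Vector3(0, 0, 0), Vector3(w, h, 1), 0, 0, 0, 0);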
Error RenderingDevice::texture_copy(RID p_from_texture, RID p_to_texture, const Vector3 &p_from, const Vector3 &p_to, const Vector3 &p_size, uint32_t p_src_mipmap, uint32_t p_dst_mipmap, uint32_t p_src_layer, uint32_t p_dst_layer) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *src_tex = texture_owner.get_or_null(p_from_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be copied while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to copy this texture.");
	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be copied.");

	uint32_t src_width, src_height, src_depth;
	get_image_format_required_size(src_tex->format, src_tex->width, src_tex->height, src_tex->depth, p_src_mipmap + 1, &src_width, &src_height, &src_depth);

	ERR_FAIL_COND_V(p_from.x < 0 || p_from.x + p_size.x > src_width, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_from.y < 0 || p_from.y + p_size.y > src_height, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_from.z < 0 || p_from.z + p_size.z > src_depth, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_src_mipmap >= src_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_src_layer >= src_tex->layers, ERR_INVALID_PARAMETER);

	Texture *dst_tex = texture_owner.get_or_null(p_to_texture);
	ERR_FAIL_NULL_V(dst_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
			"Destination texture can't be copied to while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to copy this texture.");
	ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Destination texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_TO_BIT` to be set to be copied to.");

	uint32_t dst_width, dst_height, dst_depth;
	get_image_format_required_size(dst_tex->format, dst_tex->width, dst_tex->height, dst_tex->depth, p_dst_mipmap + 1, &dst_width, &dst_height, &dst_depth);

	ERR_FAIL_COND_V(p_to.x < 0 || p_to.x + p_size.x > dst_width, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_to.y < 0 || p_to.y + p_size.y > dst_height, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_to.z < 0 || p_to.z + p_size.z > dst_depth, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_dst_mipmap >= dst_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_dst_layer >= dst_tex->layers, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->read_aspect_flags != dst_tex->read_aspect_flags, ERR_INVALID_PARAMETER,
			"Source and destination texture must be of the same type (color or depth).");

	_check_transfer_worker_texture(src_tex);
	_check_transfer_worker_texture(dst_tex);

	RDD::TextureCopyRegion copy_region;
	copy_region.src_subresources.aspect = src_tex->read_aspect_flags;
	copy_region.src_subresources.mipmap = p_src_mipmap;
	copy_region.src_subresources.base_layer = p_src_layer;
	copy_region.src_subresources.layer_count = 1;
	copy_region.src_offset = p_from;

	copy_region.dst_subresources.aspect = dst_tex->read_aspect_flags;
	copy_region.dst_subresources.mipmap = p_dst_mipmap;
	copy_region.dst_subresources.base_layer = p_dst_layer;
	copy_region.dst_subresources.layer_count = 1;
	copy_region.dst_offset = p_to;

	copy_region.size = p_size;

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_to_texture, dst_tex, true);

	// The textures must be mutable to be used in the copy operation.
	bool src_made_mutable = _texture_make_mutable(src_tex, p_from_texture);
	bool dst_made_mutable = _texture_make_mutable(dst_tex, p_to_texture);
	if (src_made_mutable || dst_made_mutable) {
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_copy(src_tex->driver_id, src_tex->draw_tracker, dst_tex->driver_id, dst_tex->draw_tracker, copy_region);

	return OK;
}

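// Resolve sketch (hypothetical RIDs): the source must be a multisampled 2D texture and the
// destination a single-sample texture of the same format and dimensions:
//   rd->texture_resolve_multisample(msaa_color_rid, resolved_color_rid);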
Error RenderingDevice::texture_resolve_multisample(RID p_from_texture, RID p_to_texture) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *src_tex = texture_owner.get_or_null(p_from_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be resolved while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to resolve this texture.");
	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_FROM_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_FROM_BIT` to be set to be resolved.");

	ERR_FAIL_COND_V_MSG(src_tex->type != TEXTURE_TYPE_2D, ERR_INVALID_PARAMETER, "Source texture must be 2D (or a slice of a 3D/Cube texture).");
	ERR_FAIL_COND_V_MSG(src_tex->samples == TEXTURE_SAMPLES_1, ERR_INVALID_PARAMETER, "Source texture must be multisampled.");

	Texture *dst_tex = texture_owner.get_or_null(p_to_texture);
	ERR_FAIL_NULL_V(dst_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(dst_tex->bound, ERR_INVALID_PARAMETER,
			"Destination texture can't be resolved to while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to resolve this texture.");
	ERR_FAIL_COND_V_MSG(!(dst_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Destination texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_TO_BIT` to be set to be resolved to.");

	ERR_FAIL_COND_V_MSG(dst_tex->type != TEXTURE_TYPE_2D, ERR_INVALID_PARAMETER, "Destination texture must be 2D (or a slice of a 3D/Cube texture).");
	ERR_FAIL_COND_V_MSG(dst_tex->samples != TEXTURE_SAMPLES_1, ERR_INVALID_PARAMETER, "Destination texture must not be multisampled.");

	ERR_FAIL_COND_V_MSG(src_tex->format != dst_tex->format, ERR_INVALID_PARAMETER, "Source and destination textures must be the same format.");
	ERR_FAIL_COND_V_MSG(src_tex->width != dst_tex->width || src_tex->height != dst_tex->height || src_tex->depth != dst_tex->depth, ERR_INVALID_PARAMETER, "Source and destination textures must have the same dimensions.");

	ERR_FAIL_COND_V_MSG(src_tex->read_aspect_flags != dst_tex->read_aspect_flags, ERR_INVALID_PARAMETER,
			"Source and destination texture must be of the same type (color or depth).");

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_to_texture, dst_tex, true);

	_check_transfer_worker_texture(src_tex);
	_check_transfer_worker_texture(dst_tex);

	// The textures must be mutable to be used in the resolve operation.
	bool src_made_mutable = _texture_make_mutable(src_tex, p_from_texture);
	bool dst_made_mutable = _texture_make_mutable(dst_tex, p_to_texture);
	if (src_made_mutable || dst_made_mutable) {
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_resolve(src_tex->driver_id, src_tex->draw_tracker, dst_tex->driver_id, dst_tex->draw_tracker, src_tex->base_layer, src_tex->base_mipmap, dst_tex->base_layer, dst_tex->base_mipmap);

	return OK;
}

void RenderingDevice::texture_set_discardable(RID p_texture, bool p_discardable) {
	ERR_RENDER_THREAD_GUARD();

	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL(texture);

	texture->is_discardable = p_discardable;

	if (texture->draw_tracker != nullptr) {
		texture->draw_tracker->is_discardable = p_discardable;
	}

	if (texture->shared_fallback != nullptr && texture->shared_fallback->texture_tracker != nullptr) {
		texture->shared_fallback->texture_tracker->is_discardable = p_discardable;
	}
}

bool RenderingDevice::texture_is_discardable(RID p_texture) {
	ERR_RENDER_THREAD_GUARD_V(false);

	Texture *texture = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(texture, false);

	return texture->is_discardable;
}

Error RenderingDevice::texture_clear(RID p_texture, const Color &p_color, uint32_t p_base_mipmap, uint32_t p_mipmaps, uint32_t p_base_layer, uint32_t p_layers) {
	ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);

	Texture *src_tex = texture_owner.get_or_null(p_texture);
	ERR_FAIL_NULL_V(src_tex, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(src_tex->bound, ERR_INVALID_PARAMETER,
			"Source texture can't be cleared while a draw list that uses it as part of a framebuffer is being created. Ensure the draw list is finalized (and that the color/depth texture using it is not set to `RenderingDevice.FINAL_ACTION_CONTINUE`) to clear this texture.");

	ERR_FAIL_COND_V(p_layers == 0, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_mipmaps == 0, ERR_INVALID_PARAMETER);

	ERR_FAIL_COND_V_MSG(!(src_tex->usage_flags & TEXTURE_USAGE_CAN_COPY_TO_BIT), ERR_INVALID_PARAMETER,
			"Source texture requires the `RenderingDevice.TEXTURE_USAGE_CAN_COPY_TO_BIT` to be set to be cleared.");

	ERR_FAIL_COND_V(p_base_mipmap + p_mipmaps > src_tex->mipmaps, ERR_INVALID_PARAMETER);
	ERR_FAIL_COND_V(p_base_layer + p_layers > src_tex->layers, ERR_INVALID_PARAMETER);

	_check_transfer_worker_texture(src_tex);

	RDD::TextureSubresourceRange range;
	range.aspect = src_tex->read_aspect_flags;
	range.base_mipmap = src_tex->base_mipmap + p_base_mipmap;
	range.mipmap_count = p_mipmaps;
	range.base_layer = src_tex->base_layer + p_base_layer;
	range.layer_count = p_layers;

	// Indicate the texture will get modified for the shared texture fallback.
	_texture_update_shared_fallback(p_texture, src_tex, true);

	if (_texture_make_mutable(src_tex, p_texture)) {
		// The texture must be mutable to be used as a clear destination.
		draw_graph.add_synchronization();
	}

	draw_graph.add_texture_clear(src_tex->driver_id, src_tex->draw_tracker, p_color, range);

	return OK;
}

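// The support check below treats p_usage as a requested bit set: OR-ing the requested bits
// into the supported set must not add any new bits, otherwise at least one requested usage
// is not supported by the driver for this format.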
bool RenderingDevice::texture_is_format_supported_for_usage(DataFormat p_format, BitField<RenderingDevice::TextureUsageBits> p_usage) const {
	ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);

	bool cpu_readable = (p_usage & RDD::TEXTURE_USAGE_CPU_READ_BIT);
	BitField<TextureUsageBits> supported = driver->texture_get_usages_supported_by_format(p_format, cpu_readable);
	bool any_unsupported = (((int64_t)supported) | ((int64_t)p_usage)) != ((int64_t)supported);
	return !any_unsupported;
}

/*********************/
/**** FRAMEBUFFER ****/
/*********************/

RDD::RenderPassID RenderingDevice::_render_pass_create(RenderingDeviceDriver *p_driver, const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, uint32_t p_view_count, Vector<TextureSamples> *r_samples) {
	// NOTE:
	// Before the refactor to RenderingDevice-RenderingDeviceDriver, there was commented out code to
	// specify dependencies to external subpasses. Since it had been unused for a long time, it wasn't ported
	// to the new architecture.

	LocalVector<int32_t> attachment_last_pass;
	attachment_last_pass.resize(p_attachments.size());

	if (p_view_count > 1) {
		const RDD::MultiviewCapabilities &capabilities = p_driver->get_multiview_capabilities();

		// This only works with multiview!
		ERR_FAIL_COND_V_MSG(!capabilities.is_supported, RDD::RenderPassID(), "Multiview not supported");

		// Make sure we limit this to the number of views we support.
		ERR_FAIL_COND_V_MSG(p_view_count > capabilities.max_view_count, RDD::RenderPassID(), "Hardware does not support requested number of views for Multiview render pass");
	}

	LocalVector<RDD::Attachment> attachments;
	LocalVector<int> attachment_remap;

	for (int i = 0; i < p_attachments.size(); i++) {
		if (p_attachments[i].usage_flags == AttachmentFormat::UNUSED_ATTACHMENT) {
			attachment_remap.push_back(RDD::AttachmentReference::UNUSED);
			continue;
		}

		ERR_FAIL_INDEX_V(p_attachments[i].format, DATA_FORMAT_MAX, RDD::RenderPassID());
		ERR_FAIL_INDEX_V(p_attachments[i].samples, TEXTURE_SAMPLES_MAX, RDD::RenderPassID());
		ERR_FAIL_COND_V_MSG(!(p_attachments[i].usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT | TEXTURE_USAGE_VRS_ATTACHMENT_BIT)),
				RDD::RenderPassID(), "Texture format for index (" + itos(i) + ") requires an attachment (color, depth-stencil, input or VRS) bit set.");

		RDD::Attachment description;
		description.format = p_attachments[i].format;
		description.samples = p_attachments[i].samples;

		// We can set up a framebuffer where we write to our VRS texture to initialize it.
		// We make the assumption here that if our texture is actually used as our VRS attachment,
		// it is used as such for each subpass. This is fairly certain given the restrictions on subpasses.
		bool is_vrs = (p_attachments[i].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) && i == p_passes[0].vrs_attachment;

		if (is_vrs) {
			description.load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
			description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
			description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_LOAD;
			description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
			description.initial_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
			description.final_layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
		} else {
			if (p_attachments[i].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
				description.load_op = p_load_ops[i];
				description.store_op = p_store_ops[i];
				description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
				description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
				description.initial_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
				description.final_layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
			} else if (p_attachments[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
				description.load_op = p_load_ops[i];
				description.store_op = p_store_ops[i];
				description.stencil_load_op = p_load_ops[i];
				description.stencil_store_op = p_store_ops[i];
				description.initial_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
				description.final_layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
			} else {
				description.load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
				description.store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
				description.stencil_load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
				description.stencil_store_op = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
				description.initial_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
				description.final_layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
			}
		}

		attachment_last_pass[i] = -1;
		attachment_remap.push_back(attachments.size());
		attachments.push_back(description);
	}

	LocalVector<RDD::Subpass> subpasses;
	subpasses.resize(p_passes.size());
	LocalVector<RDD::SubpassDependency> subpass_dependencies;

	for (int i = 0; i < p_passes.size(); i++) {
		const FramebufferPass *pass = &p_passes[i];
		RDD::Subpass &subpass = subpasses[i];

		TextureSamples texture_samples = TEXTURE_SAMPLES_1;
		bool is_multisample_first = true;

		for (int j = 0; j < pass->color_attachments.size(); j++) {
			int32_t attachment = pass->color_attachments[j];
			RDD::AttachmentReference reference;
			if (attachment == ATTACHMENT_UNUSED) {
				reference.attachment = RDD::AttachmentReference::UNUSED;
				reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
			} else {
				ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), color attachment (" + itos(j) + ").");
				ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it's marked as depth, but it's not usable as color attachment.");
				ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");

				if (is_multisample_first) {
					texture_samples = p_attachments[attachment].samples;
					is_multisample_first = false;
				} else {
					ERR_FAIL_COND_V_MSG(texture_samples != p_attachments[attachment].samples, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), if an attachment is marked as multisample, all of them should be multisample and use the same number of samples.");
				}

				reference.attachment = attachment_remap[attachment];
				reference.layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
				attachment_last_pass[attachment] = i;
			}
			reference.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
			subpass.color_references.push_back(reference);
		}

		for (int j = 0; j < pass->input_attachments.size(); j++) {
			int32_t attachment = pass->input_attachments[j];
			RDD::AttachmentReference reference;
			if (attachment == ATTACHMENT_UNUSED) {
				reference.attachment = RDD::AttachmentReference::UNUSED;
				reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
			} else {
				ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), input attachment (" + itos(j) + ").");
				ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_INPUT_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it isn't marked as an input texture.");
				ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");
				reference.attachment = attachment_remap[attachment];
				reference.layout = RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
				attachment_last_pass[attachment] = i;
			}
			reference.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
			subpass.input_references.push_back(reference);
		}

		if (pass->resolve_attachments.size() > 0) {
			ERR_FAIL_COND_V_MSG(pass->resolve_attachments.size() != pass->color_attachments.size(), RDD::RenderPassID(), "The amount of resolve attachments (" + itos(pass->resolve_attachments.size()) + ") must match the number of color attachments (" + itos(pass->color_attachments.size()) + ").");
			ERR_FAIL_COND_V_MSG(texture_samples == TEXTURE_SAMPLES_1, RDD::RenderPassID(), "Resolve attachments specified, but color attachments are not multisample.");
		}

		for (int j = 0; j < pass->resolve_attachments.size(); j++) {
			int32_t attachment = pass->resolve_attachments[j];
			RDD::AttachmentReference reference;
			if (attachment == ATTACHMENT_UNUSED) {
				reference.attachment = RDD::AttachmentReference::UNUSED;
				reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
			} else {
				ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment (" + itos(j) + ").");
				ERR_FAIL_COND_V_MSG(pass->color_attachments[j] == ATTACHMENT_UNUSED, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment (" + itos(j) + "), the respective color attachment is marked as unused.");
				ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachment, it isn't marked as a color texture.");
				ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");

				bool multisample = p_attachments[attachment].samples > TEXTURE_SAMPLES_1;
				ERR_FAIL_COND_V_MSG(multisample, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), resolve attachments can't be multisample.");

				// A resolve target's contents are fully overwritten, so its previous contents don't need to be loaded.
				attachments[attachment].load_op = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;

				reference.attachment = attachment_remap[attachment];
				reference.layout = RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
				attachment_last_pass[attachment] = i;
			}
			reference.aspect = RDD::TEXTURE_ASPECT_COLOR_BIT;
			subpass.resolve_references.push_back(reference);
		}

		if (pass->depth_attachment != ATTACHMENT_UNUSED) {
			int32_t attachment = pass->depth_attachment;
			ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), depth attachment.");
			ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it's marked as depth, but it's not a depth attachment.");
			ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");
			subpass.depth_stencil_reference.attachment = attachment_remap[attachment];
			subpass.depth_stencil_reference.layout = RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
			attachment_last_pass[attachment] = i;

			if (is_multisample_first) {
				texture_samples = p_attachments[attachment].samples;
				is_multisample_first = false;
			} else {
				ERR_FAIL_COND_V_MSG(texture_samples != p_attachments[attachment].samples, RDD::RenderPassID(), "Invalid framebuffer depth format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), if an attachment is marked as multisample, all of them should be multisample and use the same number of samples including the depth.");
			}
		} else {
			subpass.depth_stencil_reference.attachment = RDD::AttachmentReference::UNUSED;
			subpass.depth_stencil_reference.layout = RDD::TEXTURE_LAYOUT_UNDEFINED;
		}

		if (pass->vrs_attachment != ATTACHMENT_UNUSED) {
			int32_t attachment = pass->vrs_attachment;
			ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer VRS format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), VRS attachment.");
			ERR_FAIL_COND_V_MSG(!(p_attachments[attachment].usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT), RDD::RenderPassID(), "Invalid framebuffer VRS format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it's marked as VRS, but it's not a VRS attachment.");
			ERR_FAIL_COND_V_MSG(attachment_last_pass[attachment] == i, RDD::RenderPassID(), "Invalid framebuffer VRS attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), it already was used for something else before in this pass.");

			subpass.vrs_reference.attachment = attachment_remap[attachment];
			subpass.vrs_reference.layout = RDD::TEXTURE_LAYOUT_VRS_ATTACHMENT_OPTIMAL;

			attachment_last_pass[attachment] = i;
		}

		for (int j = 0; j < pass->preserve_attachments.size(); j++) {
			int32_t attachment = pass->preserve_attachments[j];
			ERR_FAIL_COND_V_MSG(attachment == ATTACHMENT_UNUSED, RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), preserve attachment (" + itos(j) + "). Preserve attachments can't be unused.");
			ERR_FAIL_INDEX_V_MSG(attachment, p_attachments.size(), RDD::RenderPassID(), "Invalid framebuffer format attachment(" + itos(attachment) + "), in pass (" + itos(i) + "), preserve attachment (" + itos(j) + ").");

			if (attachment_last_pass[attachment] != i) {
				// Preserve can still be used to keep depth or color from being discarded after use.
				attachment_last_pass[attachment] = i;
				subpasses[i].preserve_attachments.push_back(attachment);
			}
		}

		if (r_samples) {
			r_samples->push_back(texture_samples);
		}

		if (i > 0) {
			RDD::SubpassDependency dependency;
			dependency.src_subpass = i - 1;
			dependency.dst_subpass = i;
			dependency.src_stages = (RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
			dependency.dst_stages = (RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
			dependency.src_access = (RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
			dependency.dst_access = (RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_READ_BIT | RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | RDD::BARRIER_ACCESS_INPUT_ATTACHMENT_READ_BIT);
			subpass_dependencies.push_back(dependency);
		}
	}

	RDD::RenderPassID render_pass = p_driver->render_pass_create(attachments, subpasses, subpass_dependencies, p_view_count);
	ERR_FAIL_COND_V(!render_pass, RDD::RenderPassID());
	return render_pass;
}

RDD::RenderPassID RenderingDevice::_render_pass_create_from_graph(RenderingDeviceDriver *p_driver, VectorView<RDD::AttachmentLoadOp> p_load_ops, VectorView<RDD::AttachmentStoreOp> p_store_ops, void *p_user_data) {
	DEV_ASSERT(p_driver != nullptr);
	DEV_ASSERT(p_user_data != nullptr);

	// The graph delegates the creation of the render pass to the user according to the load and store ops that were determined as necessary after
	// resolving the dependencies between commands. This function creates a render pass for the framebuffer accordingly.
	Framebuffer *framebuffer = (Framebuffer *)(p_user_data);
	const FramebufferFormatKey &key = framebuffer->rendering_device->framebuffer_formats[framebuffer->format_id].E->key();
	return _render_pass_create(p_driver, key.attachments, key.passes, p_load_ops, p_store_ops, framebuffer->view_count);
}

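// Usage sketch (hypothetical caller; the format choice is an assumption):
//   RD::AttachmentFormat af;
//   af.format = RD::DATA_FORMAT_R8G8B8A8_UNORM;
//   af.samples = RD::TEXTURE_SAMPLES_1;
//   af.usage_flags = RD::TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;
//   Vector<RD::AttachmentFormat> formats;
//   formats.push_back(af);
//   RD::FramebufferFormatID fmt = rd->framebuffer_format_create(formats);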
RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_format_create(const Vector<AttachmentFormat> &p_format, uint32_t p_view_count) {
	FramebufferPass pass;
	for (int i = 0; i < p_format.size(); i++) {
		if (p_format[i].usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			pass.depth_attachment = i;
		} else {
			pass.color_attachments.push_back(i);
		}
	}

	Vector<FramebufferPass> passes;
	passes.push_back(pass);
	return framebuffer_format_create_multipass(p_format, passes, p_view_count);
}

RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_format_create_multipass(const Vector<AttachmentFormat> &p_attachments, const Vector<FramebufferPass> &p_passes, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	FramebufferFormatKey key;
	key.attachments = p_attachments;
	key.passes = p_passes;
	key.view_count = p_view_count;

	const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key);
	if (E) {
		// Exists, return.
		return E->get();
	}

	Vector<TextureSamples> samples;
	LocalVector<RDD::AttachmentLoadOp> load_ops;
	LocalVector<RDD::AttachmentStoreOp> store_ops;
	for (int64_t i = 0; i < p_attachments.size(); i++) {
		load_ops.push_back(RDD::ATTACHMENT_LOAD_OP_CLEAR);
		store_ops.push_back(RDD::ATTACHMENT_STORE_OP_STORE);
	}

	RDD::RenderPassID render_pass = _render_pass_create(driver, p_attachments, p_passes, load_ops, store_ops, p_view_count, &samples); // Actions don't matter for this use case.
	if (!render_pass) { // Was likely invalid.
		return INVALID_ID;
	}

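	// The ID packs the cache insertion index into the low bits and the ID_TYPE_FRAMEBUFFER_FORMAT
	// tag above ID_BASE_SHIFT, so equal keys always map back to the same ID and its type can be told
	// apart from other ID kinds.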
	FramebufferFormatID id = FramebufferFormatID(framebuffer_format_cache.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT));
	E = framebuffer_format_cache.insert(key, id);

	FramebufferFormat fb_format;
	fb_format.E = E;
	fb_format.render_pass = render_pass;
	fb_format.pass_samples = samples;
	fb_format.view_count = p_view_count;
	framebuffer_formats[id] = fb_format;

#if PRINT_FRAMEBUFFER_FORMAT
	print_line("FRAMEBUFFER FORMAT:", id, "ATTACHMENTS:", p_attachments.size(), "PASSES:", p_passes.size());
	for (RD::AttachmentFormat attachment : p_attachments) {
		print_line("FORMAT:", attachment.format, "SAMPLES:", attachment.samples, "USAGE FLAGS:", attachment.usage_flags);
	}
#endif

	return id;
}

RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_format_create_empty(TextureSamples p_samples) {
	_THREAD_SAFE_METHOD_

	FramebufferFormatKey key;
	key.passes.push_back(FramebufferPass());

	const RBMap<FramebufferFormatKey, FramebufferFormatID>::Element *E = framebuffer_format_cache.find(key);
	if (E) {
		// Exists, return.
		return E->get();
	}

	LocalVector<RDD::Subpass> subpass;
	subpass.resize(1);

	RDD::RenderPassID render_pass = driver->render_pass_create({}, subpass, {}, 1);
	ERR_FAIL_COND_V(!render_pass, FramebufferFormatID());

	FramebufferFormatID id = FramebufferFormatID(framebuffer_format_cache.size()) | (FramebufferFormatID(ID_TYPE_FRAMEBUFFER_FORMAT) << FramebufferFormatID(ID_BASE_SHIFT));
	E = framebuffer_format_cache.insert(key, id);

	FramebufferFormat fb_format;
	fb_format.E = E;
	fb_format.render_pass = render_pass;
	fb_format.pass_samples.push_back(p_samples);
	framebuffer_formats[id] = fb_format;

#if PRINT_FRAMEBUFFER_FORMAT
	print_line("FRAMEBUFFER FORMAT:", id, "ATTACHMENTS: EMPTY");
#endif

	return id;
}

RenderingDevice::TextureSamples RenderingDevice::framebuffer_format_get_texture_samples(FramebufferFormatID p_format, uint32_t p_pass) {
	_THREAD_SAFE_METHOD_

	HashMap<FramebufferFormatID, FramebufferFormat>::Iterator E = framebuffer_formats.find(p_format);
	ERR_FAIL_COND_V(!E, TEXTURE_SAMPLES_1);
	ERR_FAIL_COND_V(p_pass >= uint32_t(E->value.pass_samples.size()), TEXTURE_SAMPLES_1);

	return E->value.pass_samples[p_pass];
}

RID RenderingDevice::framebuffer_create_empty(const Size2i &p_size, TextureSamples p_samples, FramebufferFormatID p_format_check) {
	_THREAD_SAFE_METHOD_

	Framebuffer framebuffer;
	framebuffer.rendering_device = this;
	framebuffer.format_id = framebuffer_format_create_empty(p_samples);
	ERR_FAIL_COND_V(p_format_check != INVALID_FORMAT_ID && framebuffer.format_id != p_format_check, RID());
	framebuffer.size = p_size;
	framebuffer.view_count = 1;

	RDG::FramebufferCache *framebuffer_cache = RDG::framebuffer_cache_create();
	framebuffer_cache->width = p_size.width;
	framebuffer_cache->height = p_size.height;
	framebuffer.framebuffer_cache = framebuffer_cache;

	RID id = framebuffer_owner.make_rid(framebuffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	framebuffer_cache->render_pass_creation_user_data = framebuffer_owner.get_or_null(id);

	return id;
}

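// Usage sketch (hypothetical RIDs; assumes the textures were created with the matching
// attachment usage bits and equal sizes):
//   Vector<RID> attachments;
//   attachments.push_back(color_rid);
//   attachments.push_back(depth_rid);
//   RID fb = rd->framebuffer_create(attachments);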
RID RenderingDevice::framebuffer_create(const Vector<RID> &p_texture_attachments, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	FramebufferPass pass;
	for (int i = 0; i < p_texture_attachments.size(); i++) {
		Texture *texture = texture_owner.get_or_null(p_texture_attachments[i]);

		ERR_FAIL_COND_V_MSG(texture && texture->layers != p_view_count, RID(), "Layer count of the texture doesn't match the view count for this framebuffer.");

		if (texture != nullptr) {
			_check_transfer_worker_texture(texture);
		}

		if (texture && texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			pass.depth_attachment = i;
		} else if (texture && texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
			pass.vrs_attachment = i;
		} else {
			if (texture && texture->is_resolve_buffer) {
				pass.resolve_attachments.push_back(i);
			} else {
				pass.color_attachments.push_back(texture ? i : ATTACHMENT_UNUSED);
			}
		}
	}

	Vector<FramebufferPass> passes;
	passes.push_back(pass);

	return framebuffer_create_multipass(p_texture_attachments, passes, p_format_check, p_view_count);
}

RID RenderingDevice::framebuffer_create_multipass(const Vector<RID> &p_texture_attachments, const Vector<FramebufferPass> &p_passes, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	_THREAD_SAFE_METHOD_

	Vector<AttachmentFormat> attachments;
	LocalVector<RDD::TextureID> textures;
	LocalVector<RDG::ResourceTracker *> trackers;
	attachments.resize(p_texture_attachments.size());

	Size2i size;
	bool size_set = false;
	for (int i = 0; i < p_texture_attachments.size(); i++) {
		AttachmentFormat af;
		Texture *texture = texture_owner.get_or_null(p_texture_attachments[i]);
		if (!texture) {
			af.usage_flags = AttachmentFormat::UNUSED_ATTACHMENT;
			trackers.push_back(nullptr);
		} else {
			ERR_FAIL_COND_V_MSG(texture->layers != p_view_count, RID(), "Layer count of the texture doesn't match the view count for this framebuffer.");

			_check_transfer_worker_texture(texture);

			if (!size_set) {
				size.width = texture->width;
				size.height = texture->height;
				size_set = true;
			} else if (texture->usage_flags & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
				// If this is not the first attachment we assume this is used as the VRS attachment.
				// In this case this texture will be 1/16th the size of the color attachment.
				// So we skip the size check.
			} else {
				ERR_FAIL_COND_V_MSG((uint32_t)size.width != texture->width || (uint32_t)size.height != texture->height, RID(),
						"All textures in a framebuffer should be the same size.");
			}

			af.format = texture->format;
			af.samples = texture->samples;
			af.usage_flags = texture->usage_flags;

			_texture_make_mutable(texture, p_texture_attachments[i]);

			textures.push_back(texture->driver_id);
			trackers.push_back(texture->draw_tracker);
		}
		attachments.write[i] = af;
	}

	ERR_FAIL_COND_V_MSG(!size_set, RID(), "All attachments unused.");

	FramebufferFormatID format_id = framebuffer_format_create_multipass(attachments, p_passes, p_view_count);
	if (format_id == INVALID_ID) {
		return RID();
	}

	ERR_FAIL_COND_V_MSG(p_format_check != INVALID_ID && format_id != p_format_check, RID(),
			"The format used to check this framebuffer differs from the intended framebuffer format.");

	Framebuffer framebuffer;
	framebuffer.rendering_device = this;
	framebuffer.format_id = format_id;
	framebuffer.texture_ids = p_texture_attachments;
	framebuffer.size = size;
	framebuffer.view_count = p_view_count;

	RDG::FramebufferCache *framebuffer_cache = RDG::framebuffer_cache_create();
	framebuffer_cache->width = size.width;
	framebuffer_cache->height = size.height;
	framebuffer_cache->textures = textures;
	framebuffer_cache->trackers = trackers;
	framebuffer.framebuffer_cache = framebuffer_cache;

	RID id = framebuffer_owner.make_rid(framebuffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif

	for (int i = 0; i < p_texture_attachments.size(); i++) {
		if (p_texture_attachments[i].is_valid()) {
			_add_dependency(id, p_texture_attachments[i]);
		}
	}

	framebuffer_cache->render_pass_creation_user_data = framebuffer_owner.get_or_null(id);

	return id;
}

RenderingDevice::FramebufferFormatID RenderingDevice::framebuffer_get_format(RID p_framebuffer) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, INVALID_ID);

	return framebuffer->format_id;
}

Size2 RenderingDevice::framebuffer_get_size(RID p_framebuffer) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, Size2(0, 0));

	return framebuffer->size;
}

bool RenderingDevice::framebuffer_is_valid(RID p_framebuffer) const {
	_THREAD_SAFE_METHOD_

	return framebuffer_owner.owns(p_framebuffer);
}

void RenderingDevice::framebuffer_set_invalidation_callback(RID p_framebuffer, InvalidationCallback p_callback, void *p_userdata) {
	_THREAD_SAFE_METHOD_

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL(framebuffer);

	framebuffer->invalidated_callback = p_callback;
	framebuffer->invalidated_callback_userdata = p_userdata;
}

/*****************/
/**** SAMPLER ****/
/*****************/

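// A minimal usage sketch (illustrative only; field and enum names follow RenderingDevice::SamplerState):
//   RD::SamplerState state;
//   state.mag_filter = RD::SAMPLER_FILTER_LINEAR;
//   state.min_filter = RD::SAMPLER_FILTER_LINEAR;
//   state.repeat_u = RD::SAMPLER_REPEAT_MODE_REPEAT;
//   state.repeat_v = RD::SAMPLER_REPEAT_MODE_REPEAT;
//   RID sampler = rd->sampler_create(state);
// The returned RID is released with free() once it is no longer needed.
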
RID RenderingDevice::sampler_create(const SamplerState &p_state) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_INDEX_V(p_state.repeat_u, SAMPLER_REPEAT_MODE_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.repeat_v, SAMPLER_REPEAT_MODE_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.repeat_w, SAMPLER_REPEAT_MODE_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.compare_op, COMPARE_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_state.border_color, SAMPLER_BORDER_COLOR_MAX, RID());

	RDD::SamplerID sampler = driver->sampler_create(p_state);
	ERR_FAIL_COND_V(!sampler, RID());

	RID id = sampler_owner.make_rid(sampler);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

bool RenderingDevice::sampler_is_format_supported_for_filter(DataFormat p_format, SamplerFilter p_sampler_filter) const {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_INDEX_V(p_format, DATA_FORMAT_MAX, false);

	return driver->sampler_is_format_supported_for_filter(p_format, p_sampler_filter);
}

/***********************/
/**** VERTEX BUFFER ****/
/***********************/

RID RenderingDevice::vertex_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data, bool p_use_as_storage) {
	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());

	Buffer buffer;
	buffer.size = p_size_bytes;
	buffer.usage = RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_VERTEX_BIT;
	if (p_use_as_storage) {
		buffer.usage.set_flag(RDD::BUFFER_USAGE_STORAGE_BIT);
	}
	buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!buffer.driver_id, RID());

	// Vertex buffers are assumed to be immutable unless they don't have initial data or they've been marked for storage explicitly.
	if (p_data.is_empty() || p_use_as_storage) {
		buffer.draw_tracker = RDG::resource_tracker_create();
		buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
	}

	if (p_data.size()) {
		_buffer_initialize(&buffer, p_data.ptr(), p_data.size());
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = vertex_buffer_owner.make_rid(buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

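// A minimal usage sketch (illustrative only): describing a single vec3 position attribute at location 0.
//   RD::VertexAttribute attrib;
//   attrib.location = 0;
//   attrib.offset = 0;
//   attrib.format = RD::DATA_FORMAT_R32G32B32_SFLOAT;
//   attrib.stride = sizeof(float) * 3;
//   Vector<RD::VertexAttribute> attribs;
//   attribs.push_back(attrib);
//   RD::VertexFormatID format = rd->vertex_format_create(attribs);
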
// Internally reference counted, this ID is guaranteed to be unique for the same description, but needs to be freed as many times as it was allocated.
RenderingDevice::VertexFormatID RenderingDevice::vertex_format_create(const Vector<VertexAttribute> &p_vertex_descriptions) {
	_THREAD_SAFE_METHOD_

	VertexDescriptionKey key;
	key.vertex_formats = p_vertex_descriptions;

	VertexFormatID *idptr = vertex_format_cache.getptr(key);
	if (idptr) {
		return *idptr;
	}

	HashSet<int> used_locations;
	for (int i = 0; i < p_vertex_descriptions.size(); i++) {
		ERR_CONTINUE(p_vertex_descriptions[i].format >= DATA_FORMAT_MAX);
		ERR_FAIL_COND_V(used_locations.has(p_vertex_descriptions[i].location), INVALID_ID);

		ERR_FAIL_COND_V_MSG(get_format_vertex_size(p_vertex_descriptions[i].format) == 0, INVALID_ID,
				"Data format for attachment (" + itos(i) + "), '" + FORMAT_NAMES[p_vertex_descriptions[i].format] + "', is not valid for a vertex array.");
		used_locations.insert(p_vertex_descriptions[i].location);
	}

	RDD::VertexFormatID driver_id = driver->vertex_format_create(p_vertex_descriptions);
	ERR_FAIL_COND_V(!driver_id, 0);

	VertexFormatID id = (vertex_format_cache.size() | ((int64_t)ID_TYPE_VERTEX_FORMAT << ID_BASE_SHIFT));
	vertex_format_cache[key] = id;
	vertex_formats[id].vertex_formats = p_vertex_descriptions;
	vertex_formats[id].driver_id = driver_id;
	return id;
}

RID RenderingDevice::vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const Vector<RID> &p_src_buffers, const Vector<uint64_t> &p_offsets) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
	const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];

	ERR_FAIL_COND_V(vd.vertex_formats.size() != p_src_buffers.size(), RID());

	for (int i = 0; i < p_src_buffers.size(); i++) {
		ERR_FAIL_COND_V(!vertex_buffer_owner.owns(p_src_buffers[i]), RID());
	}

	VertexArray vertex_array;

	if (p_offsets.is_empty()) {
		vertex_array.offsets.resize_zeroed(p_src_buffers.size());
	} else {
		ERR_FAIL_COND_V(p_offsets.size() != p_src_buffers.size(), RID());
		vertex_array.offsets = p_offsets;
	}

	vertex_array.vertex_count = p_vertex_count;
	vertex_array.description = p_vertex_format;
	vertex_array.max_instances_allowed = 0xFFFFFFFF; // By default as many as you want.
	for (int i = 0; i < p_src_buffers.size(); i++) {
		Buffer *buffer = vertex_buffer_owner.get_or_null(p_src_buffers[i]);

		// Validate with buffer.
		{
			const VertexAttribute &atf = vd.vertex_formats[i];

			uint32_t element_size = get_format_vertex_size(atf.format);
			ERR_FAIL_COND_V(element_size == 0, RID()); // Should never happen, since this was prevalidated.

			if (atf.frequency == VERTEX_FREQUENCY_VERTEX) {
				// Validate size for regular drawing.
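				// stride * (vertex_count - 1) is the start of the last vertex; adding the attribute's
				// offset and its element size gives the last byte this attribute will read, which must
				// fit inside the buffer.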
				uint64_t total_size = uint64_t(atf.stride) * (p_vertex_count - 1) + atf.offset + element_size;
				ERR_FAIL_COND_V_MSG(total_size > buffer->size, RID(),
						"Attachment (" + itos(i) + ") will read past the end of the buffer.");
			} else {
				// Validate size for instanced drawing.
				uint64_t available = buffer->size - atf.offset;
				ERR_FAIL_COND_V_MSG(available < element_size, RID(),
						"Attachment (" + itos(i) + ") uses instancing, but the buffer is too small for even one element.");

				uint32_t instances_allowed = available / atf.stride;
				vertex_array.max_instances_allowed = MIN(instances_allowed, vertex_array.max_instances_allowed);
			}
		}

		vertex_array.buffers.push_back(buffer->driver_id);

		if (buffer->draw_tracker != nullptr) {
			vertex_array.draw_trackers.push_back(buffer->draw_tracker);
		} else {
			vertex_array.untracked_buffers.insert(p_src_buffers[i]);
		}

		if (buffer->transfer_worker_index >= 0) {
			vertex_array.transfer_worker_indices.push_back(buffer->transfer_worker_index);
			vertex_array.transfer_worker_operations.push_back(buffer->transfer_worker_operation);
		}
	}

	RID id = vertex_array_owner.make_rid(vertex_array);
	for (int i = 0; i < p_src_buffers.size(); i++) {
		_add_dependency(id, p_src_buffers[i]);
	}

	return id;
}

RID RenderingDevice::index_buffer_create(uint32_t p_index_count, IndexBufferFormat p_format, const Vector<uint8_t> &p_data, bool p_use_restart_indices) {
	ERR_FAIL_COND_V(p_index_count == 0, RID());

	IndexBuffer index_buffer;
	index_buffer.format = p_format;
	index_buffer.supports_restart_indices = p_use_restart_indices;
	index_buffer.index_count = p_index_count;
	uint32_t size_bytes = p_index_count * ((p_format == INDEX_BUFFER_FORMAT_UINT16) ? 2 : 4);
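	// In debug builds, scan the initial data to find the highest index used (ignoring restart
	// indices), so draw-time validation can check indices against vertex array bounds. Without
	// initial data, or in release builds, it is conservatively set to 0xFFFFFFFF.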
#ifdef DEBUG_ENABLED
	if (p_data.size()) {
		index_buffer.max_index = 0;
		ERR_FAIL_COND_V_MSG((uint32_t)p_data.size() != size_bytes, RID(),
				"Default index buffer initializer array size (" + itos(p_data.size()) + ") does not match format required size (" + itos(size_bytes) + ").");
		const uint8_t *r = p_data.ptr();
		if (p_format == INDEX_BUFFER_FORMAT_UINT16) {
			const uint16_t *index16 = (const uint16_t *)r;
			for (uint32_t i = 0; i < p_index_count; i++) {
				if (p_use_restart_indices && index16[i] == 0xFFFF) {
					continue; // Restart index, ignore.
				}
				index_buffer.max_index = MAX(index16[i], index_buffer.max_index);
			}
		} else {
			const uint32_t *index32 = (const uint32_t *)r;
			for (uint32_t i = 0; i < p_index_count; i++) {
				if (p_use_restart_indices && index32[i] == 0xFFFFFFFF) {
					continue; // Restart index, ignore.
				}
				index_buffer.max_index = MAX(index32[i], index_buffer.max_index);
			}
		}
	} else {
		index_buffer.max_index = 0xFFFFFFFF;
	}
#else
	index_buffer.max_index = 0xFFFFFFFF;
#endif
	index_buffer.size = size_bytes;
	index_buffer.usage = (RDD::BUFFER_USAGE_TRANSFER_FROM_BIT | RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_INDEX_BIT);
	index_buffer.driver_id = driver->buffer_create(index_buffer.size, index_buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!index_buffer.driver_id, RID());

	// Index buffers are assumed to be immutable unless they don't have initial data.
	if (p_data.is_empty()) {
		index_buffer.draw_tracker = RDG::resource_tracker_create();
		index_buffer.draw_tracker->buffer_driver_id = index_buffer.driver_id;
	}

	if (p_data.size()) {
		_buffer_initialize(&index_buffer, p_data.ptr(), p_data.size());
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += index_buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = index_buffer_owner.make_rid(index_buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

RID RenderingDevice::index_array_create(RID p_index_buffer, uint32_t p_index_offset, uint32_t p_index_count) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(!index_buffer_owner.owns(p_index_buffer), RID());

	IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_index_buffer);

	ERR_FAIL_COND_V(p_index_count == 0, RID());
	ERR_FAIL_COND_V(p_index_offset + p_index_count > index_buffer->index_count, RID());

	IndexArray index_array;
	index_array.max_index = index_buffer->max_index;
	index_array.driver_id = index_buffer->driver_id;
	index_array.draw_tracker = index_buffer->draw_tracker;
	index_array.offset = p_index_offset;
	index_array.indices = p_index_count;
	index_array.format = index_buffer->format;
	index_array.supports_restart_indices = index_buffer->supports_restart_indices;
	index_array.transfer_worker_index = index_buffer->transfer_worker_index;
	index_array.transfer_worker_operation = index_buffer->transfer_worker_operation;

	RID id = index_array_owner.make_rid(index_array);
	_add_dependency(id, p_index_buffer);
	return id;
}

/****************/
/**** SHADER ****/
/****************/

static const char *SHADER_UNIFORM_NAMES[RenderingDevice::UNIFORM_TYPE_MAX] = {
	"Sampler", "CombinedSampler", "Texture", "Image", "TextureBuffer", "SamplerTextureBuffer", "ImageBuffer", "UniformBuffer", "StorageBuffer", "InputAttachment"
};

String RenderingDevice::_shader_uniform_debug(RID p_shader, int p_set) {
	String ret;
	const Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, String());
	for (int i = 0; i < shader->uniform_sets.size(); i++) {
		if (p_set >= 0 && i != p_set) {
			continue;
		}
		for (int j = 0; j < shader->uniform_sets[i].size(); j++) {
			const ShaderUniform &ui = shader->uniform_sets[i][j];
			if (!ret.is_empty()) {
				ret += "\n";
			}
			ret += "Set: " + itos(i) + " Binding: " + itos(ui.binding) + " Type: " + SHADER_UNIFORM_NAMES[ui.type] + " Writable: " + (ui.writable ? "Y" : "N") + " Length: " + itos(ui.length);
		}
	}
	return ret;
}

String RenderingDevice::shader_get_binary_cache_key() const {
	return driver->shader_get_binary_cache_key();
}

Vector<uint8_t> RenderingDevice::shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name) {
	return driver->shader_compile_binary_from_spirv(p_spirv, p_shader_name);
}

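// A minimal sketch of the two-step shader creation flow (illustrative only):
//   Vector<RD::ShaderStageSPIRVData> stages = ...; // One entry per compiled SPIR-V stage.
//   Vector<uint8_t> bytecode = rd->shader_compile_binary_from_spirv(stages, "my_shader");
//   RID shader = rd->shader_create_from_bytecode(bytecode);
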
RID RenderingDevice::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder) {
	// Immutable samplers:
	// The shader creation API is expanded to optionally accept a set of immutable samplers;
	// the existing API is kept by forwarding an empty set.
	Vector<PipelineImmutableSampler> immutable_samplers;
	return shader_create_from_bytecode_with_samplers(p_shader_binary, p_placeholder, immutable_samplers);
}

RID RenderingDevice::shader_create_from_bytecode_with_samplers(const Vector<uint8_t> &p_shader_binary, RID p_placeholder, const Vector<PipelineImmutableSampler> &p_immutable_samplers) {
	_THREAD_SAFE_METHOD_

	ShaderDescription shader_desc;
	String name;

	Vector<RDD::ImmutableSampler> driver_immutable_samplers;
	for (const PipelineImmutableSampler &source_sampler : p_immutable_samplers) {
		RDD::ImmutableSampler driver_sampler;
		driver_sampler.type = source_sampler.uniform_type;
		driver_sampler.binding = source_sampler.binding;

		for (uint32_t j = 0; j < source_sampler.get_id_count(); j++) {
			RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(source_sampler.get_id(j));
			ERR_FAIL_NULL_V_MSG(sampler_driver_id, RID(), "Immutable sampler (binding: " + itos(source_sampler.binding) + ", index " + itos(j) + ") is not a valid sampler.");
			driver_sampler.ids.push_back(*sampler_driver_id);
		}

		driver_immutable_samplers.append(driver_sampler);
	}
	RDD::ShaderID shader_id = driver->shader_create_from_bytecode(p_shader_binary, shader_desc, name, driver_immutable_samplers);
	ERR_FAIL_COND_V(!shader_id, RID());

	// All good, let's create modules.

	RID id;
	if (p_placeholder.is_null()) {
		id = shader_owner.make_rid();
	} else {
		id = p_placeholder;
	}

	Shader *shader = shader_owner.get_or_null(id);
	ERR_FAIL_NULL_V(shader, RID());

	*((ShaderDescription *)shader) = shader_desc; // ShaderDescription bundle.
	shader->name = name;
	shader->driver_id = shader_id;
	shader->layout_hash = driver->shader_get_layout_hash(shader_id);

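	// Assign a format ID to each uniform set. Sets with identical (sorted) uniform layouts share
	// the same ID via uniform_set_format_cache, which makes uniform sets interchangeable between
	// shaders that declare compatible layouts.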
	for (int i = 0; i < shader->uniform_sets.size(); i++) {
		uint32_t format = 0; // No format, default.

		if (shader->uniform_sets[i].size()) {
			// Sort and hash.
			shader->uniform_sets.write[i].sort();

			UniformSetFormat usformat;
			usformat.uniforms = shader->uniform_sets[i];
			RBMap<UniformSetFormat, uint32_t>::Element *E = uniform_set_format_cache.find(usformat);
			if (E) {
				format = E->get();
			} else {
				format = uniform_set_format_cache.size() + 1;
				uniform_set_format_cache.insert(usformat, format);
			}
		}

		shader->set_formats.push_back(format);
	}

	for (ShaderStage stage : shader_desc.stages) {
		switch (stage) {
			case SHADER_STAGE_VERTEX:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_VERTEX_SHADER_BIT);
				break;
			case SHADER_STAGE_FRAGMENT:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
				break;
			case SHADER_STAGE_TESSELATION_CONTROL:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
				break;
			case SHADER_STAGE_TESSELATION_EVALUATION:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT);
				break;
			case SHADER_STAGE_COMPUTE:
				shader->stage_bits.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
				break;
			default:
				DEV_ASSERT(false && "Unknown shader stage.");
				break;
		}
	}

#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

void RenderingDevice::shader_destroy_modules(RID p_shader) {
	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL(shader);
	driver->shader_destroy_modules(shader->driver_id);
}

RID RenderingDevice::shader_create_placeholder() {
	_THREAD_SAFE_METHOD_

	Shader shader;
	return shader_owner.make_rid(shader);
}

uint64_t RenderingDevice::shader_get_vertex_input_attribute_mask(RID p_shader) {
	_THREAD_SAFE_METHOD_

	const Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, 0);
	return shader->vertex_input_mask;
}

/******************/
/**** UNIFORMS ****/
/******************/

RID RenderingDevice::uniform_buffer_create(uint32_t p_size_bytes, const Vector<uint8_t> &p_data) {
	ERR_FAIL_COND_V(p_data.size() && (uint32_t)p_data.size() != p_size_bytes, RID());

	Buffer buffer;
	buffer.size = p_size_bytes;
	buffer.usage = (RDD::BUFFER_USAGE_TRANSFER_TO_BIT | RDD::BUFFER_USAGE_UNIFORM_BIT);
	buffer.driver_id = driver->buffer_create(buffer.size, buffer.usage, RDD::MEMORY_ALLOCATION_TYPE_GPU);
	ERR_FAIL_COND_V(!buffer.driver_id, RID());

	// Uniform buffers are assumed to be immutable unless they don't have initial data.
	if (p_data.is_empty()) {
		buffer.draw_tracker = RDG::resource_tracker_create();
		buffer.draw_tracker->buffer_driver_id = buffer.driver_id;
	}

	if (p_data.size()) {
		_buffer_initialize(&buffer, p_data.ptr(), p_data.size());
	}

	_THREAD_SAFE_LOCK_
	buffer_memory += buffer.size;
	_THREAD_SAFE_UNLOCK_

	RID id = uniform_buffer_owner.make_rid(buffer);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	return id;
}

void RenderingDevice::_uniform_set_update_shared(UniformSet *p_uniform_set) {
	for (UniformSet::SharedTexture shared : p_uniform_set->shared_textures_to_update) {
		Texture *texture = texture_owner.get_or_null(shared.texture);
		ERR_CONTINUE(texture == nullptr);
		_texture_update_shared_fallback(shared.texture, texture, shared.writing);
	}
}

template RID RenderingDevice::uniform_set_create(const LocalVector<RD::Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set, bool p_linear_pool);
template RID RenderingDevice::uniform_set_create(const Vector<RD::Uniform> &p_uniforms, RID p_shader, uint32_t p_shader_set, bool p_linear_pool);

template <typename Collection>
RID RenderingDevice::uniform_set_create(const Collection &p_uniforms, RID p_shader, uint32_t p_shader_set, bool p_linear_pool) {
	_THREAD_SAFE_METHOD_

	ERR_FAIL_COND_V(p_uniforms.is_empty(), RID());

	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, RID());

	ERR_FAIL_COND_V_MSG(p_shader_set >= (uint32_t)shader->uniform_sets.size() || shader->uniform_sets[p_shader_set].is_empty(), RID(),
			"Desired set (" + itos(p_shader_set) + ") not used by shader.");
	// See that all sets in shader are satisfied.

	const Vector<ShaderUniform> &set = shader->uniform_sets[p_shader_set];

	uint32_t uniform_count = p_uniforms.size();
	const Uniform *uniforms = p_uniforms.ptr();

	uint32_t set_uniform_count = set.size();
	const ShaderUniform *set_uniforms = set.ptr();

	LocalVector<RDD::BoundUniform> driver_uniforms;
	driver_uniforms.resize(set_uniform_count);

	// Used for verification to make sure a uniform set does not use a framebuffer-bound texture.
	LocalVector<UniformSet::AttachableTexture> attachable_textures;
	Vector<RDG::ResourceTracker *> draw_trackers;
	Vector<RDG::ResourceUsage> draw_trackers_usage;
	HashMap<RID, RDG::ResourceUsage> untracked_usage;
	Vector<UniformSet::SharedTexture> shared_textures_to_update;

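	// For every uniform the shader declares in this set, find the provided uniform with the same
	// binding (linear scan), validate that its type and ID count match the declaration, and
	// translate it into a driver-level bound uniform while collecting resource trackers.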
	for (uint32_t i = 0; i < set_uniform_count; i++) {
		const ShaderUniform &set_uniform = set_uniforms[i];
		int uniform_idx = -1;
		for (int j = 0; j < (int)uniform_count; j++) {
			if (uniforms[j].binding == set_uniform.binding) {
				uniform_idx = j;
				break;
			}
		}
		ERR_FAIL_COND_V_MSG(uniform_idx == -1, RID(),
				"All the shader bindings for the given set must be covered by the uniforms provided. Binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + ") was not provided.");

		const Uniform &uniform = uniforms[uniform_idx];

		ERR_FAIL_COND_V_MSG(uniform.uniform_type != set_uniform.type, RID(),
				"Mismatched uniform type for binding (" + itos(set_uniform.binding) + "), set (" + itos(p_shader_set) + "). Expected '" + SHADER_UNIFORM_NAMES[set_uniform.type] + "', supplied: '" + SHADER_UNIFORM_NAMES[uniform.uniform_type] + "'.");

		RDD::BoundUniform &driver_uniform = driver_uniforms[i];
		driver_uniform.type = uniform.uniform_type;
		driver_uniform.binding = uniform.binding;

		// Mark immutable samplers to be skipped when creating the uniform set.
		driver_uniform.immutable_sampler = uniform.immutable_sampler;

		switch (uniform.uniform_type) {
			case UNIFORM_TYPE_SAMPLER: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler elements, so an equal number of sampler IDs should be provided to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Sampler (binding: " + itos(uniform.binding) + ") should provide one ID referencing a sampler (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j));
					ERR_FAIL_NULL_V_MSG(sampler_driver_id, RID(), "Sampler (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					driver_uniform.ids.push_back(*sampler_driver_id);
				}
			} break;
			case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length * 2) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler&texture elements, so twice the number of IDs (sampler, texture pairs) should be provided to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j += 2) {
					RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j + 0));
					ERR_FAIL_NULL_V_MSG(sampler_driver_id, RID(), "SamplerTexture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					RID texture_id = uniform.get_id(j + 1);
					Texture *texture = texture_owner.get_or_null(texture_id);
					ERR_FAIL_NULL_V_MSG(texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture.");

					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"Texture (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");

					if ((texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT))) {
						UniformSet::AttachableTexture attachable_texture;
						attachable_texture.bind = set_uniform.binding;
						attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.get_id(j + 1);
						attachable_textures.push_back(attachable_texture);
					}

					RDD::TextureID driver_id = texture->driver_id;
					RDG::ResourceTracker *tracker = texture->draw_tracker;
					if (texture->shared_fallback != nullptr && texture->shared_fallback->texture.id != 0) {
						driver_id = texture->shared_fallback->texture;
						tracker = texture->shared_fallback->texture_tracker;
						shared_textures_to_update.push_back({ false, texture_id });
					}

					if (tracker != nullptr) {
						draw_trackers.push_back(tracker);
						draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_SAMPLE);
					} else {
						untracked_usage[texture_id] = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
					}

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));

					driver_uniform.ids.push_back(*sampler_driver_id);
					driver_uniform.ids.push_back(driver_id);
					_check_transfer_worker_texture(texture);
				}
			} break;
			case UNIFORM_TYPE_TEXTURE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so an equal number of texture IDs should be provided to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Texture (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID texture_id = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(texture_id);
					ERR_FAIL_NULL_V_MSG(texture, RID(), "Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");

					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"Texture (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");

					if ((texture->usage_flags & (TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | TEXTURE_USAGE_INPUT_ATTACHMENT_BIT))) {
						UniformSet::AttachableTexture attachable_texture;
						attachable_texture.bind = set_uniform.binding;
						attachable_texture.texture = texture->owner.is_valid() ? texture->owner : uniform.get_id(j);
						attachable_textures.push_back(attachable_texture);
					}

					RDD::TextureID driver_id = texture->driver_id;
					RDG::ResourceTracker *tracker = texture->draw_tracker;
					if (texture->shared_fallback != nullptr && texture->shared_fallback->texture.id != 0) {
						driver_id = texture->shared_fallback->texture;
						tracker = texture->shared_fallback->texture_tracker;
						shared_textures_to_update.push_back({ false, texture_id });
					}

					if (tracker != nullptr) {
						draw_trackers.push_back(tracker);
						draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_SAMPLE);
					} else {
						untracked_usage[texture_id] = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
					}

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));

					driver_uniform.ids.push_back(driver_id);
					_check_transfer_worker_texture(texture);
				}
			} break;
			case UNIFORM_TYPE_IMAGE: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so an equal number of texture IDs should be provided to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Image (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID texture_id = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(texture_id);

					ERR_FAIL_NULL_V_MSG(texture, RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");

					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_STORAGE_BIT), RID(),
							"Image (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_STORAGE_BIT usage flag set in order to be used as uniform.");

					if (texture->owner.is_null() && texture->shared_fallback != nullptr) {
						shared_textures_to_update.push_back({ true, texture_id });
					}

					if (_texture_make_mutable(texture, texture_id)) {
						// The texture must be mutable as a layout transition will be required.
						draw_graph.add_synchronization();
					}

					if (texture->draw_tracker != nullptr) {
						draw_trackers.push_back(texture->draw_tracker);

						if (set_uniform.writable) {
							draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE);
						} else {
							draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_IMAGE_READ);
						}
					}

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));

					driver_uniform.ids.push_back(texture->driver_id);
					_check_transfer_worker_texture(texture);
				}
			} break;
			case UNIFORM_TYPE_TEXTURE_BUFFER: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") texture buffer elements, so an equal number of texture buffer IDs should be provided to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "Buffer (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture buffer (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID buffer_id = uniform.get_id(j);
					Buffer *buffer = texture_buffer_owner.get_or_null(buffer_id);
					ERR_FAIL_NULL_V_MSG(buffer, RID(), "Texture Buffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture buffer.");

					if (set_uniform.writable && _buffer_make_mutable(buffer, buffer_id)) {
						// The buffer must be mutable if it's used for writing.
						draw_graph.add_synchronization();
					}

					if (buffer->draw_tracker != nullptr) {
						draw_trackers.push_back(buffer->draw_tracker);

						if (set_uniform.writable) {
							draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE);
						} else {
							draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ);
						}
					} else {
						untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ;
					}

					driver_uniform.ids.push_back(buffer->driver_id);
					_check_transfer_worker_buffer(buffer);
				}
			} break;
			case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
				if (uniform.get_id_count() != (uint32_t)set_uniform.length * 2) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") sampler buffer elements, so twice the number of IDs (sampler, buffer pairs) should be provided to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ") should provide two IDs referencing a sampler and then a texture buffer (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j += 2) {
					RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(uniform.get_id(j + 0));
					ERR_FAIL_NULL_V_MSG(sampler_driver_id, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid sampler.");

					RID buffer_id = uniform.get_id(j + 1);
					Buffer *buffer = texture_buffer_owner.get_or_null(buffer_id);
					ERR_FAIL_NULL_V_MSG(buffer, RID(), "SamplerBuffer (binding: " + itos(uniform.binding) + ", index " + itos(j + 1) + ") is not a valid texture buffer.");

					if (buffer->draw_tracker != nullptr) {
						draw_trackers.push_back(buffer->draw_tracker);
						draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ);
					} else {
						untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_TEXTURE_BUFFER_READ;
					}

					driver_uniform.ids.push_back(*sampler_driver_id);
					driver_uniform.ids.push_back(buffer->driver_id);
					_check_transfer_worker_buffer(buffer);
				}
			} break;
			case UNIFORM_TYPE_IMAGE_BUFFER: {
				// Todo.
			} break;
			case UNIFORM_TYPE_UNIFORM_BUFFER: {
				ERR_FAIL_COND_V_MSG(uniform.get_id_count() != 1, RID(),
						"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.get_id_count()) + " provided).");

				RID buffer_id = uniform.get_id(0);
				Buffer *buffer = uniform_buffer_owner.get_or_null(buffer_id);
				ERR_FAIL_NULL_V_MSG(buffer, RID(), "Uniform buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");

				ERR_FAIL_COND_V_MSG(buffer->size < (uint32_t)set_uniform.length, RID(),
						"Uniform buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + ") is smaller than the size of the shader uniform (" + itos(set_uniform.length) + ").");

				if (buffer->draw_tracker != nullptr) {
					draw_trackers.push_back(buffer->draw_tracker);
					draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_UNIFORM_BUFFER_READ);
				} else {
					untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_UNIFORM_BUFFER_READ;
				}

				driver_uniform.ids.push_back(buffer->driver_id);
				_check_transfer_worker_buffer(buffer);
			} break;
			case UNIFORM_TYPE_STORAGE_BUFFER: {
				ERR_FAIL_COND_V_MSG(uniform.get_id_count() != 1, RID(),
						"Storage buffer supplied (binding: " + itos(uniform.binding) + ") must provide one ID (" + itos(uniform.get_id_count()) + " provided).");

				Buffer *buffer = nullptr;

				RID buffer_id = uniform.get_id(0);
				if (storage_buffer_owner.owns(buffer_id)) {
					buffer = storage_buffer_owner.get_or_null(buffer_id);
				} else if (vertex_buffer_owner.owns(buffer_id)) {
					buffer = vertex_buffer_owner.get_or_null(buffer_id);

					ERR_FAIL_COND_V_MSG(!(buffer->usage.has_flag(RDD::BUFFER_USAGE_STORAGE_BIT)), RID(), "Vertex buffer supplied (binding: " + itos(uniform.binding) + ") was not created with the storage flag.");
				}
				ERR_FAIL_NULL_V_MSG(buffer, RID(), "Storage buffer supplied (binding: " + itos(uniform.binding) + ") is invalid.");

				// If the uniform's length is 0, the buffer is sized at link time, so any size is accepted.
				ERR_FAIL_COND_V_MSG(set_uniform.length > 0 && buffer->size != (uint32_t)set_uniform.length, RID(),
						"Storage buffer supplied (binding: " + itos(uniform.binding) + ") size (" + itos(buffer->size) + ") does not match the size of the shader uniform (" + itos(set_uniform.length) + ").");

				if (set_uniform.writable && _buffer_make_mutable(buffer, buffer_id)) {
					// The buffer must be mutable if it's used for writing.
					draw_graph.add_synchronization();
				}

				if (buffer->draw_tracker != nullptr) {
					draw_trackers.push_back(buffer->draw_tracker);

					if (set_uniform.writable) {
						draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE);
					} else {
						draw_trackers_usage.push_back(RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ);
					}
				} else {
					untracked_usage[buffer_id] = RDG::RESOURCE_USAGE_STORAGE_BUFFER_READ;
				}

				driver_uniform.ids.push_back(buffer->driver_id);
				_check_transfer_worker_buffer(buffer);
			} break;
			case UNIFORM_TYPE_INPUT_ATTACHMENT: {
				ERR_FAIL_COND_V_MSG(shader->is_compute, RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") supplied for compute shader (this is not allowed).");

				if (uniform.get_id_count() != (uint32_t)set_uniform.length) {
					if (set_uniform.length > 1) {
						ERR_FAIL_V_MSG(RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") is an array of (" + itos(set_uniform.length) + ") textures, so an equal number of texture IDs should be provided to satisfy it (IDs provided: " + itos(uniform.get_id_count()) + ").");
					} else {
						ERR_FAIL_V_MSG(RID(), "InputAttachment (binding: " + itos(uniform.binding) + ") should provide one ID referencing a texture (IDs provided: " + itos(uniform.get_id_count()) + ").");
					}
				}

				for (uint32_t j = 0; j < uniform.get_id_count(); j++) {
					RID texture_id = uniform.get_id(j);
					Texture *texture = texture_owner.get_or_null(texture_id);

					ERR_FAIL_NULL_V_MSG(texture, RID(),
							"InputAttachment (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") is not a valid texture.");

					ERR_FAIL_COND_V_MSG(!(texture->usage_flags & TEXTURE_USAGE_SAMPLING_BIT), RID(),
							"InputAttachment (binding: " + itos(uniform.binding) + ", index " + itos(j) + ") needs the TEXTURE_USAGE_SAMPLING_BIT usage flag set in order to be used as uniform.");

					DEV_ASSERT(!texture->owner.is_valid() || texture_owner.get_or_null(texture->owner));

					driver_uniform.ids.push_back(texture->driver_id);
					_check_transfer_worker_texture(texture);
				}
			} break;
			default: {
			}
		}
	}

	RDD::UniformSetID driver_uniform_set = driver->uniform_set_create(driver_uniforms, shader->driver_id, p_shader_set, p_linear_pool ? frame : -1);
	ERR_FAIL_COND_V(!driver_uniform_set, RID());

	UniformSet uniform_set;
	uniform_set.driver_id = driver_uniform_set;
	uniform_set.format = shader->set_formats[p_shader_set];
	uniform_set.attachable_textures = attachable_textures;
	uniform_set.draw_trackers = draw_trackers;
	uniform_set.draw_trackers_usage = draw_trackers_usage;
	uniform_set.untracked_usage = untracked_usage;
	uniform_set.shared_textures_to_update = shared_textures_to_update;
	uniform_set.shader_set = p_shader_set;
	uniform_set.shader_id = p_shader;

	RID id = uniform_set_owner.make_rid(uniform_set);
#ifdef DEV_ENABLED
	set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
	// Add dependencies.
	_add_dependency(id, p_shader);
	for (uint32_t i = 0; i < uniform_count; i++) {
		const Uniform &uniform = uniforms[i];
		int id_count = uniform.get_id_count();
		for (int j = 0; j < id_count; j++) {
			_add_dependency(id, uniform.get_id(j));
		}
	}

	return id;
}

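// A minimal usage sketch (illustrative only; assumes a shader expecting one uniform buffer at
// binding 0 of set 0):
//   RD::Uniform u;
//   u.uniform_type = RD::UNIFORM_TYPE_UNIFORM_BUFFER;
//   u.binding = 0;
//   u.append_id(uniform_buffer_rid);
//   Vector<RD::Uniform> uniforms;
//   uniforms.push_back(u);
//   RID uniform_set = rd->uniform_set_create(uniforms, shader_rid, 0);
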
bool RenderingDevice::uniform_set_is_valid(RID p_uniform_set) {
	_THREAD_SAFE_METHOD_

	return uniform_set_owner.owns(p_uniform_set);
}

void RenderingDevice::uniform_set_set_invalidation_callback(RID p_uniform_set, InvalidationCallback p_callback, void *p_userdata) {
	_THREAD_SAFE_METHOD_

	UniformSet *us = uniform_set_owner.get_or_null(p_uniform_set);
	ERR_FAIL_NULL(us);
	us->invalidated_callback = p_callback;
	us->invalidated_callback_userdata = p_userdata;
}

bool RenderingDevice::uniform_sets_have_linear_pools() const {
	return driver->uniform_sets_have_linear_pools();
}

/*******************/
/**** PIPELINES ****/
/*******************/

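// Render pipeline creation validates, in order: that the shader is a render (non-compute) shader,
// that the fragment outputs match the color attachments of the requested render pass, that every
// vertex input the shader consumes is present in the vertex format, and that all fixed-function
// state enums are in range, before handing the description to the driver.
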
RID RenderingDevice::render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const PipelineRasterizationState &p_rasterization_state, const PipelineMultisampleState &p_multisample_state, const PipelineDepthStencilState &p_depth_stencil_state, const PipelineColorBlendState &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
	// Needs a shader.
	Shader *shader = shader_owner.get_or_null(p_shader);
	ERR_FAIL_NULL_V(shader, RID());

	ERR_FAIL_COND_V_MSG(shader->is_compute, RID(), "Compute shaders can't be used in render pipelines.");

	FramebufferFormat fb_format;
	{
		_THREAD_SAFE_METHOD_

		if (p_framebuffer_format == INVALID_ID) {
			// If nothing provided, use an empty one (no attachments).
			p_framebuffer_format = framebuffer_format_create(Vector<AttachmentFormat>());
		}
		ERR_FAIL_COND_V(!framebuffer_formats.has(p_framebuffer_format), RID());
		fb_format = framebuffer_formats[p_framebuffer_format];
	}

	// Validate shader vs. framebuffer.
	{
		ERR_FAIL_COND_V_MSG(p_for_render_pass >= uint32_t(fb_format.E->key().passes.size()), RID(), "Render pass requested for pipeline creation (" + itos(p_for_render_pass) + ") is out of bounds.");
		const FramebufferPass &pass = fb_format.E->key().passes[p_for_render_pass];
		uint32_t output_mask = 0;
		for (int i = 0; i < pass.color_attachments.size(); i++) {
			if (pass.color_attachments[i] != ATTACHMENT_UNUSED) {
				output_mask |= 1 << i;
			}
		}

		ERR_FAIL_COND_V_MSG(shader->fragment_output_mask != output_mask, RID(),
				"Mismatched fragment shader output mask (" + itos(shader->fragment_output_mask) + ") and framebuffer color output mask (" + itos(output_mask) + ") when binding both in render pipeline.");
	}

	RDD::VertexFormatID driver_vertex_format;
	if (p_vertex_format != INVALID_ID) {
		// A vertex format was provided, so the pipeline consumes vertex inputs.
		ERR_FAIL_COND_V(!vertex_formats.has(p_vertex_format), RID());
		const VertexDescriptionCache &vd = vertex_formats[p_vertex_format];
		driver_vertex_format = vertex_formats[p_vertex_format].driver_id;

		// Validate with inputs.
		for (uint32_t i = 0; i < 64; i++) {
			if (!(shader->vertex_input_mask & ((uint64_t)1) << i)) {
				continue;
			}
			bool found = false;
			for (int j = 0; j < vd.vertex_formats.size(); j++) {
				if (vd.vertex_formats[j].location == i) {
					found = true;
					break;
				}
			}

			ERR_FAIL_COND_V_MSG(!found, RID(),
					"Shader vertex input location (" + itos(i) + ") not provided in vertex input description for pipeline creation.");
		}
	} else {
		ERR_FAIL_COND_V_MSG(shader->vertex_input_mask != 0, RID(),
				"Shader contains vertex inputs, but no vertex input description was provided for pipeline creation.");
	}

	ERR_FAIL_INDEX_V(p_render_primitive, RENDER_PRIMITIVE_MAX, RID());

	ERR_FAIL_INDEX_V(p_rasterization_state.cull_mode, 3, RID());

	if (p_multisample_state.sample_mask.size()) {
		// Use sample mask.
		ERR_FAIL_COND_V((int)TEXTURE_SAMPLES_COUNT[p_multisample_state.sample_count] != p_multisample_state.sample_mask.size(), RID());
	}

	ERR_FAIL_INDEX_V(p_depth_stencil_state.depth_compare_operator, COMPARE_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.fail, STENCIL_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.pass, STENCIL_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.depth_fail, STENCIL_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.front_op.compare, COMPARE_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.fail, STENCIL_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.pass, STENCIL_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.depth_fail, STENCIL_OP_MAX, RID());
	ERR_FAIL_INDEX_V(p_depth_stencil_state.back_op.compare, COMPARE_OP_MAX, RID());

	ERR_FAIL_INDEX_V(p_blend_state.logic_op, LOGIC_OP_MAX, RID());

	const FramebufferPass &pass = fb_format.E->key().passes[p_for_render_pass];
	ERR_FAIL_COND_V(p_blend_state.attachments.size() < pass.color_attachments.size(), RID());
	for (int i = 0; i < pass.color_attachments.size(); i++) {
		if (pass.color_attachments[i] != ATTACHMENT_UNUSED) {
			ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_color_blend_factor, BLEND_FACTOR_MAX, RID());
			ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_color_blend_factor, BLEND_FACTOR_MAX, RID());
			ERR_FAIL_INDEX_V(p_blend_state.attachments[i].color_blend_op, BLEND_OP_MAX, RID());
			ERR_FAIL_INDEX_V(p_blend_state.attachments[i].src_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
			ERR_FAIL_INDEX_V(p_blend_state.attachments[i].dst_alpha_blend_factor, BLEND_FACTOR_MAX, RID());
			ERR_FAIL_INDEX_V(p_blend_state.attachments[i].alpha_blend_op, BLEND_OP_MAX, RID());
		}
	}

	for (int i = 0; i < shader->specialization_constants.size(); i++) {
		const ShaderSpecializationConstant &sc = shader->specialization_constants[i];
		for (int j = 0; j < p_specialization_constants.size(); j++) {
			const PipelineSpecializationConstant &psc = p_specialization_constants[j];
			if (psc.constant_id == sc.constant_id) {
				ERR_FAIL_COND_V_MSG(psc.type != sc.type, RID(), "Specialization constant provided for id (" + itos(sc.constant_id) + ") is of the wrong type.");
				break;
			}
		}
	}

	RenderPipeline pipeline;
	pipeline.driver_id = driver->render_pipeline_create(
			shader->driver_id,
			driver_vertex_format,
			p_render_primitive,
			p_rasterization_state,
			p_multisample_state,
			p_depth_stencil_state,
			p_blend_state,
			pass.color_attachments,
			p_dynamic_state_flags,
			fb_format.render_pass,
			p_for_render_pass,
			p_specialization_constants);
	ERR_FAIL_COND_V(!pipeline.driver_id, RID());

	if (pipeline_cache_enabled) {
		_update_pipeline_cache();
	}

	pipeline.shader = p_shader;
	pipeline.shader_driver_id = shader->driver_id;
	pipeline.shader_layout_hash = shader->layout_hash;
	pipeline.set_formats = shader->set_formats;
	pipeline.push_constant_size = shader->push_constant_size;
	pipeline.stage_bits = shader->stage_bits;

#ifdef DEBUG_ENABLED
	pipeline.validation.dynamic_state = p_dynamic_state_flags;
	pipeline.validation.framebuffer_format = p_framebuffer_format;
	pipeline.validation.render_pass = p_for_render_pass;
	pipeline.validation.vertex_format = p_vertex_format;
	pipeline.validation.uses_restart_indices = p_render_primitive == RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX;

	static const uint32_t primitive_divisor[RENDER_PRIMITIVE_MAX] = {
		1, 2, 1, 1, 1, 3, 1, 1, 1, 1, 1
	};
	pipeline.validation.primitive_divisor = primitive_divisor[p_render_primitive];
	static const uint32_t primitive_minimum[RENDER_PRIMITIVE_MAX] = {
		1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 1
	};
	pipeline.validation.primitive_minimum = primitive_minimum[p_render_primitive];
#endif
	// Create ID to associate with this pipeline.
	RID id = render_pipeline_owner.make_rid(pipeline);
	{
		_THREAD_SAFE_METHOD_

#ifdef DEV_ENABLED
		set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
		// Now add all the dependencies.
		_add_dependency(id, p_shader);
	}

	return id;
}

bool RenderingDevice::render_pipeline_is_valid(RID p_pipeline) {
	_THREAD_SAFE_METHOD_

	return render_pipeline_owner.owns(p_pipeline);
}

RID RenderingDevice::compute_pipeline_create(RID p_shader, const Vector<PipelineSpecializationConstant> &p_specialization_constants) {
	Shader *shader;

	{
		_THREAD_SAFE_METHOD_

		// Needs a shader.
		shader = shader_owner.get_or_null(p_shader);
		ERR_FAIL_NULL_V(shader, RID());

		ERR_FAIL_COND_V_MSG(!shader->is_compute, RID(),
				"Non-compute shaders can't be used in compute pipelines.");
	}

	for (int i = 0; i < shader->specialization_constants.size(); i++) {
		const ShaderSpecializationConstant &sc = shader->specialization_constants[i];
		for (int j = 0; j < p_specialization_constants.size(); j++) {
			const PipelineSpecializationConstant &psc = p_specialization_constants[j];
			if (psc.constant_id == sc.constant_id) {
				ERR_FAIL_COND_V_MSG(psc.type != sc.type, RID(), "Specialization constant provided for id (" + itos(sc.constant_id) + ") is of the wrong type.");
				break;
			}
		}
	}

	ComputePipeline pipeline;
	pipeline.driver_id = driver->compute_pipeline_create(shader->driver_id, p_specialization_constants);
	ERR_FAIL_COND_V(!pipeline.driver_id, RID());

	if (pipeline_cache_enabled) {
		_update_pipeline_cache();
	}

	pipeline.shader = p_shader;
	pipeline.shader_driver_id = shader->driver_id;
	pipeline.shader_layout_hash = shader->layout_hash;
	pipeline.set_formats = shader->set_formats;
	pipeline.push_constant_size = shader->push_constant_size;
	pipeline.local_group_size[0] = shader->compute_local_size[0];
	pipeline.local_group_size[1] = shader->compute_local_size[1];
	pipeline.local_group_size[2] = shader->compute_local_size[2];

	// Create ID to associate with this pipeline.
	RID id = compute_pipeline_owner.make_rid(pipeline);
	{
		_THREAD_SAFE_METHOD_

#ifdef DEV_ENABLED
		set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
		// Now add all the dependencies.
		_add_dependency(id, p_shader);
	}

	return id;
}

bool RenderingDevice::compute_pipeline_is_valid(RID p_pipeline) {
	_THREAD_SAFE_METHOD_

	return compute_pipeline_owner.owns(p_pipeline);
}

/****************/
/**** SCREEN ****/
/****************/

uint32_t RenderingDevice::_get_swap_chain_desired_count() const {
	return MAX(2U, uint32_t(GLOBAL_GET("rendering/rendering_device/vsync/swapchain_image_count")));
}

Error RenderingDevice::screen_create(DisplayServer::WindowID p_screen) {
	_THREAD_SAFE_METHOD_

	RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
	ERR_FAIL_COND_V_MSG(surface == 0, ERR_CANT_CREATE, "A surface was not created for the screen.");

	HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
	ERR_FAIL_COND_V_MSG(it != screen_swap_chains.end(), ERR_CANT_CREATE, "A swap chain was already created for the screen.");

	RDD::SwapChainID swap_chain = driver->swap_chain_create(surface);
	ERR_FAIL_COND_V_MSG(swap_chain.id == 0, ERR_CANT_CREATE, "Unable to create swap chain.");

	screen_swap_chains[p_screen] = swap_chain;

	return OK;
}

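// Prepares a screen for drawing: if this frame already queued the screen's swap chain for
// presentation, it is presented first; then the next framebuffer is acquired, resizing the swap
// chain (and retrying the acquisition once) when the surface size has changed.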
Error RenderingDevice::screen_prepare_for_drawing(DisplayServer::WindowID p_screen) {
	_THREAD_SAFE_METHOD_

	// After submitting work, acquire the swap chain image(s).
	HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
	ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), ERR_CANT_CREATE, "A swap chain was not created for the screen.");

	// Erase the framebuffer corresponding to this screen from the map in case any of the operations fail.
	screen_framebuffers.erase(p_screen);

	// If this frame has already queued this swap chain for presentation, present it and remove it from the pending list.
	uint32_t to_present_index = 0;
	while (to_present_index < frames[frame].swap_chains_to_present.size()) {
		if (frames[frame].swap_chains_to_present[to_present_index] == it->value) {
			driver->command_queue_execute_and_present(present_queue, {}, {}, {}, {}, it->value);
			frames[frame].swap_chains_to_present.remove_at(to_present_index);
		} else {
			to_present_index++;
		}
	}

	bool resize_required = false;
	RDD::FramebufferID framebuffer = driver->swap_chain_acquire_framebuffer(main_queue, it->value, resize_required);
	if (resize_required) {
		// Flush everything so nothing can be using the swap chain before resizing it.
		_flush_and_stall_for_all_frames();

		Error err = driver->swap_chain_resize(main_queue, it->value, _get_swap_chain_desired_count());
		if (err != OK) {
			// Resize is allowed to fail silently because the window can be minimized.
			return err;
		}

		framebuffer = driver->swap_chain_acquire_framebuffer(main_queue, it->value, resize_required);
	}

	if (framebuffer.id == 0) {
		// Some drivers like NVIDIA are fast enough to invalidate the swap chain between resizing and acquisition (GH-94104).
		// This typically occurs during continuous window resizing operations, especially if done quickly.
		// Allow this to fail silently since it has no visual consequences.
		return ERR_CANT_CREATE;
	}

	// Store the framebuffer that will be used next to draw to this screen.
	screen_framebuffers[p_screen] = framebuffer;
	frames[frame].swap_chains_to_present.push_back(it->value);

	return OK;
}

int RenderingDevice::screen_get_width(DisplayServer::WindowID p_screen) const {
	_THREAD_SAFE_METHOD_

	RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
	ERR_FAIL_COND_V_MSG(surface == 0, 0, "A surface was not created for the screen.");
	return context->surface_get_width(surface);
}

int RenderingDevice::screen_get_height(DisplayServer::WindowID p_screen) const {
	_THREAD_SAFE_METHOD_

	RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
	ERR_FAIL_COND_V_MSG(surface == 0, 0, "A surface was not created for the screen.");
	return context->surface_get_height(surface);
}

int RenderingDevice::screen_get_pre_rotation_degrees(DisplayServer::WindowID p_screen) const {
	_THREAD_SAFE_METHOD_

	HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
	ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), 0, "A swap chain was not created for the screen.");

	return driver->swap_chain_get_pre_rotation_degrees(it->value);
}

RenderingDevice::FramebufferFormatID RenderingDevice::screen_get_framebuffer_format(DisplayServer::WindowID p_screen) const {
	_THREAD_SAFE_METHOD_

	HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
	ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), INVALID_ID, "Screen was never prepared.");

	DataFormat format = driver->swap_chain_get_format(it->value);
	ERR_FAIL_COND_V(format == DATA_FORMAT_MAX, INVALID_ID);

	AttachmentFormat attachment;
	attachment.format = format;
	attachment.samples = TEXTURE_SAMPLES_1;
	attachment.usage_flags = TEXTURE_USAGE_COLOR_ATTACHMENT_BIT;

	Vector<AttachmentFormat> screen_attachment;
	screen_attachment.push_back(attachment);
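	// Note: framebuffer_format_create() caches formats by key, so the const_cast below only mutates
	// internal caches rather than observable device state.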
	return const_cast<RenderingDevice *>(this)->framebuffer_format_create(screen_attachment);
}

Error RenderingDevice::screen_free(DisplayServer::WindowID p_screen) {
	_THREAD_SAFE_METHOD_

	HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator it = screen_swap_chains.find(p_screen);
	ERR_FAIL_COND_V_MSG(it == screen_swap_chains.end(), FAILED, "Screen was never created.");

	// Flush everything so nothing can be using the swap chain before erasing it.
	_flush_and_stall_for_all_frames();

	const DisplayServer::WindowID screen = it->key;
	const RDD::SwapChainID swap_chain = it->value;
	driver->swap_chain_free(swap_chain);
	screen_framebuffers.erase(screen);
	screen_swap_chains.erase(screen);
	return OK;
}

/*******************/
/**** DRAW LIST ****/
/*******************/

RenderingDevice::DrawListID RenderingDevice::draw_list_begin_for_screen(DisplayServer::WindowID p_screen, const Color &p_clear_color) {
	ERR_RENDER_THREAD_GUARD_V(INVALID_ID);
	ERR_FAIL_COND_V_MSG(draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");
	ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");

	RenderingContextDriver::SurfaceID surface = context->surface_get_from_window(p_screen);
	HashMap<DisplayServer::WindowID, RDD::SwapChainID>::ConstIterator sc_it = screen_swap_chains.find(p_screen);
	HashMap<DisplayServer::WindowID, RDD::FramebufferID>::ConstIterator fb_it = screen_framebuffers.find(p_screen);
	ERR_FAIL_COND_V_MSG(surface == 0, INVALID_ID, "A surface was not created for the screen.");
	ERR_FAIL_COND_V_MSG(sc_it == screen_swap_chains.end(), INVALID_ID, "Screen was never prepared.");
	ERR_FAIL_COND_V_MSG(fb_it == screen_framebuffers.end(), INVALID_ID, "Framebuffer was never prepared.");
	Rect2i viewport = Rect2i(0, 0, context->surface_get_width(surface), context->surface_get_height(surface));
	_draw_list_allocate(viewport, 0);
#ifdef DEBUG_ENABLED
	draw_list_framebuffer_format = screen_get_framebuffer_format(p_screen);
#endif
	draw_list_subpass_count = 1;

	RDD::RenderPassClearValue clear_value;
	clear_value.color = p_clear_color;

	RDD::RenderPassID render_pass = driver->swap_chain_get_render_pass(sc_it->value);
	draw_graph.add_draw_list_begin(render_pass, fb_it->value, viewport, RDG::ATTACHMENT_OPERATION_CLEAR, clear_value, true, false, RDD::BreadcrumbMarker::BLIT_PASS, split_swapchain_into_its_own_cmd_buffer);
	draw_graph.add_draw_list_set_viewport(viewport);
	draw_graph.add_draw_list_set_scissor(viewport);
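
	// Only one draw list can be active at a time, so the returned ID is a constant tag rather than a unique handle.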
	return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}

RenderingDevice::DrawListID RenderingDevice::draw_list_begin(RID p_framebuffer, BitField<DrawFlags> p_draw_flags, const Vector<Color> &p_clear_color_values, float p_clear_depth_value, uint32_t p_clear_stencil_value, const Rect2 &p_region, uint32_t p_breadcrumb) {
	ERR_RENDER_THREAD_GUARD_V(INVALID_ID);
	ERR_FAIL_COND_V_MSG(draw_list != nullptr, INVALID_ID, "Only one draw list can be active at the same time.");

	Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_framebuffer);
	ERR_FAIL_NULL_V(framebuffer, INVALID_ID);

	Point2i viewport_offset;
	Point2i viewport_size = framebuffer->size;

	if (p_region != Rect2() && p_region != Rect2(Vector2(), viewport_size)) { // Check custom region.
		Rect2i viewport(viewport_offset, viewport_size);
		Rect2i regioni = p_region;
		if (!((regioni.position.x >= viewport.position.x) && (regioni.position.y >= viewport.position.y) &&
				((regioni.position.x + regioni.size.x) <= (viewport.position.x + viewport.size.x)) &&
				((regioni.position.y + regioni.size.y) <= (viewport.position.y + viewport.size.y)))) {
			ERR_FAIL_V_MSG(INVALID_ID, "When supplying a custom region, it must be contained within the framebuffer rectangle.");
		}

		viewport_offset = regioni.position;
		viewport_size = regioni.size;
	}

	thread_local LocalVector<RDG::AttachmentOperation> operations;
	thread_local LocalVector<RDD::RenderPassClearValue> clear_values;
	thread_local LocalVector<RDG::ResourceTracker *> resource_trackers;
	thread_local LocalVector<RDG::ResourceUsage> resource_usages;
	bool uses_color = false;
	bool uses_depth = false;
	operations.resize(framebuffer->texture_ids.size());
	clear_values.resize(framebuffer->texture_ids.size());
	resource_trackers.clear();
	resource_usages.clear();

	uint32_t color_index = 0;
	for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
		RID texture_rid = framebuffer->texture_ids[i];
		Texture *texture = texture_owner.get_or_null(texture_rid);
		if (texture == nullptr) {
			operations[i] = RDG::ATTACHMENT_OPERATION_DEFAULT;
			clear_values[i] = RDD::RenderPassClearValue();
			continue;
		}

		// Indicate the texture will get modified for the shared texture fallback.
		_texture_update_shared_fallback(texture_rid, texture, true);

		RDG::AttachmentOperation operation = RDG::ATTACHMENT_OPERATION_DEFAULT;
		RDD::RenderPassClearValue clear_value;
		if (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
			if (p_draw_flags.has_flag(DrawFlags(DRAW_CLEAR_COLOR_0 << color_index))) {
				ERR_FAIL_COND_V_MSG(color_index >= p_clear_color_values.size(), INVALID_ID, vformat("Color texture (%d) was specified to be cleared but no color value was provided.", color_index));
				operation = RDG::ATTACHMENT_OPERATION_CLEAR;
				clear_value.color = p_clear_color_values[color_index];
			} else if (p_draw_flags.has_flag(DrawFlags(DRAW_IGNORE_COLOR_0 << color_index))) {
				operation = RDG::ATTACHMENT_OPERATION_IGNORE;
			}

			resource_trackers.push_back(texture->draw_tracker);
			resource_usages.push_back(RDG::RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE);
			uses_color = true;
			color_index++;
		} else if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			if (p_draw_flags.has_flag(DRAW_CLEAR_DEPTH) || p_draw_flags.has_flag(DRAW_CLEAR_STENCIL)) {
				operation = RDG::ATTACHMENT_OPERATION_CLEAR;
				clear_value.depth = p_clear_depth_value;
				clear_value.stencil = p_clear_stencil_value;
			} else if (p_draw_flags.has_flag(DRAW_IGNORE_DEPTH) || p_draw_flags.has_flag(DRAW_IGNORE_STENCIL)) {
				operation = RDG::ATTACHMENT_OPERATION_IGNORE;
			}

			resource_trackers.push_back(texture->draw_tracker);
			resource_usages.push_back(RDG::RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE);
			uses_depth = true;
		}

		operations[i] = operation;
		clear_values[i] = clear_value;
	}

	draw_graph.add_draw_list_begin(framebuffer->framebuffer_cache, Rect2i(viewport_offset, viewport_size), operations, clear_values, uses_color, uses_depth, p_breadcrumb);
	draw_graph.add_draw_list_usages(resource_trackers, resource_usages);

	// Mark textures as bound.
	draw_list_bound_textures.clear();
	for (int i = 0; i < framebuffer->texture_ids.size(); i++) {
		Texture *texture = texture_owner.get_or_null(framebuffer->texture_ids[i]);
		if (texture == nullptr) {
			continue;
		}

		texture->bound = true;
		draw_list_bound_textures.push_back(framebuffer->texture_ids[i]);
	}

	_draw_list_allocate(Rect2i(viewport_offset, viewport_size), 0);
#ifdef DEBUG_ENABLED
	draw_list_framebuffer_format = framebuffer->format_id;
#endif
	draw_list_current_subpass = 0;

	const FramebufferFormatKey &key = framebuffer_formats[framebuffer->format_id].E->key();
	draw_list_subpass_count = key.passes.size();

	Rect2i viewport_rect(viewport_offset, viewport_size);
	draw_graph.add_draw_list_set_viewport(viewport_rect);
	draw_graph.add_draw_list_set_scissor(viewport_rect);
	return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}

#ifndef DISABLE_DEPRECATED
Error RenderingDevice::draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, DrawListID *r_split_ids, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const Vector<RID> &p_storage_textures) {
	ERR_FAIL_V_MSG(ERR_UNAVAILABLE, "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
#endif

RenderingDevice::DrawList *RenderingDevice::_get_draw_list_ptr(DrawListID p_id) {
	if (p_id < 0) {
		return nullptr;
	}

	if (!draw_list) {
		return nullptr;
	} else if (p_id == (int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT)) {
		return draw_list;
	} else {
		return nullptr;
	}
}

void RenderingDevice::draw_list_set_blend_constants(DrawListID p_list, const Color &p_color) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	draw_graph.add_draw_list_set_blend_constants(p_color);
}

void RenderingDevice::draw_list_bind_render_pipeline(DrawListID p_list, RID p_render_pipeline) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	const RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_render_pipeline);
	ERR_FAIL_NULL(pipeline);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND(pipeline->validation.framebuffer_format != draw_list_framebuffer_format && pipeline->validation.render_pass != draw_list_current_subpass);
#endif

	if (p_render_pipeline == dl->state.pipeline) {
		return; // Redundant state, return.
	}

	dl->state.pipeline = p_render_pipeline;

	draw_graph.add_draw_list_bind_pipeline(pipeline->driver_id, pipeline->stage_bits);

	if (dl->state.pipeline_shader != pipeline->shader) {
		// Shader changed, so descriptor sets may become incompatible.
		uint32_t pcount = pipeline->set_formats.size(); // Formats count in this pipeline.
		dl->state.set_count = MAX(dl->state.set_count, pcount);
		const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats.

		uint32_t first_invalid_set = UINT32_MAX; // All valid by default.
		if (pipeline->push_constant_size != dl->state.pipeline_push_constant_size) {
			// All sets must be invalidated as the pipeline layout is not compatible if the push constant range is different.
			dl->state.pipeline_push_constant_size = pipeline->push_constant_size;
			first_invalid_set = 0;
		} else {
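			// How much bound set state survives a shader change is driver-specific: some APIs keep
			// compatible sets and cascade-invalidate from the first mismatch, others invalidate all
			// bound sets, and others decide from the pipeline layout hash.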
			switch (driver->api_trait_get(RDD::API_TRAIT_SHADER_CHANGE_INVALIDATION)) {
				case RDD::SHADER_CHANGE_INVALIDATION_ALL_BOUND_UNIFORM_SETS: {
					first_invalid_set = 0;
				} break;
				case RDD::SHADER_CHANGE_INVALIDATION_INCOMPATIBLE_SETS_PLUS_CASCADE: {
					for (uint32_t i = 0; i < pcount; i++) {
						if (dl->state.sets[i].pipeline_expected_format != pformats[i]) {
							first_invalid_set = i;
							break;
						}
					}
				} break;
				case RDD::SHADER_CHANGE_INVALIDATION_ALL_OR_NONE_ACCORDING_TO_LAYOUT_HASH: {
					if (dl->state.pipeline_shader_layout_hash != pipeline->shader_layout_hash) {
						first_invalid_set = 0;
					}
				} break;
			}
		}

		if (pipeline->push_constant_size) {
#ifdef DEBUG_ENABLED
			dl->validation.pipeline_push_constant_supplied = false;
#endif
		}

		for (uint32_t i = 0; i < pcount; i++) {
			dl->state.sets[i].bound = dl->state.sets[i].bound && i < first_invalid_set;
			dl->state.sets[i].pipeline_expected_format = pformats[i];
		}

		for (uint32_t i = pcount; i < dl->state.set_count; i++) {
			// Unbind the ones above (not used) if they exist.
			dl->state.sets[i].bound = false;
		}

		dl->state.set_count = pcount; // Update set count.
		dl->state.pipeline_shader = pipeline->shader;
		dl->state.pipeline_shader_driver_id = pipeline->shader_driver_id;
		dl->state.pipeline_shader_layout_hash = pipeline->shader_layout_hash;
	}

#ifdef DEBUG_ENABLED
	// Update render pass pipeline info.
	dl->validation.pipeline_active = true;
	dl->validation.pipeline_dynamic_state = pipeline->validation.dynamic_state;
	dl->validation.pipeline_vertex_format = pipeline->validation.vertex_format;
	dl->validation.pipeline_uses_restart_indices = pipeline->validation.uses_restart_indices;
	dl->validation.pipeline_primitive_divisor = pipeline->validation.primitive_divisor;
	dl->validation.pipeline_primitive_minimum = pipeline->validation.primitive_minimum;
	dl->validation.pipeline_push_constant_size = pipeline->push_constant_size;
#endif
}

void RenderingDevice::draw_list_bind_uniform_set(DrawListID p_list, RID p_uniform_set, uint32_t p_index) {
	ERR_RENDER_THREAD_GUARD();

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_index >= driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS) || p_index >= MAX_UNIFORM_SETS,
			"Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS)) + ").");
#endif

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	const UniformSet *uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
	ERR_FAIL_NULL(uniform_set);

	if (p_index > dl->state.set_count) {
		dl->state.set_count = p_index;
	}

	dl->state.sets[p_index].uniform_set_driver_id = uniform_set->driver_id; // Update set pointer.
	dl->state.sets[p_index].bound = false; // Needs rebind.
	dl->state.sets[p_index].uniform_set_format = uniform_set->format;
	dl->state.sets[p_index].uniform_set = p_uniform_set;

#ifdef DEBUG_ENABLED
	{ // Validate that textures bound are not attached as framebuffer bindings.
		uint32_t attachable_count = uniform_set->attachable_textures.size();
		const UniformSet::AttachableTexture *attachable_ptr = uniform_set->attachable_textures.ptr();
		uint32_t bound_count = draw_list_bound_textures.size();
		const RID *bound_ptr = draw_list_bound_textures.ptr();
		for (uint32_t i = 0; i < attachable_count; i++) {
			for (uint32_t j = 0; j < bound_count; j++) {
				ERR_FAIL_COND_MSG(attachable_ptr[i].texture == bound_ptr[j],
						"Attempted to use the same texture in a framebuffer attachment and a uniform (set: " + itos(p_index) + ", binding: " + itos(attachable_ptr[i].bind) + "); this is not allowed.");
			}
		}
	}
#endif
}

void RenderingDevice::draw_list_bind_vertex_array(DrawListID p_list, RID p_vertex_array) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	VertexArray *vertex_array = vertex_array_owner.get_or_null(p_vertex_array);
	ERR_FAIL_NULL(vertex_array);

	if (dl->state.vertex_array == p_vertex_array) {
		return; // Already set.
	}

	_check_transfer_worker_vertex_array(vertex_array);

	dl->state.vertex_array = p_vertex_array;

#ifdef DEBUG_ENABLED
	dl->validation.vertex_format = vertex_array->description;
	dl->validation.vertex_max_instances_allowed = vertex_array->max_instances_allowed;
#endif
	dl->validation.vertex_array_size = vertex_array->vertex_count;

	draw_graph.add_draw_list_bind_vertex_buffers(vertex_array->buffers, vertex_array->offsets);

	for (int i = 0; i < vertex_array->draw_trackers.size(); i++) {
		draw_graph.add_draw_list_usage(vertex_array->draw_trackers[i], RDG::RESOURCE_USAGE_VERTEX_BUFFER_READ);
	}
}

void RenderingDevice::draw_list_bind_index_array(DrawListID p_list, RID p_index_array) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	IndexArray *index_array = index_array_owner.get_or_null(p_index_array);
	ERR_FAIL_NULL(index_array);

	if (dl->state.index_array == p_index_array) {
		return; // Already set.
	}

	_check_transfer_worker_index_array(index_array);

	dl->state.index_array = p_index_array;

#ifdef DEBUG_ENABLED
	dl->validation.index_array_max_index = index_array->max_index;
#endif
	dl->validation.index_array_count = index_array->indices;
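
	// The index array offset is stored in elements; convert it to bytes according to the index format.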
	const uint64_t offset_bytes = index_array->offset * (index_array->format == INDEX_BUFFER_FORMAT_UINT16 ? sizeof(uint16_t) : sizeof(uint32_t));
	draw_graph.add_draw_list_bind_index_buffer(index_array->driver_id, index_array->format, offset_bytes);

	if (index_array->draw_tracker != nullptr) {
		draw_graph.add_draw_list_usage(index_array->draw_tracker, RDG::RESOURCE_USAGE_INDEX_BUFFER_READ);
	}
}

void RenderingDevice::draw_list_set_line_width(DrawListID p_list, float p_width) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	draw_graph.add_draw_list_set_line_width(p_width);
}

void RenderingDevice::draw_list_set_push_constant(DrawListID p_list, const void *p_data, uint32_t p_data_size) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_data_size != dl->validation.pipeline_push_constant_size,
			"This render pipeline requires (" + itos(dl->validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ").");
#endif

	draw_graph.add_draw_list_set_push_constant(dl->state.pipeline_shader_driver_id, p_data, p_data_size);

#ifdef DEBUG_ENABLED
	dl->validation.pipeline_push_constant_supplied = true;
#endif
}

void RenderingDevice::draw_list_draw(DrawListID p_list, bool p_use_indices, uint32_t p_instances, uint32_t p_procedural_vertices) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.pipeline_active,
			"No render pipeline was set before attempting to draw.");
	if (dl->validation.pipeline_vertex_format != INVALID_ID) {
		// Pipeline uses vertices, validate format.
		ERR_FAIL_COND_MSG(dl->validation.vertex_format == INVALID_ID,
				"No vertex array was bound, and render pipeline expects vertices.");
		// Make sure format is right.
		ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != dl->validation.vertex_format,
				"The vertex format used to create the pipeline does not match the vertex format bound.");
		// Make sure number of instances is valid.
		ERR_FAIL_COND_MSG(p_instances > dl->validation.vertex_max_instances_allowed,
				"Number of instances requested (" + itos(p_instances) + ") is larger than the maximum number supported by the bound vertex array (" + itos(dl->validation.vertex_max_instances_allowed) + ").");
	}

	if (dl->validation.pipeline_push_constant_size > 0) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!dl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before drawing, but it's not present.");
	}
#endif

#ifdef DEBUG_ENABLED
	for (uint32_t i = 0; i < dl->state.set_count; i++) {
		if (dl->state.sets[i].pipeline_expected_format == 0) {
			// Nothing expected by this pipeline.
			continue;
		}

		if (dl->state.sets[i].pipeline_expected_format != dl->state.sets[i].uniform_set_format) {
			if (dl->state.sets[i].uniform_set_format == 0) {
				ERR_FAIL_MSG("Uniforms required by the pipeline were never supplied for set (" + itos(i) + ") at the time of drawing.");
			} else if (uniform_set_owner.owns(dl->state.sets[i].uniform_set)) {
				UniformSet *us = uniform_set_owner.get_or_null(dl->state.sets[i].uniform_set);
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(dl->state.pipeline_shader));
			} else {
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(dl->state.pipeline_shader));
			}
		}
	}
#endif

	thread_local LocalVector<RDD::UniformSetID> valid_descriptor_ids;
	valid_descriptor_ids.clear();
	valid_descriptor_ids.resize(dl->state.set_count);

	uint32_t valid_set_count = 0;
	uint32_t first_set_index = 0;
	uint32_t last_set_index = 0;
	bool found_first_set = false;
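
	// First pass: find the first set that needs rebinding, and let drivers that don't honor
	// pipeline barriers prepare each descriptor set before it is used.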
	for (uint32_t i = 0; i < dl->state.set_count; i++) {
		if (dl->state.sets[i].pipeline_expected_format == 0) {
			continue; // Nothing expected by this pipeline.
		}

		if (!dl->state.sets[i].bound && !found_first_set) {
			first_set_index = i;
			found_first_set = true;
		}

		// Prepare descriptor sets if the API doesn't use pipeline barriers.
		if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
			draw_graph.add_draw_list_uniform_set_prepare_for_use(dl->state.pipeline_shader_driver_id, dl->state.sets[i].uniform_set_driver_id, i);
		}
	}

	// Bind descriptor sets.
	for (uint32_t i = first_set_index; i < dl->state.set_count; i++) {
		if (dl->state.sets[i].pipeline_expected_format == 0) {
			continue; // Nothing expected by this pipeline.
		}

		if (!dl->state.sets[i].bound) {
			// Batch contiguous descriptor sets in a single call.
			if (descriptor_set_batching) {
				// All good, see if this requires re-binding.
				if (i - last_set_index > 1) {
					// If the descriptor sets are not contiguous, bind the previous ones and start a new batch.
					draw_graph.add_draw_list_bind_uniform_sets(dl->state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);

					first_set_index = i;
					valid_set_count = 1;
					valid_descriptor_ids[0] = dl->state.sets[i].uniform_set_driver_id;
				} else {
					// Otherwise, keep storing in the current batch.
					valid_descriptor_ids[valid_set_count] = dl->state.sets[i].uniform_set_driver_id;
					valid_set_count++;
				}

				UniformSet *uniform_set = uniform_set_owner.get_or_null(dl->state.sets[i].uniform_set);
				_uniform_set_update_shared(uniform_set);

				draw_graph.add_draw_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);

				dl->state.sets[i].bound = true;
				last_set_index = i;
			} else {
				draw_graph.add_draw_list_bind_uniform_set(dl->state.pipeline_shader_driver_id, dl->state.sets[i].uniform_set_driver_id, i);
			}
		}
	}

	// Bind the remaining batch.
	if (descriptor_set_batching && valid_set_count > 0) {
		draw_graph.add_draw_list_bind_uniform_sets(dl->state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);
	}

	if (p_use_indices) {
#ifdef DEBUG_ENABLED
		ERR_FAIL_COND_MSG(p_procedural_vertices > 0,
				"Procedural vertices can't be used together with indices.");
		ERR_FAIL_COND_MSG(!dl->validation.index_array_count,
				"Draw command requested indices, but no index buffer was set.");
		ERR_FAIL_COND_MSG(dl->validation.pipeline_uses_restart_indices != dl->validation.index_buffer_uses_restart_indices,
				"The usage of restart indices in the index buffer does not match the render primitive in the pipeline.");
#endif
		uint32_t to_draw = dl->validation.index_array_count;

#ifdef DEBUG_ENABLED
		ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
				"Too few indices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
		ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
				"Index amount (" + itos(to_draw) + ") must be a multiple of the amount of indices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
#endif

		draw_graph.add_draw_list_draw_indexed(to_draw, p_instances, 0);
	} else {
		uint32_t to_draw;

		if (p_procedural_vertices > 0) {
#ifdef DEBUG_ENABLED
			ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != INVALID_ID,
					"Procedural vertices requested, but pipeline expects a vertex array.");
#endif
			to_draw = p_procedural_vertices;
		} else {
#ifdef DEBUG_ENABLED
			ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format == INVALID_ID,
					"Draw command lacks indices, but pipeline format does not use vertices.");
#endif
			to_draw = dl->validation.vertex_array_size;
		}

#ifdef DEBUG_ENABLED
		ERR_FAIL_COND_MSG(to_draw < dl->validation.pipeline_primitive_minimum,
				"Too few vertices (" + itos(to_draw) + ") for the render primitive set in the render pipeline (" + itos(dl->validation.pipeline_primitive_minimum) + ").");
		ERR_FAIL_COND_MSG((to_draw % dl->validation.pipeline_primitive_divisor) != 0,
				"Vertex amount (" + itos(to_draw) + ") must be a multiple of the amount of vertices required by the render primitive (" + itos(dl->validation.pipeline_primitive_divisor) + ").");
#endif

		draw_graph.add_draw_list_draw(to_draw, p_instances);
	}

	dl->state.draw_count++;
}

void RenderingDevice::draw_list_draw_indirect(DrawListID p_list, bool p_use_indices, RID p_buffer, uint32_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);

	Buffer *buffer = storage_buffer_owner.get_or_null(p_buffer);
	ERR_FAIL_NULL(buffer);
	ERR_FAIL_COND_MSG(!buffer->usage.has_flag(RDD::BUFFER_USAGE_INDIRECT_BIT), "Buffer provided was not created to do indirect draws.");

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.pipeline_active,
			"No render pipeline was set before attempting to draw.");
	if (dl->validation.pipeline_vertex_format != INVALID_ID) {
		// Pipeline uses vertices, validate format.
		ERR_FAIL_COND_MSG(dl->validation.vertex_format == INVALID_ID,
				"No vertex array was bound, and render pipeline expects vertices.");
		// Make sure format is right.
		ERR_FAIL_COND_MSG(dl->validation.pipeline_vertex_format != dl->validation.vertex_format,
				"The vertex format used to create the pipeline does not match the vertex format bound.");
	}

	if (dl->validation.pipeline_push_constant_size > 0) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!dl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before drawing, but it's not present.");
	}
#endif

#ifdef DEBUG_ENABLED
	for (uint32_t i = 0; i < dl->state.set_count; i++) {
		if (dl->state.sets[i].pipeline_expected_format == 0) {
			// Nothing expected by this pipeline.
			continue;
		}

		if (dl->state.sets[i].pipeline_expected_format != dl->state.sets[i].uniform_set_format) {
			if (dl->state.sets[i].uniform_set_format == 0) {
				ERR_FAIL_MSG(vformat("Uniforms required by the pipeline were never supplied for set (%d) at the time of drawing.", i));
			} else if (uniform_set_owner.owns(dl->state.sets[i].uniform_set)) {
				UniformSet *us = uniform_set_owner.get_or_null(dl->state.sets[i].uniform_set);
				ERR_FAIL_MSG(vformat("Uniforms supplied for set (%d):\n%s\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n%s", i, _shader_uniform_debug(us->shader_id, us->shader_set), _shader_uniform_debug(dl->state.pipeline_shader)));
			} else {
				ERR_FAIL_MSG(vformat("Uniforms supplied for set (%d, which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n%s", i, _shader_uniform_debug(dl->state.pipeline_shader)));
			}
		}
	}
#endif

	// Prepare descriptor sets if the API doesn't use pipeline barriers.
	if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
		for (uint32_t i = 0; i < dl->state.set_count; i++) {
			if (dl->state.sets[i].pipeline_expected_format == 0) {
				// Nothing expected by this pipeline.
				continue;
			}

			draw_graph.add_draw_list_uniform_set_prepare_for_use(dl->state.pipeline_shader_driver_id, dl->state.sets[i].uniform_set_driver_id, i);
		}
	}

	// Bind descriptor sets.
	for (uint32_t i = 0; i < dl->state.set_count; i++) {
		if (dl->state.sets[i].pipeline_expected_format == 0) {
			continue; // Nothing expected by this pipeline.
		}

		if (!dl->state.sets[i].bound) {
			// All good, see if this requires re-binding.
			draw_graph.add_draw_list_bind_uniform_set(dl->state.pipeline_shader_driver_id, dl->state.sets[i].uniform_set_driver_id, i);

			UniformSet *uniform_set = uniform_set_owner.get_or_null(dl->state.sets[i].uniform_set);
			_uniform_set_update_shared(uniform_set);

			draw_graph.add_draw_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);

			dl->state.sets[i].bound = true;
		}
	}

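	// The size checks below match the indirect command layouts: an indexed indirect draw command is
	// 5 x uint32_t (20 bytes: index count, instance count, first index, vertex offset, first instance),
	// while a non-indexed one is 4 x uint32_t (16 bytes: vertex count, instance count, first vertex, first instance).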
	if (p_use_indices) {
#ifdef DEBUG_ENABLED
		ERR_FAIL_COND_MSG(!dl->validation.index_array_count,
				"Draw command requested indices, but no index buffer was set.");
		ERR_FAIL_COND_MSG(dl->validation.pipeline_uses_restart_indices != dl->validation.index_buffer_uses_restart_indices,
				"The usage of restart indices in the index buffer does not match the render primitive in the pipeline.");
#endif
		ERR_FAIL_COND_MSG(p_offset + 20 > buffer->size, "Offset provided (+20) is past the end of the buffer.");

		draw_graph.add_draw_list_draw_indexed_indirect(buffer->driver_id, p_offset, p_draw_count, p_stride);
	} else {
		ERR_FAIL_COND_MSG(p_offset + 16 > buffer->size, "Offset provided (+16) is past the end of the buffer.");

		draw_graph.add_draw_list_draw_indirect(buffer->driver_id, p_offset, p_draw_count, p_stride);
	}

	dl->state.draw_count++;

	if (buffer->draw_tracker != nullptr) {
		draw_graph.add_draw_list_usage(buffer->draw_tracker, RDG::RESOURCE_USAGE_INDIRECT_BUFFER_READ);
	}

	_check_transfer_worker_buffer(buffer);
}

void RenderingDevice::draw_list_set_viewport(DrawListID p_list, const Rect2 &p_rect) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	if (p_rect.get_area() == 0) {
		return;
	}

	dl->viewport = p_rect;
	draw_graph.add_draw_list_set_viewport(p_rect);
}

void RenderingDevice::draw_list_enable_scissor(DrawListID p_list, const Rect2 &p_rect) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	Rect2i rect = p_rect;
	rect.position += dl->viewport.position;
	rect = dl->viewport.intersection(rect);

	if (rect.get_area() == 0) {
		return;
	}

	draw_graph.add_draw_list_set_scissor(rect);
}

void RenderingDevice::draw_list_disable_scissor(DrawListID p_list) {
	ERR_RENDER_THREAD_GUARD();

	DrawList *dl = _get_draw_list_ptr(p_list);
	ERR_FAIL_NULL(dl);
#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!dl->validation.active, "Submitted Draw Lists can no longer be modified.");
#endif

	draw_graph.add_draw_list_set_scissor(dl->viewport);
}

uint32_t RenderingDevice::draw_list_get_current_pass() {
	ERR_RENDER_THREAD_GUARD_V(0);

	return draw_list_current_subpass;
}

RenderingDevice::DrawListID RenderingDevice::draw_list_switch_to_next_pass() {
	ERR_RENDER_THREAD_GUARD_V(INVALID_ID);
	ERR_FAIL_NULL_V(draw_list, INVALID_ID);
	ERR_FAIL_COND_V(draw_list_current_subpass >= draw_list_subpass_count - 1, INVALID_ID);

	draw_list_current_subpass++;

	Rect2i viewport;
	_draw_list_free(&viewport);

	draw_graph.add_draw_list_next_subpass(RDD::COMMAND_BUFFER_TYPE_PRIMARY);

	_draw_list_allocate(viewport, draw_list_current_subpass);
	return int64_t(ID_TYPE_DRAW_LIST) << ID_BASE_SHIFT;
}

#ifndef DISABLE_DEPRECATED
Error RenderingDevice::draw_list_switch_to_next_pass_split(uint32_t p_splits, DrawListID *r_split_ids) {
	ERR_FAIL_V_MSG(ERR_UNAVAILABLE, "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
#endif

Error RenderingDevice::_draw_list_allocate(const Rect2i &p_viewport, uint32_t p_subpass) {
	draw_list = memnew(DrawList);
	draw_list->viewport = p_viewport;
	return OK;
}

void RenderingDevice::_draw_list_free(Rect2i *r_last_viewport) {
	if (r_last_viewport) {
		*r_last_viewport = draw_list->viewport;
	}

	// Just end the list.
	memdelete(draw_list);
	draw_list = nullptr;
}

void RenderingDevice::draw_list_end() {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_NULL_MSG(draw_list, "Immediate draw list is already inactive.");

	draw_graph.add_draw_list_end();

	_draw_list_free();

	for (int i = 0; i < draw_list_bound_textures.size(); i++) {
		Texture *texture = texture_owner.get_or_null(draw_list_bound_textures[i]);
		ERR_CONTINUE(!texture); // Should never happen; bound textures are validated when the draw list begins.
		if (texture->usage_flags & TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
			texture->bound = false;
		}
		if (texture->usage_flags & TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
			texture->bound = false;
		}
	}

	draw_list_bound_textures.clear();
}

/***********************/
/**** COMPUTE LISTS ****/
/***********************/

RenderingDevice::ComputeListID RenderingDevice::compute_list_begin() {
	ERR_RENDER_THREAD_GUARD_V(INVALID_ID);
	ERR_FAIL_COND_V_MSG(compute_list != nullptr, INVALID_ID, "Only one draw/compute list can be active at the same time.");

	compute_list = memnew(ComputeList);

	draw_graph.add_compute_list_begin();
	return ID_TYPE_COMPUTE_LIST;
}

void RenderingDevice::compute_list_bind_compute_pipeline(ComputeListID p_list, RID p_compute_pipeline) {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);

	ComputeList *cl = compute_list;

	const ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_compute_pipeline);
	ERR_FAIL_NULL(pipeline);

	if (p_compute_pipeline == cl->state.pipeline) {
		return; // Redundant state, return.
	}

	cl->state.pipeline = p_compute_pipeline;

	draw_graph.add_compute_list_bind_pipeline(pipeline->driver_id);

	if (cl->state.pipeline_shader != pipeline->shader) {
		// Shader changed, so descriptor sets may become incompatible.
		uint32_t pcount = pipeline->set_formats.size(); // Formats count in this pipeline.
		cl->state.set_count = MAX(cl->state.set_count, pcount);
		const uint32_t *pformats = pipeline->set_formats.ptr(); // Pipeline set formats.

		uint32_t first_invalid_set = UINT32_MAX; // All valid by default.
		switch (driver->api_trait_get(RDD::API_TRAIT_SHADER_CHANGE_INVALIDATION)) {
			case RDD::SHADER_CHANGE_INVALIDATION_ALL_BOUND_UNIFORM_SETS: {
				first_invalid_set = 0;
			} break;
			case RDD::SHADER_CHANGE_INVALIDATION_INCOMPATIBLE_SETS_PLUS_CASCADE: {
				for (uint32_t i = 0; i < pcount; i++) {
					if (cl->state.sets[i].pipeline_expected_format != pformats[i]) {
						first_invalid_set = i;
						break;
					}
				}
			} break;
			case RDD::SHADER_CHANGE_INVALIDATION_ALL_OR_NONE_ACCORDING_TO_LAYOUT_HASH: {
				if (cl->state.pipeline_shader_layout_hash != pipeline->shader_layout_hash) {
					first_invalid_set = 0;
				}
			} break;
		}

		for (uint32_t i = 0; i < pcount; i++) {
			cl->state.sets[i].bound = cl->state.sets[i].bound && i < first_invalid_set;
			cl->state.sets[i].pipeline_expected_format = pformats[i];
		}

		for (uint32_t i = pcount; i < cl->state.set_count; i++) {
			// Unbind the ones above (not used) if they exist.
			cl->state.sets[i].bound = false;
		}

		cl->state.set_count = pcount; // Update set count.

		if (pipeline->push_constant_size) {
#ifdef DEBUG_ENABLED
			cl->validation.pipeline_push_constant_supplied = false;
#endif
		}

		cl->state.pipeline_shader = pipeline->shader;
		cl->state.pipeline_shader_driver_id = pipeline->shader_driver_id;
		cl->state.pipeline_shader_layout_hash = pipeline->shader_layout_hash;
		cl->state.local_group_size[0] = pipeline->local_group_size[0];
		cl->state.local_group_size[1] = pipeline->local_group_size[1];
		cl->state.local_group_size[2] = pipeline->local_group_size[2];
	}

#ifdef DEBUG_ENABLED
	// Update compute pass pipeline info.
	cl->validation.pipeline_active = true;
	cl->validation.pipeline_push_constant_size = pipeline->push_constant_size;
#endif
}

void RenderingDevice::compute_list_bind_uniform_set(ComputeListID p_list, RID p_uniform_set, uint32_t p_index) {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);

	ComputeList *cl = compute_list;

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_index >= driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS) || p_index >= MAX_UNIFORM_SETS,
			"Attempting to bind a descriptor set (" + itos(p_index) + ") greater than what the hardware supports (" + itos(driver->limit_get(LIMIT_MAX_BOUND_UNIFORM_SETS)) + ").");
#endif

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
#endif

	UniformSet *uniform_set = uniform_set_owner.get_or_null(p_uniform_set);
	ERR_FAIL_NULL(uniform_set);

	if (p_index > cl->state.set_count) {
		cl->state.set_count = p_index;
	}

	cl->state.sets[p_index].uniform_set_driver_id = uniform_set->driver_id; // Update set pointer.
	cl->state.sets[p_index].bound = false; // Needs rebind.
	cl->state.sets[p_index].uniform_set_format = uniform_set->format;
	cl->state.sets[p_index].uniform_set = p_uniform_set;

#if 0
	{ // Validate that textures bound are not attached as framebuffer bindings.
		uint32_t attachable_count = uniform_set->attachable_textures.size();
		const RID *attachable_ptr = uniform_set->attachable_textures.ptr();
		uint32_t bound_count = draw_list_bound_textures.size();
		const RID *bound_ptr = draw_list_bound_textures.ptr();
		for (uint32_t i = 0; i < attachable_count; i++) {
			for (uint32_t j = 0; j < bound_count; j++) {
				ERR_FAIL_COND_MSG(attachable_ptr[i] == bound_ptr[j],
						"Attempted to use the same texture in framebuffer attachment and a uniform set, this is not allowed.");
			}
		}
	}
#endif
}

void RenderingDevice::compute_list_set_push_constant(ComputeListID p_list, const void *p_data, uint32_t p_data_size) {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);
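
	// 128 bytes is the smallest maxPushConstantsSize the Vulkan spec guarantees, so larger push
	// constants are rejected to keep shaders portable across devices.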
	ERR_FAIL_COND_MSG(p_data_size > MAX_PUSH_CONSTANT_SIZE, "Push constants can't be bigger than 128 bytes to maintain compatibility.");

	ComputeList *cl = compute_list;

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
#endif

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_data_size != cl->validation.pipeline_push_constant_size,
			"This compute pipeline requires (" + itos(cl->validation.pipeline_push_constant_size) + ") bytes of push constant data, supplied: (" + itos(p_data_size) + ").");
#endif

	draw_graph.add_compute_list_set_push_constant(cl->state.pipeline_shader_driver_id, p_data, p_data_size);

	// Store it in the state in case we need to restart the compute list.
	memcpy(cl->state.push_constant_data, p_data, p_data_size);
	cl->state.push_constant_size = p_data_size;

#ifdef DEBUG_ENABLED
	cl->validation.pipeline_push_constant_supplied = true;
#endif
}

void RenderingDevice::compute_list_dispatch(ComputeListID p_list, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);

	ComputeList *cl = compute_list;

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_x_groups == 0, "Dispatch amount of X compute groups (" + itos(p_x_groups) + ") is zero.");
	ERR_FAIL_COND_MSG(p_y_groups == 0, "Dispatch amount of Y compute groups (" + itos(p_y_groups) + ") is zero.");
	ERR_FAIL_COND_MSG(p_z_groups == 0, "Dispatch amount of Z compute groups (" + itos(p_z_groups) + ") is zero.");
	ERR_FAIL_COND_MSG(p_x_groups > driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X),
			"Dispatch amount of X compute groups (" + itos(p_x_groups) + ") is larger than device limit (" + itos(driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X)) + ").");
	ERR_FAIL_COND_MSG(p_y_groups > driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y),
			"Dispatch amount of Y compute groups (" + itos(p_y_groups) + ") is larger than device limit (" + itos(driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y)) + ").");
	ERR_FAIL_COND_MSG(p_z_groups > driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z),
			"Dispatch amount of Z compute groups (" + itos(p_z_groups) + ") is larger than device limit (" + itos(driver->limit_get(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z)) + ").");

	ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
#endif

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");

	if (cl->validation.pipeline_push_constant_size > 0) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
	}
#endif

#ifdef DEBUG_ENABLED
	for (uint32_t i = 0; i < cl->state.set_count; i++) {
		if (cl->state.sets[i].pipeline_expected_format == 0) {
			// Nothing expected by this pipeline.
			continue;
		}

		if (cl->state.sets[i].pipeline_expected_format != cl->state.sets[i].uniform_set_format) {
			if (cl->state.sets[i].uniform_set_format == 0) {
				ERR_FAIL_MSG("Uniforms required by the pipeline were never supplied for set (" + itos(i) + ") at the time of dispatching.");
			} else if (uniform_set_owner.owns(cl->state.sets[i].uniform_set)) {
				UniformSet *us = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			} else {
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			}
		}
	}
#endif

	thread_local LocalVector<RDD::UniformSetID> valid_descriptor_ids;
	valid_descriptor_ids.clear();
	valid_descriptor_ids.resize(cl->state.set_count);

	uint32_t valid_set_count = 0;
	uint32_t first_set_index = 0;
	uint32_t last_set_index = 0;
	bool found_first_set = false;
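
	// Same two-pass strategy as draw_list_draw(): find the first stale set, then batch contiguous rebinds.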
	for (uint32_t i = 0; i < cl->state.set_count; i++) {
		if (cl->state.sets[i].pipeline_expected_format == 0) {
			// Nothing expected by this pipeline.
			continue;
		}

		if (!cl->state.sets[i].bound && !found_first_set) {
			first_set_index = i;
			found_first_set = true;
		}

		// Prepare descriptor sets if the API doesn't use pipeline barriers.
		if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
			draw_graph.add_compute_list_uniform_set_prepare_for_use(cl->state.pipeline_shader_driver_id, cl->state.sets[i].uniform_set_driver_id, i);
		}
	}

	// Bind descriptor sets.
	for (uint32_t i = first_set_index; i < cl->state.set_count; i++) {
		if (cl->state.sets[i].pipeline_expected_format == 0) {
			continue; // Nothing expected by this pipeline.
		}

		if (!cl->state.sets[i].bound) {
			// Descriptor set batching.
			if (descriptor_set_batching) {
				// All good, see if this requires re-binding.
				if (i - last_set_index > 1) {
					// If the descriptor sets are not contiguous, bind the previous ones and start a new batch.
					draw_graph.add_compute_list_bind_uniform_sets(cl->state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);

					first_set_index = i;
					valid_set_count = 1;
					valid_descriptor_ids[0] = cl->state.sets[i].uniform_set_driver_id;
				} else {
					// Otherwise, keep storing in the current batch.
					valid_descriptor_ids[valid_set_count] = cl->state.sets[i].uniform_set_driver_id;
					valid_set_count++;
				}

				last_set_index = i;
			} else {
				draw_graph.add_compute_list_bind_uniform_set(cl->state.pipeline_shader_driver_id, cl->state.sets[i].uniform_set_driver_id, i);
			}

			UniformSet *uniform_set = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
			_uniform_set_update_shared(uniform_set);

			draw_graph.add_compute_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);

			cl->state.sets[i].bound = true;
		}
	}

	// Bind the remaining batch.
	if (valid_set_count > 0) {
		draw_graph.add_compute_list_bind_uniform_sets(cl->state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);
	}

	draw_graph.add_compute_list_dispatch(p_x_groups, p_y_groups, p_z_groups);
	cl->state.dispatch_count++;
}

void RenderingDevice::compute_list_dispatch_threads(ComputeListID p_list, uint32_t p_x_threads, uint32_t p_y_threads, uint32_t p_z_threads) {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(p_x_threads == 0, "Dispatch amount of X compute threads (" + itos(p_x_threads) + ") is zero.");
	ERR_FAIL_COND_MSG(p_y_threads == 0, "Dispatch amount of Y compute threads (" + itos(p_y_threads) + ") is zero.");
	ERR_FAIL_COND_MSG(p_z_threads == 0, "Dispatch amount of Z compute threads (" + itos(p_z_threads) + ") is zero.");
#endif

	ComputeList *cl = compute_list;

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");

	if (cl->validation.pipeline_push_constant_size > 0) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
	}
#endif
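
	// Convert thread counts to workgroup counts, rounding up. For example, a 1920x1080 grid with an
	// 8x8x1 local group size dispatches 240x135x1 workgroups.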
	compute_list_dispatch(p_list, Math::division_round_up(p_x_threads, cl->state.local_group_size[0]), Math::division_round_up(p_y_threads, cl->state.local_group_size[1]), Math::division_round_up(p_z_threads, cl->state.local_group_size[2]));
}

void RenderingDevice::compute_list_dispatch_indirect(ComputeListID p_list, RID p_buffer, uint32_t p_offset) {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_COND(p_list != ID_TYPE_COMPUTE_LIST);
	ERR_FAIL_NULL(compute_list);

	ComputeList *cl = compute_list;

	Buffer *buffer = storage_buffer_owner.get_or_null(p_buffer);
	ERR_FAIL_NULL(buffer);

	ERR_FAIL_COND_MSG(!buffer->usage.has_flag(RDD::BUFFER_USAGE_INDIRECT_BIT), "Buffer provided was not created to do indirect dispatch.");
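
	// 12 bytes matches the indirect dispatch command layout: 3 x uint32_t group counts (X, Y, Z).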
	ERR_FAIL_COND_MSG(p_offset + 12 > buffer->size, "Offset provided (+12) is past the end of the buffer.");

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.active, "Submitted Compute Lists can no longer be modified.");
#endif

#ifdef DEBUG_ENABLED
	ERR_FAIL_COND_MSG(!cl->validation.pipeline_active, "No compute pipeline was set before attempting to dispatch.");

	if (cl->validation.pipeline_push_constant_size > 0) {
		// Using push constants, check that they were supplied.
		ERR_FAIL_COND_MSG(!cl->validation.pipeline_push_constant_supplied,
				"The shader in this pipeline requires a push constant to be set before dispatching, but it's not present.");
	}
#endif

#ifdef DEBUG_ENABLED
	for (uint32_t i = 0; i < cl->state.set_count; i++) {
		if (cl->state.sets[i].pipeline_expected_format == 0) {
			// Nothing expected by this pipeline.
			continue;
		}

		if (cl->state.sets[i].pipeline_expected_format != cl->state.sets[i].uniform_set_format) {
			if (cl->state.sets[i].uniform_set_format == 0) {
				ERR_FAIL_MSG("Uniforms required by the pipeline were never supplied for set (" + itos(i) + ") at the time of dispatching.");
			} else if (uniform_set_owner.owns(cl->state.sets[i].uniform_set)) {
				UniformSet *us = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + "):\n" + _shader_uniform_debug(us->shader_id, us->shader_set) + "\nare not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			} else {
				ERR_FAIL_MSG("Uniforms supplied for set (" + itos(i) + ", which was just freed) are not the same format as required by the pipeline shader. Pipeline shader requires the following bindings:\n" + _shader_uniform_debug(cl->state.pipeline_shader));
			}
		}
	}
#endif

  4269. thread_local LocalVector<RDD::UniformSetID> valid_descriptor_ids;
  4270. valid_descriptor_ids.clear();
  4271. valid_descriptor_ids.resize(cl->state.set_count);
  4272. uint32_t valid_set_count = 0;
  4273. uint32_t first_set_index = 0;
  4274. uint32_t last_set_index = 0;
  4275. bool found_first_set = false;
  4276. for (uint32_t i = 0; i < cl->state.set_count; i++) {
  4277. if (cl->state.sets[i].pipeline_expected_format == 0) {
  4278. // Nothing expected by this pipeline.
  4279. continue;
  4280. }
  4281. if (!cl->state.sets[i].bound && !found_first_set) {
  4282. first_set_index = i;
  4283. found_first_set = true;
  4284. }
  4285. // Prepare descriptor sets if the API doesn't use pipeline barriers.
  4286. if (!driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS)) {
  4287. draw_graph.add_compute_list_uniform_set_prepare_for_use(cl->state.pipeline_shader_driver_id, cl->state.sets[i].uniform_set_driver_id, i);
  4288. }
  4289. }
  4290. // Bind descriptor sets.
  4291. for (uint32_t i = first_set_index; i < cl->state.set_count; i++) {
  4292. if (cl->state.sets[i].pipeline_expected_format == 0) {
  4293. continue; // Nothing expected by this pipeline.
  4294. }
  4295. if (!cl->state.sets[i].bound) {
  4296. // All good, see if this requires re-binding.
  4297. if (i - last_set_index > 1) {
  4298. // If the descriptor sets are not contiguous, bind the previous ones and start a new batch
  4299. draw_graph.add_compute_list_bind_uniform_sets(cl->state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);
  4300. first_set_index = i;
  4301. valid_set_count = 1;
  4302. valid_descriptor_ids[0] = cl->state.sets[i].uniform_set_driver_id;
  4303. } else {
  4304. // Otherwise, keep storing in the current batch
  4305. valid_descriptor_ids[valid_set_count] = cl->state.sets[i].uniform_set_driver_id;
  4306. valid_set_count++;
  4307. }
  4308. last_set_index = i;
  4309. UniformSet *uniform_set = uniform_set_owner.get_or_null(cl->state.sets[i].uniform_set);
  4310. _uniform_set_update_shared(uniform_set);
  4311. draw_graph.add_compute_list_usages(uniform_set->draw_trackers, uniform_set->draw_trackers_usage);
  4312. cl->state.sets[i].bound = true;
  4313. }
  4314. }
  4315. // Bind the remaining batch
  4316. if (valid_set_count > 0) {
  4317. draw_graph.add_compute_list_bind_uniform_sets(cl->state.pipeline_shader_driver_id, valid_descriptor_ids, first_set_index, valid_set_count);
  4318. }
  4319. draw_graph.add_compute_list_dispatch_indirect(buffer->driver_id, p_offset);
  4320. cl->state.dispatch_count++;
  4321. if (buffer->draw_tracker != nullptr) {
  4322. draw_graph.add_compute_list_usage(buffer->draw_tracker, RDG::RESOURCE_USAGE_INDIRECT_BUFFER_READ);
  4323. }
  4324. _check_transfer_worker_buffer(buffer);
  4325. }
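
// Usage sketch (illustrative only, assuming the public RenderingDevice API; not part
// of this file). The indirect buffer must hold three consecutive uint32_t group
// counts (x, y, z) at p_offset and be created with the dispatch-indirect usage flag:
//
//   uint32_t groups[3] = { 16, 16, 1 };
//   Vector<uint8_t> data;
//   data.resize(sizeof(groups));
//   memcpy(data.ptrw(), groups, sizeof(groups));
//   RID indirect = rd->storage_buffer_create(sizeof(groups), data, RD::STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT);
//   rd->compute_list_dispatch_indirect(compute_list, indirect, 0);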

void RenderingDevice::compute_list_add_barrier(ComputeListID p_list) {
	ERR_RENDER_THREAD_GUARD();

	compute_list_barrier_state = compute_list->state;
	compute_list_end();
	compute_list_begin();

	if (compute_list_barrier_state.pipeline.is_valid()) {
		compute_list_bind_compute_pipeline(p_list, compute_list_barrier_state.pipeline);
	}

	for (uint32_t i = 0; i < compute_list_barrier_state.set_count; i++) {
		if (compute_list_barrier_state.sets[i].uniform_set.is_valid()) {
			compute_list_bind_uniform_set(p_list, compute_list_barrier_state.sets[i].uniform_set, i);
		}
	}

	if (compute_list_barrier_state.push_constant_size > 0) {
		compute_list_set_push_constant(p_list, compute_list_barrier_state.push_constant_data, compute_list_barrier_state.push_constant_size);
	}
}
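
// Illustrative use of the barrier above (a sketch, not part of this file): splitting
// two dependent dispatches inside one compute list:
//
//   rd->compute_list_dispatch(list, ...); // Pass 1 writes a buffer.
//   rd->compute_list_add_barrier(list);   // Wait for pass 1 before pass 2 reads it.
//   rd->compute_list_dispatch(list, ...); // Pass 2 consumes the results.
//
// Since the barrier is implemented as compute_list_end() + compute_list_begin(), the
// pipeline, uniform sets and push constant are re-bound automatically; callers don't
// need to restore any state themselves.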

void RenderingDevice::compute_list_end() {
	ERR_RENDER_THREAD_GUARD();
	ERR_FAIL_NULL(compute_list);

	draw_graph.add_compute_list_end();

	memdelete(compute_list);
	compute_list = nullptr;
}

#ifndef DISABLE_DEPRECATED
void RenderingDevice::barrier(BitField<BarrierMask> p_from, BitField<BarrierMask> p_to) {
	WARN_PRINT("Deprecated. Barriers are automatically inserted by RenderingDevice.");
}

void RenderingDevice::full_barrier() {
	WARN_PRINT("Deprecated. Barriers are automatically inserted by RenderingDevice.");
}
#endif

/*************************/
/**** TRANSFER WORKER ****/
/*************************/

static uint32_t _get_alignment_offset(uint32_t p_offset, uint32_t p_required_align) {
	uint32_t alignment_offset = (p_required_align > 0) ? (p_offset % p_required_align) : 0;
	if (alignment_offset != 0) {
		// If a particular alignment is required, add the offset as part of the required size.
		alignment_offset = p_required_align - alignment_offset;
	}

	return alignment_offset;
}
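
// Worked example: with p_offset == 100 and p_required_align == 16, 100 % 16 == 4, so
// the function returns 12 padding bytes and the next write starts at offset 112 (a
// multiple of 16). If p_offset is already aligned (e.g. 96), it returns 0.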

RenderingDevice::TransferWorker *RenderingDevice::_acquire_transfer_worker(uint32_t p_transfer_size, uint32_t p_required_align, uint32_t &r_staging_offset) {
	// Find the first worker that is not currently executing anything and has enough size for the transfer.
	// If no workers are available, we make a new one. If we're not allowed to make new ones, we wait until one of them is available.
	TransferWorker *transfer_worker = nullptr;
	uint32_t available_list_index = 0;
	bool transfer_worker_busy = true;
	bool transfer_worker_full = true;
	{
		MutexLock pool_lock(transfer_worker_pool_mutex);

		// If no workers are available and we've reached the max pool capacity, wait until one of them becomes available.
		bool transfer_worker_pool_full = transfer_worker_pool.size() >= transfer_worker_pool_max_size;
		while (transfer_worker_pool_available_list.is_empty() && transfer_worker_pool_full) {
			transfer_worker_pool_condition.wait(pool_lock);
		}

		// Look at all available workers first.
		for (uint32_t i = 0; i < transfer_worker_pool_available_list.size(); i++) {
			uint32_t worker_index = transfer_worker_pool_available_list[i];
			TransferWorker *candidate_worker = transfer_worker_pool[worker_index];
			candidate_worker->thread_mutex.lock();

			// Figure out if the worker can fit the transfer.
			uint32_t alignment_offset = _get_alignment_offset(candidate_worker->staging_buffer_size_in_use, p_required_align);
			uint32_t required_size = candidate_worker->staging_buffer_size_in_use + p_transfer_size + alignment_offset;
			bool candidate_worker_busy = candidate_worker->submitted;
			bool candidate_worker_full = required_size > candidate_worker->staging_buffer_size_allocated;
			bool pick_candidate = false;
			if (!candidate_worker_busy && !candidate_worker_full) {
				// A worker that can fit the transfer and is not waiting for a previous execution is the best possible candidate.
				pick_candidate = true;
			} else if (!candidate_worker_busy) {
				// The worker can't fit the transfer but it's not currently doing anything.
				// We pick it as a possible candidate if the current one is busy.
				pick_candidate = transfer_worker_busy;
			} else if (!candidate_worker_full) {
				// The worker can fit the transfer but it's currently executing previous work.
				// We pick it as a possible candidate if the current one is both busy and full.
				pick_candidate = transfer_worker_busy && transfer_worker_full;
			} else if (transfer_worker == nullptr) {
				// The worker can't fit the transfer and it's currently executing work, so it's the worst candidate.
				// We only pick it if no candidate has been picked yet.
				pick_candidate = true;
			}

			if (pick_candidate) {
				if (transfer_worker != nullptr) {
					// Release the lock for the worker that was picked previously.
					transfer_worker->thread_mutex.unlock();
				}

				// Keep the lock active for this worker.
				transfer_worker = candidate_worker;
				transfer_worker_busy = candidate_worker_busy;
				transfer_worker_full = candidate_worker_full;
				available_list_index = i;

				if (!transfer_worker_busy && !transfer_worker_full) {
					// Best possible candidate, stop searching early.
					break;
				}
			} else {
				// Release the lock for the candidate.
				candidate_worker->thread_mutex.unlock();
			}
		}

		if (transfer_worker != nullptr) {
			// A worker was picked, remove it from the available list.
			transfer_worker_pool_available_list.remove_at(available_list_index);
		} else {
			DEV_ASSERT(!transfer_worker_pool_full && "A transfer worker should never be created when the pool is full.");

			// No existing worker was picked, we create a new one.
			transfer_worker = memnew(TransferWorker);
			transfer_worker->command_fence = driver->fence_create();
			transfer_worker->command_pool = driver->command_pool_create(transfer_queue_family, RDD::COMMAND_BUFFER_TYPE_PRIMARY);
			transfer_worker->command_buffer = driver->command_buffer_create(transfer_worker->command_pool);
			transfer_worker->index = transfer_worker_pool.size();
			transfer_worker_pool.push_back(transfer_worker);
			transfer_worker_operation_used_by_draw.push_back(0);
			transfer_worker->thread_mutex.lock();
		}
	}

	if (transfer_worker->submitted) {
		// Wait for the worker if the command buffer was submitted but it hasn't finished processing yet.
		_wait_for_transfer_worker(transfer_worker);
	}

	uint32_t alignment_offset = _get_alignment_offset(transfer_worker->staging_buffer_size_in_use, p_required_align);
	transfer_worker->max_transfer_size = MAX(transfer_worker->max_transfer_size, p_transfer_size);

	uint32_t required_size = transfer_worker->staging_buffer_size_in_use + p_transfer_size + alignment_offset;
	if (required_size > transfer_worker->staging_buffer_size_allocated) {
		// If there aren't enough bytes available on the staging buffer, we submit everything pending from the worker and wait for the work to be finished.
		if (transfer_worker->recording) {
			_end_transfer_worker(transfer_worker);
			_submit_transfer_worker(transfer_worker);
		}

		if (transfer_worker->submitted) {
			_wait_for_transfer_worker(transfer_worker);
		}

		alignment_offset = 0;

		// If the staging buffer can't fit the transfer, we recreate the buffer.
		const uint32_t expected_buffer_size_minimum = 16 * 1024;
		uint32_t expected_buffer_size = MAX(transfer_worker->max_transfer_size, expected_buffer_size_minimum);
		if (expected_buffer_size > transfer_worker->staging_buffer_size_allocated) {
			if (transfer_worker->staging_buffer.id != 0) {
				driver->buffer_free(transfer_worker->staging_buffer);
			}

			uint32_t new_staging_buffer_size = next_power_of_2(expected_buffer_size);
			transfer_worker->staging_buffer_size_allocated = new_staging_buffer_size;
			transfer_worker->staging_buffer = driver->buffer_create(new_staging_buffer_size, RDD::BUFFER_USAGE_TRANSFER_FROM_BIT, RDD::MEMORY_ALLOCATION_TYPE_CPU);
		}
	}

	// Add the alignment before storing the offset that will be returned.
	transfer_worker->staging_buffer_size_in_use += alignment_offset;

	// Store the offset to return and increment the current size.
	r_staging_offset = transfer_worker->staging_buffer_size_in_use;
	transfer_worker->staging_buffer_size_in_use += p_transfer_size;

	if (!transfer_worker->recording) {
		// Begin the command buffer if the worker wasn't recording yet.
		driver->command_buffer_begin(transfer_worker->command_buffer);
		transfer_worker->recording = true;
	}

	return transfer_worker;
}
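
// Candidate ranking used above, from best to worst:
//   1. Idle worker with room for the transfer (ends the search immediately).
//   2. Idle worker that is too small (only a staging buffer resize is needed).
//   3. Busy worker with room (requires waiting on the GPU).
//   4. Busy worker that is too small (kept only if nothing else was found).
// Sizing example: the staging buffer grows to the next power of two that fits the
// largest transfer seen so far, never below 16 KiB; a 20 KiB transfer therefore
// allocates a 32 KiB buffer.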

void RenderingDevice::_release_transfer_worker(TransferWorker *p_transfer_worker) {
	p_transfer_worker->thread_mutex.unlock();

	transfer_worker_pool_mutex.lock();
	transfer_worker_pool_available_list.push_back(p_transfer_worker->index);
	transfer_worker_pool_mutex.unlock();
	transfer_worker_pool_condition.notify_one();
}

void RenderingDevice::_end_transfer_worker(TransferWorker *p_transfer_worker) {
	driver->command_buffer_end(p_transfer_worker->command_buffer);
	p_transfer_worker->recording = false;
}

void RenderingDevice::_submit_transfer_worker(TransferWorker *p_transfer_worker, VectorView<RDD::SemaphoreID> p_signal_semaphores) {
	driver->command_queue_execute_and_present(transfer_queue, {}, p_transfer_worker->command_buffer, p_signal_semaphores, p_transfer_worker->command_fence, {});
	for (uint32_t i = 0; i < p_signal_semaphores.size(); i++) {
		// Indicate the frame should wait on these semaphores before executing the main command buffer.
		frames[frame].semaphores_to_wait_on.push_back(p_signal_semaphores[i]);
	}

	p_transfer_worker->submitted = true;

	{
		MutexLock lock(p_transfer_worker->operations_mutex);
		p_transfer_worker->operations_submitted = p_transfer_worker->operations_counter;
	}
}

void RenderingDevice::_wait_for_transfer_worker(TransferWorker *p_transfer_worker) {
	driver->fence_wait(p_transfer_worker->command_fence);
	driver->command_pool_reset(p_transfer_worker->command_pool);
	p_transfer_worker->staging_buffer_size_in_use = 0;
	p_transfer_worker->submitted = false;

	{
		MutexLock lock(p_transfer_worker->operations_mutex);
		p_transfer_worker->operations_processed = p_transfer_worker->operations_submitted;
	}

	_flush_barriers_for_transfer_worker(p_transfer_worker);
}

void RenderingDevice::_flush_barriers_for_transfer_worker(TransferWorker *p_transfer_worker) {
	// Caller must have already acquired the mutex for the worker.
	if (!p_transfer_worker->texture_barriers.is_empty()) {
		MutexLock transfer_worker_lock(transfer_worker_pool_texture_barriers_mutex);
		for (uint32_t i = 0; i < p_transfer_worker->texture_barriers.size(); i++) {
			transfer_worker_pool_texture_barriers.push_back(p_transfer_worker->texture_barriers[i]);
		}

		p_transfer_worker->texture_barriers.clear();
	}
}

void RenderingDevice::_check_transfer_worker_operation(uint32_t p_transfer_worker_index, uint64_t p_transfer_worker_operation) {
	TransferWorker *transfer_worker = transfer_worker_pool[p_transfer_worker_index];
	MutexLock lock(transfer_worker->operations_mutex);
	uint64_t &dst_operation = transfer_worker_operation_used_by_draw[transfer_worker->index];
	dst_operation = MAX(dst_operation, p_transfer_worker_operation);
}
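
// Each worker keeps a monotonic operation counter: operations_submitted snapshots it
// on submit and operations_processed catches up after the fence wait. Draws record
// the highest operation they depend on here, so _submit_transfer_workers() can skip
// any worker whose processed counter already covers everything the frame uses.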

void RenderingDevice::_check_transfer_worker_buffer(Buffer *p_buffer) {
	if (p_buffer->transfer_worker_index >= 0) {
		_check_transfer_worker_operation(p_buffer->transfer_worker_index, p_buffer->transfer_worker_operation);
		p_buffer->transfer_worker_index = -1;
	}
}

void RenderingDevice::_check_transfer_worker_texture(Texture *p_texture) {
	if (p_texture->transfer_worker_index >= 0) {
		_check_transfer_worker_operation(p_texture->transfer_worker_index, p_texture->transfer_worker_operation);
		p_texture->transfer_worker_index = -1;
	}
}

void RenderingDevice::_check_transfer_worker_vertex_array(VertexArray *p_vertex_array) {
	if (!p_vertex_array->transfer_worker_indices.is_empty()) {
		for (int i = 0; i < p_vertex_array->transfer_worker_indices.size(); i++) {
			_check_transfer_worker_operation(p_vertex_array->transfer_worker_indices[i], p_vertex_array->transfer_worker_operations[i]);
		}

		p_vertex_array->transfer_worker_indices.clear();
		p_vertex_array->transfer_worker_operations.clear();
	}
}

void RenderingDevice::_check_transfer_worker_index_array(IndexArray *p_index_array) {
	if (p_index_array->transfer_worker_index >= 0) {
		_check_transfer_worker_operation(p_index_array->transfer_worker_index, p_index_array->transfer_worker_operation);
		p_index_array->transfer_worker_index = -1;
	}
}

void RenderingDevice::_submit_transfer_workers(RDD::CommandBufferID p_draw_command_buffer) {
	MutexLock transfer_worker_lock(transfer_worker_pool_mutex);
	for (uint32_t i = 0; i < transfer_worker_pool.size(); i++) {
		TransferWorker *worker = transfer_worker_pool[i];
		if (p_draw_command_buffer) {
			MutexLock lock(worker->operations_mutex);
			if (worker->operations_processed >= transfer_worker_operation_used_by_draw[worker->index]) {
				// The operation used by the draw has already been processed, we don't need to wait on the worker.
				continue;
			}
		}

		{
			MutexLock lock(worker->thread_mutex);
			if (worker->recording) {
				VectorView<RDD::SemaphoreID> semaphores = p_draw_command_buffer ? frames[frame].transfer_worker_semaphores[i] : VectorView<RDD::SemaphoreID>();
				_end_transfer_worker(worker);
				_submit_transfer_worker(worker, semaphores);
			}

			if (p_draw_command_buffer) {
				_flush_barriers_for_transfer_worker(worker);
			}
		}
	}
}

void RenderingDevice::_submit_transfer_barriers(RDD::CommandBufferID p_draw_command_buffer) {
	MutexLock transfer_worker_lock(transfer_worker_pool_texture_barriers_mutex);
	if (!transfer_worker_pool_texture_barriers.is_empty()) {
		driver->command_pipeline_barrier(p_draw_command_buffer, RDD::PIPELINE_STAGE_COPY_BIT, RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT, {}, {}, transfer_worker_pool_texture_barriers);
		transfer_worker_pool_texture_barriers.clear();
	}
}

void RenderingDevice::_wait_for_transfer_workers() {
	MutexLock transfer_worker_lock(transfer_worker_pool_mutex);
	for (TransferWorker *worker : transfer_worker_pool) {
		MutexLock lock(worker->thread_mutex);
		if (worker->submitted) {
			_wait_for_transfer_worker(worker);
		}
	}
}

void RenderingDevice::_free_transfer_workers() {
	MutexLock transfer_worker_lock(transfer_worker_pool_mutex);
	for (TransferWorker *worker : transfer_worker_pool) {
		driver->fence_free(worker->command_fence);
		driver->buffer_free(worker->staging_buffer);
		driver->command_pool_free(worker->command_pool);
		memdelete(worker);
	}

	transfer_worker_pool.clear();
}

/***********************/
/**** COMMAND GRAPH ****/
/***********************/

bool RenderingDevice::_texture_make_mutable(Texture *p_texture, RID p_texture_id) {
	if (p_texture->draw_tracker != nullptr) {
		// Texture already has a tracker.
		return false;
	} else {
		if (p_texture->owner.is_valid()) {
			// Texture has an owner.
			Texture *owner_texture = texture_owner.get_or_null(p_texture->owner);
			ERR_FAIL_NULL_V(owner_texture, false);

			if (owner_texture->draw_tracker != nullptr) {
				// Create a tracker for this dependency in particular.
				if (p_texture->slice_type == TEXTURE_SLICE_MAX) {
					// Shared texture.
					p_texture->draw_tracker = owner_texture->draw_tracker;
					p_texture->draw_tracker->reference_count++;
				} else {
					// Slice texture.
					HashMap<Rect2i, RDG::ResourceTracker *>::ConstIterator draw_tracker_iterator = owner_texture->slice_trackers.find(p_texture->slice_rect);
					RDG::ResourceTracker *draw_tracker = nullptr;
					if (draw_tracker_iterator != owner_texture->slice_trackers.end()) {
						// Reuse the tracker at the matching rectangle.
						draw_tracker = draw_tracker_iterator->value;
					} else {
						// Create a new tracker and store it on the map.
						draw_tracker = RDG::resource_tracker_create();
						draw_tracker->parent = owner_texture->draw_tracker;
						draw_tracker->texture_driver_id = p_texture->driver_id;
						draw_tracker->texture_size = Size2i(p_texture->width, p_texture->height);
						draw_tracker->texture_subresources = p_texture->barrier_range();
						draw_tracker->texture_usage = p_texture->usage_flags;
						draw_tracker->texture_slice_or_dirty_rect = p_texture->slice_rect;
						owner_texture->slice_trackers[p_texture->slice_rect] = draw_tracker;
					}

					p_texture->slice_trackers.clear();
					p_texture->draw_tracker = draw_tracker;
					p_texture->draw_tracker->reference_count++;
				}

				if (p_texture_id.is_valid()) {
					_dependencies_make_mutable(p_texture_id, p_texture->draw_tracker);
				}
			} else {
				// Delegate this to the owner instead, as it'll make all its dependencies mutable.
				_texture_make_mutable(owner_texture, p_texture->owner);
			}
		} else {
			// Regular texture.
			p_texture->draw_tracker = RDG::resource_tracker_create();
			p_texture->draw_tracker->texture_driver_id = p_texture->driver_id;
			p_texture->draw_tracker->texture_size = Size2i(p_texture->width, p_texture->height);
			p_texture->draw_tracker->texture_subresources = p_texture->barrier_range();
			p_texture->draw_tracker->texture_usage = p_texture->usage_flags;
			p_texture->draw_tracker->is_discardable = p_texture->is_discardable;
			p_texture->draw_tracker->reference_count = 1;

			if (p_texture_id.is_valid()) {
				if (p_texture->has_initial_data) {
					// If the texture was initialized with initial data but wasn't made mutable from the start, assume the texture sampling usage.
					p_texture->draw_tracker->usage = RDG::RESOURCE_USAGE_TEXTURE_SAMPLE;
				}

				_dependencies_make_mutable(p_texture_id, p_texture->draw_tracker);
			}
		}

		return true;
	}
}
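
// Example of the slice path above: two views created over the same rect of an atlas
// texture share one tracker (found via slice_trackers), while a view over a
// different rect gets its own tracker parented to the atlas tracker. Reference
// counts keep shared trackers alive until every view that uses them is freed.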

bool RenderingDevice::_buffer_make_mutable(Buffer *p_buffer, RID p_buffer_id) {
	if (p_buffer->draw_tracker != nullptr) {
		// Buffer already has a tracker.
		return false;
	} else {
		// Create a tracker for the buffer and make all its dependencies mutable.
		p_buffer->draw_tracker = RDG::resource_tracker_create();
		p_buffer->draw_tracker->buffer_driver_id = p_buffer->driver_id;
		if (p_buffer_id.is_valid()) {
			_dependencies_make_mutable(p_buffer_id, p_buffer->draw_tracker);
		}

		return true;
	}
}

bool RenderingDevice::_vertex_array_make_mutable(VertexArray *p_vertex_array, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
	if (!p_vertex_array->untracked_buffers.has(p_resource_id)) {
		// Vertex array thinks the buffer is already tracked or does not use it.
		return false;
	} else {
		// Vertex array is aware of the buffer but it isn't being tracked.
		p_vertex_array->draw_trackers.push_back(p_resource_tracker);
		p_vertex_array->untracked_buffers.erase(p_resource_id);
		return true;
	}
}

bool RenderingDevice::_index_array_make_mutable(IndexArray *p_index_array, RDG::ResourceTracker *p_resource_tracker) {
	if (p_index_array->draw_tracker != nullptr) {
		// Index array already has a tracker.
		return false;
	} else {
		// Index array should assign the tracker from the buffer.
		p_index_array->draw_tracker = p_resource_tracker;
		return true;
	}
}

bool RenderingDevice::_uniform_set_make_mutable(UniformSet *p_uniform_set, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
	HashMap<RID, RDG::ResourceUsage>::Iterator E = p_uniform_set->untracked_usage.find(p_resource_id);
	if (!E) {
		// Uniform set thinks the resource is already tracked or does not use it.
		return false;
	} else {
		// Uniform set has seen the resource but hasn't added its tracker yet.
		p_uniform_set->draw_trackers.push_back(p_resource_tracker);
		p_uniform_set->draw_trackers_usage.push_back(E->value);
		p_uniform_set->untracked_usage.remove(E);
		return true;
	}
}

bool RenderingDevice::_dependency_make_mutable(RID p_id, RID p_resource_id, RDG::ResourceTracker *p_resource_tracker) {
	if (texture_owner.owns(p_id)) {
		Texture *texture = texture_owner.get_or_null(p_id);
		return _texture_make_mutable(texture, p_id);
	} else if (vertex_array_owner.owns(p_id)) {
		VertexArray *vertex_array = vertex_array_owner.get_or_null(p_id);
		return _vertex_array_make_mutable(vertex_array, p_resource_id, p_resource_tracker);
	} else if (index_array_owner.owns(p_id)) {
		IndexArray *index_array = index_array_owner.get_or_null(p_id);
		return _index_array_make_mutable(index_array, p_resource_tracker);
	} else if (uniform_set_owner.owns(p_id)) {
		UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
		return _uniform_set_make_mutable(uniform_set, p_resource_id, p_resource_tracker);
	} else {
		DEV_ASSERT(false && "Unknown resource type to make mutable.");
		return false;
	}
}

bool RenderingDevice::_dependencies_make_mutable_recursive(RID p_id, RDG::ResourceTracker *p_resource_tracker) {
	bool made_mutable = false;
	HashMap<RID, HashSet<RID>>::Iterator E = dependency_map.find(p_id);
	if (E) {
		for (RID rid : E->value) {
			made_mutable = _dependency_make_mutable(rid, p_id, p_resource_tracker) || made_mutable;
		}
	}

	return made_mutable;
}

bool RenderingDevice::_dependencies_make_mutable(RID p_id, RDG::ResourceTracker *p_resource_tracker) {
	_THREAD_SAFE_METHOD_
	return _dependencies_make_mutable_recursive(p_id, p_resource_tracker);
}

/**************************/
/**** FRAME MANAGEMENT ****/
/**************************/

void RenderingDevice::free(RID p_id) {
	ERR_RENDER_THREAD_GUARD();

	_free_dependencies(p_id); // Recursively erase dependencies first, to avoid potential API problems.
	_free_internal(p_id);
}

void RenderingDevice::_free_internal(RID p_id) {
#ifdef DEV_ENABLED
	String resource_name;
	if (resource_names.has(p_id)) {
		resource_name = resource_names[p_id];
		resource_names.erase(p_id);
	}
#endif

	// Push everything so it's disposed of the next time this frame index is processed (meaning it's safe to do so).
	if (texture_owner.owns(p_id)) {
		Texture *texture = texture_owner.get_or_null(p_id);
		_check_transfer_worker_texture(texture);

		RDG::ResourceTracker *draw_tracker = texture->draw_tracker;
		if (draw_tracker != nullptr) {
			draw_tracker->reference_count--;
			if (draw_tracker->reference_count == 0) {
				RDG::resource_tracker_free(draw_tracker);

				if (texture->owner.is_valid() && (texture->slice_type != TEXTURE_SLICE_MAX)) {
					// If this was a texture slice, erase the tracker from the map.
					Texture *owner_texture = texture_owner.get_or_null(texture->owner);
					if (owner_texture != nullptr) {
						owner_texture->slice_trackers.erase(texture->slice_rect);
					}
				}
			}
		}

		frames[frame].textures_to_dispose_of.push_back(*texture);
		texture_owner.free(p_id);
	} else if (framebuffer_owner.owns(p_id)) {
		Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_id);
		frames[frame].framebuffers_to_dispose_of.push_back(*framebuffer);

		if (framebuffer->invalidated_callback != nullptr) {
			framebuffer->invalidated_callback(framebuffer->invalidated_callback_userdata);
		}

		framebuffer_owner.free(p_id);
	} else if (sampler_owner.owns(p_id)) {
		RDD::SamplerID sampler_driver_id = *sampler_owner.get_or_null(p_id);
		frames[frame].samplers_to_dispose_of.push_back(sampler_driver_id);
		sampler_owner.free(p_id);
	} else if (vertex_buffer_owner.owns(p_id)) {
		Buffer *vertex_buffer = vertex_buffer_owner.get_or_null(p_id);
		_check_transfer_worker_buffer(vertex_buffer);

		RDG::resource_tracker_free(vertex_buffer->draw_tracker);
		frames[frame].buffers_to_dispose_of.push_back(*vertex_buffer);
		vertex_buffer_owner.free(p_id);
	} else if (vertex_array_owner.owns(p_id)) {
		vertex_array_owner.free(p_id);
	} else if (index_buffer_owner.owns(p_id)) {
		IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_id);
		_check_transfer_worker_buffer(index_buffer);

		RDG::resource_tracker_free(index_buffer->draw_tracker);
		frames[frame].buffers_to_dispose_of.push_back(*index_buffer);
		index_buffer_owner.free(p_id);
	} else if (index_array_owner.owns(p_id)) {
		index_array_owner.free(p_id);
	} else if (shader_owner.owns(p_id)) {
		Shader *shader = shader_owner.get_or_null(p_id);
		if (shader->driver_id) { // Not a placeholder?
			frames[frame].shaders_to_dispose_of.push_back(*shader);
		}
		shader_owner.free(p_id);
	} else if (uniform_buffer_owner.owns(p_id)) {
		Buffer *uniform_buffer = uniform_buffer_owner.get_or_null(p_id);
		_check_transfer_worker_buffer(uniform_buffer);

		RDG::resource_tracker_free(uniform_buffer->draw_tracker);
		frames[frame].buffers_to_dispose_of.push_back(*uniform_buffer);
		uniform_buffer_owner.free(p_id);
	} else if (texture_buffer_owner.owns(p_id)) {
		Buffer *texture_buffer = texture_buffer_owner.get_or_null(p_id);
		_check_transfer_worker_buffer(texture_buffer);

		RDG::resource_tracker_free(texture_buffer->draw_tracker);
		frames[frame].buffers_to_dispose_of.push_back(*texture_buffer);
		texture_buffer_owner.free(p_id);
	} else if (storage_buffer_owner.owns(p_id)) {
		Buffer *storage_buffer = storage_buffer_owner.get_or_null(p_id);
		_check_transfer_worker_buffer(storage_buffer);

		RDG::resource_tracker_free(storage_buffer->draw_tracker);
		frames[frame].buffers_to_dispose_of.push_back(*storage_buffer);
		storage_buffer_owner.free(p_id);
	} else if (uniform_set_owner.owns(p_id)) {
		UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
		frames[frame].uniform_sets_to_dispose_of.push_back(*uniform_set);
		uniform_set_owner.free(p_id);

		if (uniform_set->invalidated_callback != nullptr) {
			uniform_set->invalidated_callback(uniform_set->invalidated_callback_userdata);
		}
	} else if (render_pipeline_owner.owns(p_id)) {
		RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_id);
		frames[frame].render_pipelines_to_dispose_of.push_back(*pipeline);
		render_pipeline_owner.free(p_id);
	} else if (compute_pipeline_owner.owns(p_id)) {
		ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_id);
		frames[frame].compute_pipelines_to_dispose_of.push_back(*pipeline);
		compute_pipeline_owner.free(p_id);
	} else {
#ifdef DEV_ENABLED
		ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()) + " " + resource_name);
#else
		ERR_PRINT("Attempted to free invalid ID: " + itos(p_id.get_id()));
#endif
	}

	frames_pending_resources_for_processing = uint32_t(frames.size());
}
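
// Deferred destruction example: with a frame queue of size 3, a resource freed while
// recording frame index F is pushed to frames[F] and only handed to the driver the
// next time frame index F is begun, i.e. after _stall_for_frame(F) guarantees the
// GPU no longer references it.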

// The full list of resources that can be named is in the VkObjectType enum.
// We just expose the resources that are owned and can be accessed easily.
void RenderingDevice::set_resource_name(RID p_id, const String &p_name) {
	_THREAD_SAFE_METHOD_

	if (texture_owner.owns(p_id)) {
		Texture *texture = texture_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_TEXTURE, texture->driver_id, p_name);
	} else if (framebuffer_owner.owns(p_id)) {
		//Framebuffer *framebuffer = framebuffer_owner.get_or_null(p_id);
		// Not implemented for now as the relationship between Framebuffer and RenderPass is very complex.
	} else if (sampler_owner.owns(p_id)) {
		RDD::SamplerID sampler_driver_id = *sampler_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_SAMPLER, sampler_driver_id, p_name);
	} else if (vertex_buffer_owner.owns(p_id)) {
		Buffer *vertex_buffer = vertex_buffer_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, vertex_buffer->driver_id, p_name);
	} else if (index_buffer_owner.owns(p_id)) {
		IndexBuffer *index_buffer = index_buffer_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, index_buffer->driver_id, p_name);
	} else if (shader_owner.owns(p_id)) {
		Shader *shader = shader_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_SHADER, shader->driver_id, p_name);
	} else if (uniform_buffer_owner.owns(p_id)) {
		Buffer *uniform_buffer = uniform_buffer_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, uniform_buffer->driver_id, p_name);
	} else if (texture_buffer_owner.owns(p_id)) {
		Buffer *texture_buffer = texture_buffer_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, texture_buffer->driver_id, p_name);
	} else if (storage_buffer_owner.owns(p_id)) {
		Buffer *storage_buffer = storage_buffer_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_BUFFER, storage_buffer->driver_id, p_name);
	} else if (uniform_set_owner.owns(p_id)) {
		UniformSet *uniform_set = uniform_set_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_UNIFORM_SET, uniform_set->driver_id, p_name);
	} else if (render_pipeline_owner.owns(p_id)) {
		RenderPipeline *pipeline = render_pipeline_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_PIPELINE, pipeline->driver_id, p_name);
	} else if (compute_pipeline_owner.owns(p_id)) {
		ComputePipeline *pipeline = compute_pipeline_owner.get_or_null(p_id);
		driver->set_object_name(RDD::OBJECT_TYPE_PIPELINE, pipeline->driver_id, p_name);
	} else {
		ERR_PRINT("Attempted to name invalid ID: " + itos(p_id.get_id()));
		return;
	}
#ifdef DEV_ENABLED
	resource_names[p_id] = p_name;
#endif
}
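
// Usage sketch (illustrative, not part of this file): names assigned here show up in
// graphics debuggers such as RenderDoc, e.g.:
//
//   RID tex = rd->texture_create(format, RD::TextureView());
//   rd->set_resource_name(tex, "Shadow Atlas");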

void RenderingDevice::draw_command_begin_label(String p_label_name, const Color &p_color) {
	ERR_RENDER_THREAD_GUARD();

	if (!context->is_debug_utils_enabled()) {
		return;
	}

	draw_graph.begin_label(p_label_name, p_color);
}

#ifndef DISABLE_DEPRECATED
void RenderingDevice::draw_command_insert_label(String p_label_name, const Color &p_color) {
	WARN_PRINT("Deprecated. Inserting labels no longer applies due to command reordering.");
}
#endif

void RenderingDevice::draw_command_end_label() {
	ERR_RENDER_THREAD_GUARD();

	draw_graph.end_label();
}

String RenderingDevice::get_device_vendor_name() const {
	return _get_device_vendor_name(device);
}

String RenderingDevice::get_device_name() const {
	return device.name;
}

RenderingDevice::DeviceType RenderingDevice::get_device_type() const {
	return DeviceType(device.type);
}

String RenderingDevice::get_device_api_name() const {
	return driver->get_api_name();
}

bool RenderingDevice::is_composite_alpha_supported() const {
	return driver->is_composite_alpha_supported(main_queue);
}

String RenderingDevice::get_device_api_version() const {
	return driver->get_api_version();
}

String RenderingDevice::get_device_pipeline_cache_uuid() const {
	return driver->get_pipeline_cache_uuid();
}

void RenderingDevice::swap_buffers(bool p_present) {
	ERR_RENDER_THREAD_GUARD();

	_end_frame();
	_execute_frame(p_present);

	// Advance to the next frame and begin recording again.
	frame = (frame + 1) % frames.size();
	_begin_frame(true);
}

void RenderingDevice::submit() {
	ERR_RENDER_THREAD_GUARD();
	ERR_FAIL_COND_MSG(is_main_instance, "Only local devices can submit and sync.");
	ERR_FAIL_COND_MSG(local_device_processing, "Device already submitted; call sync() to wait until it is done.");

	_end_frame();
	_execute_frame(false);
	local_device_processing = true;
}

void RenderingDevice::sync() {
	ERR_RENDER_THREAD_GUARD();
	ERR_FAIL_COND_MSG(is_main_instance, "Only local devices can submit and sync.");
	ERR_FAIL_COND_MSG(!local_device_processing, "sync() can only be called after a submit().");

	_begin_frame(true);
	local_device_processing = false;
}

void RenderingDevice::_free_pending_resources(int p_frame) {
	// Free in dependency-usage order (dependents before the resources they use), so nothing is destroyed while still referenced.

	// Pipelines.
	while (frames[p_frame].render_pipelines_to_dispose_of.front()) {
		RenderPipeline *pipeline = &frames[p_frame].render_pipelines_to_dispose_of.front()->get();
		driver->pipeline_free(pipeline->driver_id);
		frames[p_frame].render_pipelines_to_dispose_of.pop_front();
	}

	while (frames[p_frame].compute_pipelines_to_dispose_of.front()) {
		ComputePipeline *pipeline = &frames[p_frame].compute_pipelines_to_dispose_of.front()->get();
		driver->pipeline_free(pipeline->driver_id);
		frames[p_frame].compute_pipelines_to_dispose_of.pop_front();
	}

	// Uniform sets.
	while (frames[p_frame].uniform_sets_to_dispose_of.front()) {
		UniformSet *uniform_set = &frames[p_frame].uniform_sets_to_dispose_of.front()->get();
		driver->uniform_set_free(uniform_set->driver_id);
		frames[p_frame].uniform_sets_to_dispose_of.pop_front();
	}

	// Shaders.
	while (frames[p_frame].shaders_to_dispose_of.front()) {
		Shader *shader = &frames[p_frame].shaders_to_dispose_of.front()->get();
		driver->shader_free(shader->driver_id);
		frames[p_frame].shaders_to_dispose_of.pop_front();
	}

	// Samplers.
	while (frames[p_frame].samplers_to_dispose_of.front()) {
		RDD::SamplerID sampler = frames[p_frame].samplers_to_dispose_of.front()->get();
		driver->sampler_free(sampler);
		frames[p_frame].samplers_to_dispose_of.pop_front();
	}

	// Framebuffers.
	while (frames[p_frame].framebuffers_to_dispose_of.front()) {
		Framebuffer *framebuffer = &frames[p_frame].framebuffers_to_dispose_of.front()->get();
		draw_graph.framebuffer_cache_free(driver, framebuffer->framebuffer_cache);
		frames[p_frame].framebuffers_to_dispose_of.pop_front();
	}

	// Textures.
	while (frames[p_frame].textures_to_dispose_of.front()) {
		Texture *texture = &frames[p_frame].textures_to_dispose_of.front()->get();
		if (texture->bound) {
			WARN_PRINT("Deleted a texture while it was bound.");
		}

		_texture_free_shared_fallback(texture);

		texture_memory -= driver->texture_get_allocation_size(texture->driver_id);
		driver->texture_free(texture->driver_id);
		frames[p_frame].textures_to_dispose_of.pop_front();
	}

	// Buffers.
	while (frames[p_frame].buffers_to_dispose_of.front()) {
		Buffer &buffer = frames[p_frame].buffers_to_dispose_of.front()->get();
		driver->buffer_free(buffer.driver_id);
		buffer_memory -= buffer.size;
		frames[p_frame].buffers_to_dispose_of.pop_front();
	}

	if (frames_pending_resources_for_processing > 0u) {
		--frames_pending_resources_for_processing;
	}
}

uint32_t RenderingDevice::get_frame_delay() const {
	return frames.size();
}

uint64_t RenderingDevice::get_memory_usage(MemoryType p_type) const {
	switch (p_type) {
		case MEMORY_BUFFERS: {
			return buffer_memory;
		}
		case MEMORY_TEXTURES: {
			return texture_memory;
		}
		case MEMORY_TOTAL: {
			return driver->get_total_memory_used();
		}
		default: {
			DEV_ASSERT(false);
			return 0;
		}
	}
}
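
// Note: MEMORY_BUFFERS and MEMORY_TEXTURES only track resources created through this
// class, while MEMORY_TOTAL is reported by the driver's allocator, so the total is
// usually larger than the sum of the other two counters.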

void RenderingDevice::_begin_frame(bool p_presented) {
	// Before writing to this frame, wait for it to be finished.
	_stall_for_frame(frame);

	if (command_pool_reset_enabled) {
		bool reset = driver->command_pool_reset(frames[frame].command_pool);
		ERR_FAIL_COND(!reset);
	}

	if (p_presented) {
		update_perf_report();
		driver->linear_uniform_set_pools_reset(frame);
	}

	// Begin recording on the frame's command buffers.
	driver->begin_segment(frame, frames_drawn++);
	driver->command_buffer_begin(frames[frame].command_buffer);

	// Reset the graph.
	draw_graph.begin();

	// Erase pending resources.
	_free_pending_resources(frame);

	// Advance staging buffers if used.
	if (upload_staging_buffers.used) {
		upload_staging_buffers.current = (upload_staging_buffers.current + 1) % upload_staging_buffers.blocks.size();
		upload_staging_buffers.used = false;
	}

	if (download_staging_buffers.used) {
		download_staging_buffers.current = (download_staging_buffers.current + 1) % download_staging_buffers.blocks.size();
		download_staging_buffers.used = false;
	}

	if (frames[frame].timestamp_count) {
		driver->timestamp_query_pool_get_results(frames[frame].timestamp_pool, frames[frame].timestamp_count, frames[frame].timestamp_result_values.ptr());
		driver->command_timestamp_query_pool_reset(frames[frame].command_buffer, frames[frame].timestamp_pool, frames[frame].timestamp_count);
		SWAP(frames[frame].timestamp_names, frames[frame].timestamp_result_names);
		SWAP(frames[frame].timestamp_cpu_values, frames[frame].timestamp_cpu_result_values);
	}

	frames[frame].timestamp_result_count = frames[frame].timestamp_count;
	frames[frame].timestamp_count = 0;
	frames[frame].index = Engine::get_singleton()->get_frames_drawn();
}

void RenderingDevice::_end_frame() {
	if (draw_list) {
		ERR_PRINT("Found open draw list at the end of the frame, this should never happen (further drawing will likely not work).");
	}

	if (compute_list) {
		ERR_PRINT("Found open compute list at the end of the frame, this should never happen (further compute will likely not work).");
	}

	// The command buffer must be copied into a stack variable as the driver workarounds can change the command buffer in use.
	RDD::CommandBufferID command_buffer = frames[frame].command_buffer;
	_submit_transfer_workers(command_buffer);
	_submit_transfer_barriers(command_buffer);

	draw_graph.end(RENDER_GRAPH_REORDER, RENDER_GRAPH_FULL_BARRIERS, command_buffer, frames[frame].command_buffer_pool);
	driver->command_buffer_end(command_buffer);
	driver->end_segment();
}

void RenderingDevice::execute_chained_cmds(bool p_present_swap_chain, RenderingDeviceDriver::FenceID p_draw_fence,
		RenderingDeviceDriver::SemaphoreID p_dst_draw_semaphore_to_signal) {
	// Execute command buffers and use semaphores to wait on the execution of the previous one.
	// Normally there's only one command buffer, but driver workarounds can force situations where
	// there'll be more.
	uint32_t command_buffer_count = 1;
	RDG::CommandBufferPool &buffer_pool = frames[frame].command_buffer_pool;
	if (buffer_pool.buffers_used > 0) {
		command_buffer_count += buffer_pool.buffers_used;
		buffer_pool.buffers_used = 0;
	}

	thread_local LocalVector<RDD::SwapChainID> swap_chains;
	swap_chains.clear();

	// Instead of having just one command buffer, we may have several (split due to an Adreno
	// workaround on mobile, only if the workaround is active). Thus we must execute all of them
	// and chain them together via semaphores as dependent executions.
	thread_local LocalVector<RDD::SemaphoreID> wait_semaphores;
	wait_semaphores = frames[frame].semaphores_to_wait_on;

	for (uint32_t i = 0; i < command_buffer_count; i++) {
		RDD::CommandBufferID command_buffer;
		RDD::SemaphoreID signal_semaphore;
		RDD::FenceID signal_fence;
		if (i > 0) {
			command_buffer = buffer_pool.buffers[i - 1];
		} else {
			command_buffer = frames[frame].command_buffer;
		}

		if (i == (command_buffer_count - 1)) {
			// This is the last command buffer, it should signal the semaphore & fence.
			signal_semaphore = p_dst_draw_semaphore_to_signal;
			signal_fence = p_draw_fence;

			if (p_present_swap_chain) {
				// Just present the swap chains as part of the last command execution.
				swap_chains = frames[frame].swap_chains_to_present;
			}
		} else {
			signal_semaphore = buffer_pool.semaphores[i];
			// Semaphores always need to be signaled if it's not the last command buffer.
		}

		driver->command_queue_execute_and_present(main_queue, wait_semaphores, command_buffer,
				signal_semaphore ? signal_semaphore : VectorView<RDD::SemaphoreID>(), signal_fence,
				swap_chains);

		// Make the next command buffer wait on the semaphore signaled by this one.
		wait_semaphores.resize(1);
		wait_semaphores[0] = signal_semaphore;
	}

	frames[frame].semaphores_to_wait_on.clear();
}
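
// Chaining example with one extra buffer taken from the pool (workaround active):
//   CB0 waits on the frame's wait semaphores and signals pool semaphore S0.
//   CB1 waits on S0, signals p_dst_draw_semaphore_to_signal and p_draw_fence, and
//   presents the swap chains (presentation always rides on the last buffer).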

void RenderingDevice::_execute_frame(bool p_present) {
	// Check whether this frame should present the swap chains and in which queue.
	const bool frame_can_present = p_present && !frames[frame].swap_chains_to_present.is_empty();
	const bool separate_present_queue = main_queue != present_queue;

	// The semaphore is required if the frame can be presented and a separate present queue is used,
	// since the separate queue will wait for that semaphore before presenting.
	const RDD::SemaphoreID semaphore = (frame_can_present && separate_present_queue)
			? frames[frame].semaphore
			: RDD::SemaphoreID(nullptr);

	const bool present_swap_chain = frame_can_present && !separate_present_queue;
	execute_chained_cmds(present_swap_chain, frames[frame].fence, semaphore);

	// Indicate the fence has been signaled so the next time the frame's contents need to be
	// used, the CPU needs to wait on the work to be completed.
	frames[frame].fence_signaled = true;

	if (frame_can_present) {
		if (separate_present_queue) {
			// Issue the presentation separately if the presentation queue is different from the main queue.
			driver->command_queue_execute_and_present(present_queue, frames[frame].semaphore, {}, {}, {}, frames[frame].swap_chains_to_present);
		}

		frames[frame].swap_chains_to_present.clear();
	}
}

void RenderingDevice::_stall_for_frame(uint32_t p_frame) {
	thread_local PackedByteArray packed_byte_array;

	if (frames[p_frame].fence_signaled) {
		driver->fence_wait(frames[p_frame].fence);
		frames[p_frame].fence_signaled = false;

		// Flush any pending requests for asynchronous buffer downloads.
		if (!frames[p_frame].download_buffer_get_data_requests.is_empty()) {
			for (uint32_t i = 0; i < frames[p_frame].download_buffer_get_data_requests.size(); i++) {
				const BufferGetDataRequest &request = frames[p_frame].download_buffer_get_data_requests[i];
				packed_byte_array.resize(request.size);

				uint32_t array_offset = 0;
				for (uint32_t j = 0; j < request.frame_local_count; j++) {
					uint32_t local_index = request.frame_local_index + j;
					const RDD::BufferCopyRegion &region = frames[p_frame].download_buffer_copy_regions[local_index];
					uint8_t *buffer_data = driver->buffer_map(frames[p_frame].download_buffer_staging_buffers[local_index]);
					memcpy(&packed_byte_array.write[array_offset], &buffer_data[region.dst_offset], region.size);
					driver->buffer_unmap(frames[p_frame].download_buffer_staging_buffers[local_index]);
					array_offset += region.size;
				}

				request.callback.call(packed_byte_array);
			}

			frames[p_frame].download_buffer_staging_buffers.clear();
			frames[p_frame].download_buffer_copy_regions.clear();
			frames[p_frame].download_buffer_get_data_requests.clear();
		}

		// Flush any pending requests for asynchronous texture downloads.
		if (!frames[p_frame].download_texture_get_data_requests.is_empty()) {
			uint32_t pitch_step = driver->api_trait_get(RDD::API_TRAIT_TEXTURE_DATA_ROW_PITCH_STEP);
			for (uint32_t i = 0; i < frames[p_frame].download_texture_get_data_requests.size(); i++) {
				const TextureGetDataRequest &request = frames[p_frame].download_texture_get_data_requests[i];
				uint32_t texture_size = get_image_format_required_size(request.format, request.width, request.height, request.depth, request.mipmaps);
				packed_byte_array.resize(texture_size);

				// Find the block size of the texture's format.
				uint32_t block_w = 0;
				uint32_t block_h = 0;
				get_compressed_image_format_block_dimensions(request.format, block_w, block_h);

				uint32_t block_size = get_compressed_image_format_block_byte_size(request.format);
				uint32_t pixel_size = get_image_format_pixel_size(request.format);
				uint32_t pixel_rshift = get_compressed_image_format_pixel_rshift(request.format);
				uint32_t region_size = texture_download_region_size_px;

				for (uint32_t j = 0; j < request.frame_local_count; j++) {
					uint32_t local_index = request.frame_local_index + j;
					const RDD::BufferTextureCopyRegion &region = frames[p_frame].download_buffer_texture_copy_regions[local_index];
					uint32_t w = STEPIFY(request.width >> region.texture_subresources.mipmap, block_w);
					uint32_t h = STEPIFY(request.height >> region.texture_subresources.mipmap, block_h);
					uint32_t region_w = MIN(region_size, w - region.texture_offset.x);
					uint32_t region_h = MIN(region_size, h - region.texture_offset.y);
					uint32_t region_pitch = (region_w * pixel_size * block_w) >> pixel_rshift;
					region_pitch = STEPIFY(region_pitch, pitch_step);

					uint8_t *buffer_data = driver->buffer_map(frames[p_frame].download_texture_staging_buffers[local_index]);
					const uint8_t *read_ptr = buffer_data + region.buffer_offset;
					uint8_t *write_ptr = packed_byte_array.ptrw() + frames[p_frame].download_texture_mipmap_offsets[local_index];
					uint32_t unit_size = pixel_size;
					if (block_w != 1 || block_h != 1) {
						unit_size = block_size;
					}

					write_ptr += ((region.texture_offset.y / block_h) * (w / block_w) + (region.texture_offset.x / block_w)) * unit_size;
					for (uint32_t y = region_h / block_h; y > 0; y--) {
						memcpy(write_ptr, read_ptr, (region_w / block_w) * unit_size);
						write_ptr += (w / block_w) * unit_size;
						read_ptr += region_pitch;
					}

					driver->buffer_unmap(frames[p_frame].download_texture_staging_buffers[local_index]);
				}

				request.callback.call(packed_byte_array);
			}

			frames[p_frame].download_texture_staging_buffers.clear();
			frames[p_frame].download_buffer_texture_copy_regions.clear();
			frames[p_frame].download_texture_mipmap_offsets.clear();
			frames[p_frame].download_texture_get_data_requests.clear();
		}
	}
}
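
// Worked example for the row loop above: an RGBA8 region (pixel_size == 4, 1x1
// blocks) of 256x256 with a 256-byte pitch step gives region_pitch ==
// STEPIFY(256 * 4, 256) == 1024, so each of the 256 rows copies 1024 bytes from the
// staging buffer into the destination image at the mip level's row stride.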

void RenderingDevice::_stall_for_previous_frames() {
	for (uint32_t i = 0; i < frames.size(); i++) {
		_stall_for_frame(i);
	}
}

void RenderingDevice::_flush_and_stall_for_all_frames() {
	_stall_for_previous_frames();
	_end_frame();
	_execute_frame(false);
	_begin_frame();
}
  5264. Error RenderingDevice::initialize(RenderingContextDriver *p_context, DisplayServer::WindowID p_main_window) {
  5265. ERR_RENDER_THREAD_GUARD_V(ERR_UNAVAILABLE);
  5266. Error err;
  5267. RenderingContextDriver::SurfaceID main_surface = 0;
  5268. is_main_instance = (singleton == this) && (p_main_window != DisplayServer::INVALID_WINDOW_ID);
  5269. if (p_main_window != DisplayServer::INVALID_WINDOW_ID) {
  5270. // Retrieve the surface from the main window if it was specified.
  5271. main_surface = p_context->surface_get_from_window(p_main_window);
  5272. ERR_FAIL_COND_V(main_surface == 0, FAILED);
  5273. }
  5274. context = p_context;
  5275. driver = context->driver_create();
  5276. print_verbose("Devices:");
  5277. int32_t device_index = Engine::get_singleton()->get_gpu_index();
  5278. const uint32_t device_count = context->device_get_count();
  5279. const bool detect_device = (device_index < 0) || (device_index >= int32_t(device_count));
  5280. uint32_t device_type_score = 0;
  5281. for (uint32_t i = 0; i < device_count; i++) {
  5282. RenderingContextDriver::Device device_option = context->device_get(i);
  5283. String name = device_option.name;
  5284. String vendor = _get_device_vendor_name(device_option);
  5285. String type = _get_device_type_name(device_option);
  5286. bool present_supported = main_surface != 0 ? context->device_supports_present(i, main_surface) : false;
  5287. print_verbose(" #" + itos(i) + ": " + vendor + " " + name + " - " + (present_supported ? "Supported" : "Unsupported") + ", " + type);
  5288. if (detect_device && (present_supported || main_surface == 0)) {
  5289. // If a window was specified, present must be supported by the device to be available as an option.
  5290. // Assign a score for each type of device and prefer the device with the higher score.
  5291. uint32_t option_score = _get_device_type_score(device_option);
  5292. if (option_score > device_type_score) {
  5293. device_index = i;
  5294. device_type_score = option_score;
  5295. }
  5296. }
  5297. }
  5298. ERR_FAIL_COND_V_MSG((device_index < 0) || (device_index >= int32_t(device_count)), ERR_CANT_CREATE, "None of the devices supports both graphics and present queues.");
  5299. uint32_t frame_count = 1;
  5300. if (main_surface != 0) {
  5301. frame_count = MAX(2U, uint32_t(GLOBAL_GET("rendering/rendering_device/vsync/frame_queue_size")));
  5302. }
  5303. frame = 0;
  5304. frames.resize(frame_count);
  5305. max_timestamp_query_elements = GLOBAL_GET("debug/settings/profiler/max_timestamp_query_elements");
  5306. device = context->device_get(device_index);
  5307. err = driver->initialize(device_index, frame_count);
  5308. ERR_FAIL_COND_V_MSG(err != OK, FAILED, "Failed to initialize driver for device.");
  5309. if (is_main_instance) {
  5310. // Only the singleton instance with a display should print this information.
  5311. String rendering_method;
  5312. if (OS::get_singleton()->get_current_rendering_method() == "mobile") {
  5313. rendering_method = "Forward Mobile";
  5314. } else {
  5315. rendering_method = "Forward+";
  5316. }
  5317. // Output our device version.
  5318. Engine::get_singleton()->print_header(vformat("%s %s - %s - Using Device #%d: %s - %s", get_device_api_name(), get_device_api_version(), rendering_method, device_index, _get_device_vendor_name(device), device.name));
  5319. }
  5320. // Pick the main queue family. It is worth noting we explicitly do not request the transfer bit, as apparently the specification defines
  5321. // that the existence of either the graphics or compute bit implies that the queue can also do transfer operations, but it is optional
  5322. // to indicate whether it supports them or not with the dedicated transfer bit if either is set.
  5323. BitField<RDD::CommandQueueFamilyBits> main_queue_bits;
  5324. main_queue_bits.set_flag(RDD::COMMAND_QUEUE_FAMILY_GRAPHICS_BIT);
  5325. main_queue_bits.set_flag(RDD::COMMAND_QUEUE_FAMILY_COMPUTE_BIT);
  5326. #if !FORCE_SEPARATE_PRESENT_QUEUE
  5327. // Needing to use a separate queue for presentation is an edge case that remains to be seen what hardware triggers it at all.
  5328. main_queue_family = driver->command_queue_family_get(main_queue_bits, main_surface);
  5329. if (!main_queue_family && (main_surface != 0))
  5330. #endif
  5331. {
  5332. // If it was not possible to find a main queue that supports the surface, we attempt to get two different queues instead.
  5333. main_queue_family = driver->command_queue_family_get(main_queue_bits);
  5334. present_queue_family = driver->command_queue_family_get(BitField<RDD::CommandQueueFamilyBits>(), main_surface);
  5335. ERR_FAIL_COND_V(!present_queue_family, FAILED);
  5336. }
  5337. ERR_FAIL_COND_V(!main_queue_family, FAILED);
  5338. // Create the main queue.
  5339. main_queue = driver->command_queue_create(main_queue_family, true);
  5340. ERR_FAIL_COND_V(!main_queue, FAILED);
  5341. transfer_queue_family = driver->command_queue_family_get(RDD::COMMAND_QUEUE_FAMILY_TRANSFER_BIT);
  5342. if (transfer_queue_family) {
  5343. // Create the transfer queue.
  5344. transfer_queue = driver->command_queue_create(transfer_queue_family);
  5345. ERR_FAIL_COND_V(!transfer_queue, FAILED);
  5346. } else {
  5347. // Use main queue as the transfer queue.
  5348. transfer_queue = main_queue;
  5349. transfer_queue_family = main_queue_family;
  5350. }
  5351. if (present_queue_family) {
  5352. // Create the present queue.
  5353. present_queue = driver->command_queue_create(present_queue_family);
  5354. ERR_FAIL_COND_V(!present_queue, FAILED);
  5355. } else {
  5356. // Use main queue as the present queue.
  5357. present_queue = main_queue;
  5358. present_queue_family = main_queue_family;
  5359. }
	// Use the processor count as the max amount of transfer workers that can be created.
	transfer_worker_pool_max_size = OS::get_singleton()->get_processor_count();

	// Create data for all the frames.
	for (uint32_t i = 0; i < frames.size(); i++) {
		frames[i].index = 0;

		// Create command pool, command buffers, semaphores and fences.
		frames[i].command_pool = driver->command_pool_create(main_queue_family, RDD::COMMAND_BUFFER_TYPE_PRIMARY);
		ERR_FAIL_COND_V(!frames[i].command_pool, FAILED);
		frames[i].command_buffer = driver->command_buffer_create(frames[i].command_pool);
		ERR_FAIL_COND_V(!frames[i].command_buffer, FAILED);
		frames[i].semaphore = driver->semaphore_create();
		ERR_FAIL_COND_V(!frames[i].semaphore, FAILED);
		frames[i].fence = driver->fence_create();
		ERR_FAIL_COND_V(!frames[i].fence, FAILED);
		frames[i].fence_signaled = false;

		// Create query pool.
		frames[i].timestamp_pool = driver->timestamp_query_pool_create(max_timestamp_query_elements);
		frames[i].timestamp_names.resize(max_timestamp_query_elements);
		frames[i].timestamp_cpu_values.resize(max_timestamp_query_elements);
		frames[i].timestamp_count = 0;
		frames[i].timestamp_result_names.resize(max_timestamp_query_elements);
		frames[i].timestamp_cpu_result_values.resize(max_timestamp_query_elements);
		frames[i].timestamp_result_values.resize(max_timestamp_query_elements);
		frames[i].timestamp_result_count = 0;

		// Assign the main queue family and command pool to the command buffer pool.
		frames[i].command_buffer_pool.pool = frames[i].command_pool;

		// Create the semaphores for the transfer workers.
		frames[i].transfer_worker_semaphores.resize(transfer_worker_pool_max_size);
		for (uint32_t j = 0; j < transfer_worker_pool_max_size; j++) {
			frames[i].transfer_worker_semaphores[j] = driver->semaphore_create();
			ERR_FAIL_COND_V(!frames[i].transfer_worker_semaphores[j], FAILED);
		}
	}
	// Start from frame count, so everything else is immediately old.
	frames_drawn = frames.size();

	// Initialize recording on the first frame.
	driver->begin_segment(frame, frames_drawn++);
	driver->command_buffer_begin(frames[0].command_buffer);

	// Create draw graph and start it initialized as well.
	draw_graph.initialize(driver, device, &_render_pass_create_from_graph, frames.size(), main_queue_family, SECONDARY_COMMAND_BUFFERS_PER_FRAME);
	draw_graph.begin();

	for (uint32_t i = 0; i < frames.size(); i++) {
		// Reset all queries in a query pool before doing any operations with them.
		driver->command_timestamp_query_pool_reset(frames[0].command_buffer, frames[i].timestamp_pool, max_timestamp_query_elements);
	}
	// Convert block size from KB.
	upload_staging_buffers.block_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/block_size_kb");
	upload_staging_buffers.block_size = MAX(4u, upload_staging_buffers.block_size);
	upload_staging_buffers.block_size *= 1024;

	// Convert staging buffer size from MB.
	upload_staging_buffers.max_size = GLOBAL_GET("rendering/rendering_device/staging_buffer/max_size_mb");
	upload_staging_buffers.max_size = MAX(1u, upload_staging_buffers.max_size);
	upload_staging_buffers.max_size *= 1024 * 1024;
	upload_staging_buffers.max_size = MAX(upload_staging_buffers.max_size, upload_staging_buffers.block_size * 4);
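	// Worked example of the clamping above (illustrative numbers, not necessarily the project defaults):
	// with block_size_kb = 256 and max_size_mb = 128, block_size = 256 * 1024 = 262144 bytes and
	// max_size = MAX(128 * 1024 * 1024, 4 * 262144) = 128 MiB, so the pool can never hold fewer than four blocks.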
	// Copy the sizes to the download staging buffers.
	download_staging_buffers.block_size = upload_staging_buffers.block_size;
	download_staging_buffers.max_size = upload_staging_buffers.max_size;

	texture_upload_region_size_px = GLOBAL_GET("rendering/rendering_device/staging_buffer/texture_upload_region_size_px");
	texture_upload_region_size_px = nearest_power_of_2_templated(texture_upload_region_size_px);
	texture_download_region_size_px = GLOBAL_GET("rendering/rendering_device/staging_buffer/texture_download_region_size_px");
	texture_download_region_size_px = nearest_power_of_2_templated(texture_download_region_size_px);
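	// The region sizes are snapped to powers of two so texture copies split into evenly aligned chunks;
	// e.g. a configured value of 100 px would become 128 px, assuming nearest_power_of_2_templated rounds upward.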
	// Ensure current staging block is valid and at least one per frame exists.
	upload_staging_buffers.current = 0;
	upload_staging_buffers.used = false;
	upload_staging_buffers.usage_bits = RDD::BUFFER_USAGE_TRANSFER_FROM_BIT;

	download_staging_buffers.current = 0;
	download_staging_buffers.used = false;
	download_staging_buffers.usage_bits = RDD::BUFFER_USAGE_TRANSFER_TO_BIT;

	for (uint32_t i = 0; i < frames.size(); i++) {
		// Staging was never used, create the blocks.
		err = _insert_staging_block(upload_staging_buffers);
		ERR_FAIL_COND_V(err, FAILED);
		err = _insert_staging_block(download_staging_buffers);
		ERR_FAIL_COND_V(err, FAILED);
	}
	draw_list = nullptr;
	compute_list = nullptr;

	bool project_pipeline_cache_enable = GLOBAL_GET("rendering/rendering_device/pipeline_cache/enable");
	if (is_main_instance && project_pipeline_cache_enable) {
		// Only the instance that is not a local device and is also the singleton is allowed to manage a pipeline cache.
		pipeline_cache_file_path = vformat("user://vulkan/pipelines.%s.%s",
				OS::get_singleton()->get_current_rendering_method(),
				device.name.validate_filename().replace(" ", "_").to_lower());
		if (Engine::get_singleton()->is_editor_hint()) {
			pipeline_cache_file_path += ".editor";
		}
		pipeline_cache_file_path += ".cache";
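		// Illustrative result (hypothetical GPU name): user://vulkan/pipelines.mobile.my_gpu.editor.cache
		// when running in the editor, or the same path without ".editor" in an exported project.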
		Vector<uint8_t> cache_data = _load_pipeline_cache();
		pipeline_cache_enabled = driver->pipeline_cache_create(cache_data);
		if (pipeline_cache_enabled) {
			pipeline_cache_size = driver->pipeline_cache_query_size();
			print_verbose(vformat("Startup PSO cache (%.1f MiB)", pipeline_cache_size / (1024.0f * 1024.0f)));
		}
	}

	return OK;
}

Vector<uint8_t> RenderingDevice::_load_pipeline_cache() {
	DirAccess::make_dir_recursive_absolute(pipeline_cache_file_path.get_base_dir());

	if (FileAccess::exists(pipeline_cache_file_path)) {
		Error file_error;
		Vector<uint8_t> file_data = FileAccess::get_file_as_bytes(pipeline_cache_file_path, &file_error);
		return file_data;
	} else {
		return Vector<uint8_t>();
	}
}
void RenderingDevice::_update_pipeline_cache(bool p_closing) {
	_THREAD_SAFE_METHOD_

	{
		bool still_saving = pipeline_cache_save_task != WorkerThreadPool::INVALID_TASK_ID && !WorkerThreadPool::get_singleton()->is_task_completed(pipeline_cache_save_task);
		if (still_saving) {
			if (p_closing) {
				WorkerThreadPool::get_singleton()->wait_for_task_completion(pipeline_cache_save_task);
				pipeline_cache_save_task = WorkerThreadPool::INVALID_TASK_ID;
			} else {
				// We can't save until the currently running save is done. We'll retry next time; worst case, we'll save when exiting.
				return;
			}
		}
	}

	{
		size_t new_pipelines_cache_size = driver->pipeline_cache_query_size();
		ERR_FAIL_COND(!new_pipelines_cache_size);
		size_t difference = new_pipelines_cache_size - pipeline_cache_size;

		bool must_save = false;
		if (p_closing) {
			must_save = difference > 0;
		} else {
			float save_interval = GLOBAL_GET("rendering/rendering_device/pipeline_cache/save_chunk_size_mb");
			must_save = difference > 0 && difference / (1024.0f * 1024.0f) >= save_interval;
		}

		if (must_save) {
			pipeline_cache_size = new_pipelines_cache_size;
		} else {
			return;
		}
	}

	if (p_closing) {
		_save_pipeline_cache(this);
	} else {
		pipeline_cache_save_task = WorkerThreadPool::get_singleton()->add_native_task(&_save_pipeline_cache, this, false, "PipelineCacheSave");
	}
}
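// Example of the chunked-save policy above (hypothetical numbers): with save_chunk_size_mb = 3.0, a cache
// that grew from 10.0 MiB to 12.5 MiB is left alone mid-session (2.5 MiB < 3.0 MiB), while any growth at
// all forces a synchronous save when p_closing is true.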
void RenderingDevice::_save_pipeline_cache(void *p_data) {
	RenderingDevice *self = static_cast<RenderingDevice *>(p_data);

	self->_thread_safe_.lock();
	Vector<uint8_t> cache_blob = self->driver->pipeline_cache_serialize();
	self->_thread_safe_.unlock();

	if (cache_blob.size() == 0) {
		return;
	}
	print_verbose(vformat("Updated PSO cache (%.1f MiB)", cache_blob.size() / (1024.0f * 1024.0f)));

	Ref<FileAccess> f = FileAccess::open(self->pipeline_cache_file_path, FileAccess::WRITE, nullptr);
	if (f.is_valid()) {
		f->store_buffer(cache_blob);
	}
}
template <typename T>
void RenderingDevice::_free_rids(T &p_owner, const char *p_type) {
	List<RID> owned;
	p_owner.get_owned_list(&owned);
	if (owned.size()) {
		if (owned.size() == 1) {
			WARN_PRINT(vformat("1 RID of type \"%s\" was leaked.", p_type));
		} else {
			WARN_PRINT(vformat("%d RIDs of type \"%s\" were leaked.", owned.size(), p_type));
		}
		for (const RID &E : owned) {
#ifdef DEV_ENABLED
			if (resource_names.has(E)) {
				print_line(String(" - ") + resource_names[E]);
			}
#endif
			free(E);
		}
	}
}
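// _free_rids() is the generic leak-reporting sweep used by finalize() below, e.g.:
//   _free_rids(sampler_owner, "Sampler");
// Textures get a dedicated pass there instead, because shared textures must be freed before their owners.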
void RenderingDevice::capture_timestamp(const String &p_name) {
	ERR_RENDER_THREAD_GUARD();

	ERR_FAIL_COND_MSG(draw_list != nullptr && draw_list->state.draw_count > 0, "Capturing timestamps during draw list creation is not allowed. Offending timestamp was: " + p_name);
	ERR_FAIL_COND_MSG(compute_list != nullptr && compute_list->state.dispatch_count > 0, "Capturing timestamps during compute list creation is not allowed. Offending timestamp was: " + p_name);
	ERR_FAIL_COND_MSG(frames[frame].timestamp_count >= max_timestamp_query_elements, vformat("Tried capturing more timestamps than the configured maximum (%d). You can increase this limit in the project settings under 'Debug/Settings' called 'Max Timestamp Query Elements'.", max_timestamp_query_elements));

	draw_graph.add_capture_timestamp(frames[frame].timestamp_pool, frames[frame].timestamp_count);

	frames[frame].timestamp_names[frames[frame].timestamp_count] = p_name;
	frames[frame].timestamp_cpu_values[frames[frame].timestamp_count] = OS::get_singleton()->get_ticks_usec();
	frames[frame].timestamp_count++;
}
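// Typical profiling flow (illustrative): call capture_timestamp("some_label") around the work to measure,
// then read the results back a few frames later through get_captured_timestamps_count(),
// get_captured_timestamp_name() and the GPU/CPU time getters defined below.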
uint64_t RenderingDevice::get_driver_resource(DriverResource p_resource, RID p_rid, uint64_t p_index) {
	ERR_RENDER_THREAD_GUARD_V(0);

	uint64_t driver_id = 0;
	switch (p_resource) {
		case DRIVER_RESOURCE_LOGICAL_DEVICE:
		case DRIVER_RESOURCE_PHYSICAL_DEVICE:
		case DRIVER_RESOURCE_TOPMOST_OBJECT:
			break;
		case DRIVER_RESOURCE_COMMAND_QUEUE:
			driver_id = main_queue.id;
			break;
		case DRIVER_RESOURCE_QUEUE_FAMILY:
			driver_id = main_queue_family.id;
			break;
		case DRIVER_RESOURCE_TEXTURE:
		case DRIVER_RESOURCE_TEXTURE_VIEW:
		case DRIVER_RESOURCE_TEXTURE_DATA_FORMAT: {
			Texture *tex = texture_owner.get_or_null(p_rid);
			ERR_FAIL_NULL_V(tex, 0);
			driver_id = tex->driver_id.id;
		} break;
		case DRIVER_RESOURCE_SAMPLER: {
			RDD::SamplerID *sampler_driver_id = sampler_owner.get_or_null(p_rid);
			ERR_FAIL_NULL_V(sampler_driver_id, 0);
			driver_id = (*sampler_driver_id).id;
		} break;
		case DRIVER_RESOURCE_UNIFORM_SET: {
			UniformSet *uniform_set = uniform_set_owner.get_or_null(p_rid);
			ERR_FAIL_NULL_V(uniform_set, 0);
			driver_id = uniform_set->driver_id.id;
		} break;
		case DRIVER_RESOURCE_BUFFER: {
			Buffer *buffer = nullptr;
			if (vertex_buffer_owner.owns(p_rid)) {
				buffer = vertex_buffer_owner.get_or_null(p_rid);
			} else if (index_buffer_owner.owns(p_rid)) {
				buffer = index_buffer_owner.get_or_null(p_rid);
			} else if (uniform_buffer_owner.owns(p_rid)) {
				buffer = uniform_buffer_owner.get_or_null(p_rid);
			} else if (texture_buffer_owner.owns(p_rid)) {
				buffer = texture_buffer_owner.get_or_null(p_rid);
			} else if (storage_buffer_owner.owns(p_rid)) {
				buffer = storage_buffer_owner.get_or_null(p_rid);
			}
			ERR_FAIL_NULL_V(buffer, 0);
			driver_id = buffer->driver_id.id;
		} break;
		case DRIVER_RESOURCE_COMPUTE_PIPELINE: {
			ComputePipeline *compute_pipeline = compute_pipeline_owner.get_or_null(p_rid);
			ERR_FAIL_NULL_V(compute_pipeline, 0);
			driver_id = compute_pipeline->driver_id.id;
		} break;
		case DRIVER_RESOURCE_RENDER_PIPELINE: {
			RenderPipeline *render_pipeline = render_pipeline_owner.get_or_null(p_rid);
			ERR_FAIL_NULL_V(render_pipeline, 0);
			driver_id = render_pipeline->driver_id.id;
		} break;
		default: {
			ERR_FAIL_V(0);
		} break;
	}

	return driver->get_resource_native_handle(p_resource, driver_id);
}
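// Interop example (illustrative): code that needs the backend's raw handle for a texture can call
//   uint64_t native_handle = rd->get_driver_resource(DRIVER_RESOURCE_TEXTURE, texture_rid, 0);
// and reinterpret the result according to the active driver (e.g. a VkImage under Vulkan).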
String RenderingDevice::get_driver_and_device_memory_report() const {
	return context->get_driver_and_device_memory_report();
}

String RenderingDevice::get_tracked_object_name(uint32_t p_type_index) const {
	return context->get_tracked_object_name(p_type_index);
}

uint64_t RenderingDevice::get_tracked_object_type_count() const {
	return context->get_tracked_object_type_count();
}

uint64_t RenderingDevice::get_driver_total_memory() const {
	return context->get_driver_total_memory();
}

uint64_t RenderingDevice::get_driver_allocation_count() const {
	return context->get_driver_allocation_count();
}

uint64_t RenderingDevice::get_driver_memory_by_object_type(uint32_t p_type) const {
	return context->get_driver_memory_by_object_type(p_type);
}

uint64_t RenderingDevice::get_driver_allocs_by_object_type(uint32_t p_type) const {
	return context->get_driver_allocs_by_object_type(p_type);
}

uint64_t RenderingDevice::get_device_total_memory() const {
	return context->get_device_total_memory();
}

uint64_t RenderingDevice::get_device_allocation_count() const {
	return context->get_device_allocation_count();
}

uint64_t RenderingDevice::get_device_memory_by_object_type(uint32_t type) const {
	return context->get_device_memory_by_object_type(type);
}

uint64_t RenderingDevice::get_device_allocs_by_object_type(uint32_t type) const {
	return context->get_device_allocs_by_object_type(type);
}

uint32_t RenderingDevice::get_captured_timestamps_count() const {
	ERR_RENDER_THREAD_GUARD_V(0);
	return frames[frame].timestamp_result_count;
}

uint64_t RenderingDevice::get_captured_timestamps_frame() const {
	ERR_RENDER_THREAD_GUARD_V(0);
	return frames[frame].index;
}

uint64_t RenderingDevice::get_captured_timestamp_gpu_time(uint32_t p_index) const {
	ERR_RENDER_THREAD_GUARD_V(0);
	ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
	return driver->timestamp_query_result_to_time(frames[frame].timestamp_result_values[p_index]);
}

uint64_t RenderingDevice::get_captured_timestamp_cpu_time(uint32_t p_index) const {
	ERR_RENDER_THREAD_GUARD_V(0);
	ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, 0);
	return frames[frame].timestamp_cpu_result_values[p_index];
}

String RenderingDevice::get_captured_timestamp_name(uint32_t p_index) const {
	ERR_FAIL_UNSIGNED_INDEX_V(p_index, frames[frame].timestamp_result_count, String());
	return frames[frame].timestamp_result_names[p_index];
}

uint64_t RenderingDevice::limit_get(Limit p_limit) const {
	return driver->limit_get(p_limit);
}
void RenderingDevice::finalize() {
	ERR_RENDER_THREAD_GUARD();

	if (!frames.is_empty()) {
		// Wait for all frames to have finished rendering.
		_flush_and_stall_for_all_frames();
	}

	// Wait for transfer workers to finish.
	_submit_transfer_workers();
	_wait_for_transfer_workers();

	// Delete everything the graph has created.
	draw_graph.finalize();

	// Free all resources.
	_free_rids(render_pipeline_owner, "Pipeline");
	_free_rids(compute_pipeline_owner, "Compute");
	_free_rids(uniform_set_owner, "UniformSet");
	_free_rids(texture_buffer_owner, "TextureBuffer");
	_free_rids(storage_buffer_owner, "StorageBuffer");
	_free_rids(uniform_buffer_owner, "UniformBuffer");
	_free_rids(shader_owner, "Shader");
	_free_rids(index_array_owner, "IndexArray");
	_free_rids(index_buffer_owner, "IndexBuffer");
	_free_rids(vertex_array_owner, "VertexArray");
	_free_rids(vertex_buffer_owner, "VertexBuffer");
	_free_rids(framebuffer_owner, "Framebuffer");
	_free_rids(sampler_owner, "Sampler");

	{
		// For textures it's a bit more difficult because they may be shared.
		List<RID> owned;
		texture_owner.get_owned_list(&owned);
		if (owned.size()) {
			if (owned.size() == 1) {
				WARN_PRINT("1 RID of type \"Texture\" was leaked.");
			} else {
				WARN_PRINT(vformat("%d RIDs of type \"Texture\" were leaked.", owned.size()));
			}

			// Free shared first.
			for (List<RID>::Element *E = owned.front(); E;) {
				List<RID>::Element *N = E->next();
				if (texture_is_shared(E->get())) {
#ifdef DEV_ENABLED
					if (resource_names.has(E->get())) {
						print_line(String(" - ") + resource_names[E->get()]);
					}
#endif
					free(E->get());
					owned.erase(E);
				}
				E = N;
			}
			// Free non-shared textures second; this avoids errors from trying to free textures that
			// no longer exist due to dependencies on the shared ones freed above.
			for (const RID &E : owned) {
#ifdef DEV_ENABLED
				if (resource_names.has(E)) {
					print_line(String(" - ") + resource_names[E]);
				}
#endif
				free(E);
			}
		}
	}
	// Erase the transfer workers after all resources have been freed.
	_free_transfer_workers();

	// Free everything pending.
	for (uint32_t i = 0; i < frames.size(); i++) {
		int f = (frame + i) % frames.size();
		_free_pending_resources(f);
		driver->command_pool_free(frames[i].command_pool);
		driver->timestamp_query_pool_free(frames[i].timestamp_pool);
		driver->semaphore_free(frames[i].semaphore);
		driver->fence_free(frames[i].fence);

		RDG::CommandBufferPool &buffer_pool = frames[i].command_buffer_pool;
		for (uint32_t j = 0; j < buffer_pool.buffers.size(); j++) {
			driver->semaphore_free(buffer_pool.semaphores[j]);
		}

		for (uint32_t j = 0; j < frames[i].transfer_worker_semaphores.size(); j++) {
			driver->semaphore_free(frames[i].transfer_worker_semaphores[j]);
		}
	}

	if (pipeline_cache_enabled) {
		_update_pipeline_cache(true);
		driver->pipeline_cache_free();
	}

	frames.clear();

	for (int i = 0; i < upload_staging_buffers.blocks.size(); i++) {
		driver->buffer_free(upload_staging_buffers.blocks[i].driver_id);
	}

	for (int i = 0; i < download_staging_buffers.blocks.size(); i++) {
		driver->buffer_free(download_staging_buffers.blocks[i].driver_id);
	}

	while (vertex_formats.size()) {
		HashMap<VertexFormatID, VertexDescriptionCache>::Iterator temp = vertex_formats.begin();
		driver->vertex_format_free(temp->value.driver_id);
		vertex_formats.remove(temp);
	}

	for (KeyValue<FramebufferFormatID, FramebufferFormat> &E : framebuffer_formats) {
		driver->render_pass_free(E.value.render_pass);
	}
	framebuffer_formats.clear();

	// Delete the swap chains created for the screens.
	for (const KeyValue<DisplayServer::WindowID, RDD::SwapChainID> &it : screen_swap_chains) {
		driver->swap_chain_free(it.value);
	}
	screen_swap_chains.clear();

	// Delete the command queues.
	if (present_queue) {
		if (main_queue != present_queue) {
			// Only delete the present queue if it's unique.
			driver->command_queue_free(present_queue);
		}
		present_queue = RDD::CommandQueueID();
	}

	if (transfer_queue) {
		if (main_queue != transfer_queue) {
			// Only delete the transfer queue if it's unique.
			driver->command_queue_free(transfer_queue);
		}
		transfer_queue = RDD::CommandQueueID();
	}

	if (main_queue) {
		driver->command_queue_free(main_queue);
		main_queue = RDD::CommandQueueID();
	}

	// Delete the driver once everything else has been deleted.
	if (driver != nullptr) {
		context->driver_free(driver);
		driver = nullptr;
	}

	// All these should be clear at this point.
	ERR_FAIL_COND(dependency_map.size());
	ERR_FAIL_COND(reverse_dependency_map.size());
}
void RenderingDevice::_set_max_fps(int p_max_fps) {
	for (const KeyValue<DisplayServer::WindowID, RDD::SwapChainID> &it : screen_swap_chains) {
		driver->swap_chain_set_max_fps(it.value, p_max_fps);
	}
}

RenderingDevice *RenderingDevice::create_local_device() {
	RenderingDevice *rd = memnew(RenderingDevice);
	rd->initialize(context);
	return rd;
}
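// Local devices created here are not tied to a window or swap chain; they render off-screen and are
// driven manually through submit() and sync() (both exposed via the bindings below).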
bool RenderingDevice::has_feature(const Features p_feature) const {
	return driver->has_feature(p_feature);
}

void RenderingDevice::_bind_methods() {
	ClassDB::bind_method(D_METHOD("texture_create", "format", "view", "data"), &RenderingDevice::_texture_create, DEFVAL(Array()));
	ClassDB::bind_method(D_METHOD("texture_create_shared", "view", "with_texture"), &RenderingDevice::_texture_create_shared);
	ClassDB::bind_method(D_METHOD("texture_create_shared_from_slice", "view", "with_texture", "layer", "mipmap", "mipmaps", "slice_type"), &RenderingDevice::_texture_create_shared_from_slice, DEFVAL(1), DEFVAL(TEXTURE_SLICE_2D));
	ClassDB::bind_method(D_METHOD("texture_create_from_extension", "type", "format", "samples", "usage_flags", "image", "width", "height", "depth", "layers"), &RenderingDevice::texture_create_from_extension);
	ClassDB::bind_method(D_METHOD("texture_update", "texture", "layer", "data"), &RenderingDevice::texture_update);
	ClassDB::bind_method(D_METHOD("texture_get_data", "texture", "layer"), &RenderingDevice::texture_get_data);
	ClassDB::bind_method(D_METHOD("texture_get_data_async", "texture", "layer", "callback"), &RenderingDevice::texture_get_data_async);
	ClassDB::bind_method(D_METHOD("texture_is_format_supported_for_usage", "format", "usage_flags"), &RenderingDevice::texture_is_format_supported_for_usage);
	ClassDB::bind_method(D_METHOD("texture_is_shared", "texture"), &RenderingDevice::texture_is_shared);
	ClassDB::bind_method(D_METHOD("texture_is_valid", "texture"), &RenderingDevice::texture_is_valid);
	ClassDB::bind_method(D_METHOD("texture_set_discardable", "texture", "discardable"), &RenderingDevice::texture_set_discardable);
	ClassDB::bind_method(D_METHOD("texture_is_discardable", "texture"), &RenderingDevice::texture_is_discardable);
	ClassDB::bind_method(D_METHOD("texture_copy", "from_texture", "to_texture", "from_pos", "to_pos", "size", "src_mipmap", "dst_mipmap", "src_layer", "dst_layer"), &RenderingDevice::texture_copy);
	ClassDB::bind_method(D_METHOD("texture_clear", "texture", "color", "base_mipmap", "mipmap_count", "base_layer", "layer_count"), &RenderingDevice::texture_clear);
	ClassDB::bind_method(D_METHOD("texture_resolve_multisample", "from_texture", "to_texture"), &RenderingDevice::texture_resolve_multisample);
	ClassDB::bind_method(D_METHOD("texture_get_format", "texture"), &RenderingDevice::_texture_get_format);
#ifndef DISABLE_DEPRECATED
	ClassDB::bind_method(D_METHOD("texture_get_native_handle", "texture"), &RenderingDevice::texture_get_native_handle);
#endif
	ClassDB::bind_method(D_METHOD("framebuffer_format_create", "attachments", "view_count"), &RenderingDevice::_framebuffer_format_create, DEFVAL(1));
	ClassDB::bind_method(D_METHOD("framebuffer_format_create_multipass", "attachments", "passes", "view_count"), &RenderingDevice::_framebuffer_format_create_multipass, DEFVAL(1));
	ClassDB::bind_method(D_METHOD("framebuffer_format_create_empty", "samples"), &RenderingDevice::framebuffer_format_create_empty, DEFVAL(TEXTURE_SAMPLES_1));
	ClassDB::bind_method(D_METHOD("framebuffer_format_get_texture_samples", "format", "render_pass"), &RenderingDevice::framebuffer_format_get_texture_samples, DEFVAL(0));
	ClassDB::bind_method(D_METHOD("framebuffer_create", "textures", "validate_with_format", "view_count"), &RenderingDevice::_framebuffer_create, DEFVAL(INVALID_FORMAT_ID), DEFVAL(1));
	ClassDB::bind_method(D_METHOD("framebuffer_create_multipass", "textures", "passes", "validate_with_format", "view_count"), &RenderingDevice::_framebuffer_create_multipass, DEFVAL(INVALID_FORMAT_ID), DEFVAL(1));
	ClassDB::bind_method(D_METHOD("framebuffer_create_empty", "size", "samples", "validate_with_format"), &RenderingDevice::framebuffer_create_empty, DEFVAL(TEXTURE_SAMPLES_1), DEFVAL(INVALID_FORMAT_ID));
	ClassDB::bind_method(D_METHOD("framebuffer_get_format", "framebuffer"), &RenderingDevice::framebuffer_get_format);
	ClassDB::bind_method(D_METHOD("framebuffer_is_valid", "framebuffer"), &RenderingDevice::framebuffer_is_valid);
	ClassDB::bind_method(D_METHOD("sampler_create", "state"), &RenderingDevice::_sampler_create);
	ClassDB::bind_method(D_METHOD("sampler_is_format_supported_for_filter", "format", "sampler_filter"), &RenderingDevice::sampler_is_format_supported_for_filter);
	ClassDB::bind_method(D_METHOD("vertex_buffer_create", "size_bytes", "data", "use_as_storage"), &RenderingDevice::vertex_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(false));
	ClassDB::bind_method(D_METHOD("vertex_format_create", "vertex_descriptions"), &RenderingDevice::_vertex_format_create);
	ClassDB::bind_method(D_METHOD("vertex_array_create", "vertex_count", "vertex_format", "src_buffers", "offsets"), &RenderingDevice::_vertex_array_create, DEFVAL(Vector<int64_t>()));
	ClassDB::bind_method(D_METHOD("index_buffer_create", "size_indices", "format", "data", "use_restart_indices"), &RenderingDevice::index_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(false));
	ClassDB::bind_method(D_METHOD("index_array_create", "index_buffer", "index_offset", "index_count"), &RenderingDevice::index_array_create);
	ClassDB::bind_method(D_METHOD("shader_compile_spirv_from_source", "shader_source", "allow_cache"), &RenderingDevice::_shader_compile_spirv_from_source, DEFVAL(true));
	ClassDB::bind_method(D_METHOD("shader_compile_binary_from_spirv", "spirv_data", "name"), &RenderingDevice::_shader_compile_binary_from_spirv, DEFVAL(""));
	ClassDB::bind_method(D_METHOD("shader_create_from_spirv", "spirv_data", "name"), &RenderingDevice::_shader_create_from_spirv, DEFVAL(""));
	ClassDB::bind_method(D_METHOD("shader_create_from_bytecode", "binary_data", "placeholder_rid"), &RenderingDevice::shader_create_from_bytecode, DEFVAL(RID()));
	ClassDB::bind_method(D_METHOD("shader_create_placeholder"), &RenderingDevice::shader_create_placeholder);
	ClassDB::bind_method(D_METHOD("shader_get_vertex_input_attribute_mask", "shader"), &RenderingDevice::shader_get_vertex_input_attribute_mask);
	ClassDB::bind_method(D_METHOD("uniform_buffer_create", "size_bytes", "data"), &RenderingDevice::uniform_buffer_create, DEFVAL(Vector<uint8_t>()));
	ClassDB::bind_method(D_METHOD("storage_buffer_create", "size_bytes", "data", "usage"), &RenderingDevice::storage_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(0));
	ClassDB::bind_method(D_METHOD("texture_buffer_create", "size_bytes", "format", "data"), &RenderingDevice::texture_buffer_create, DEFVAL(Vector<uint8_t>()));
	ClassDB::bind_method(D_METHOD("uniform_set_create", "uniforms", "shader", "shader_set"), &RenderingDevice::_uniform_set_create);
	ClassDB::bind_method(D_METHOD("uniform_set_is_valid", "uniform_set"), &RenderingDevice::uniform_set_is_valid);
	ClassDB::bind_method(D_METHOD("buffer_copy", "src_buffer", "dst_buffer", "src_offset", "dst_offset", "size"), &RenderingDevice::buffer_copy);
	ClassDB::bind_method(D_METHOD("buffer_update", "buffer", "offset", "size_bytes", "data"), &RenderingDevice::_buffer_update_bind);
	ClassDB::bind_method(D_METHOD("buffer_clear", "buffer", "offset", "size_bytes"), &RenderingDevice::buffer_clear);
	ClassDB::bind_method(D_METHOD("buffer_get_data", "buffer", "offset_bytes", "size_bytes"), &RenderingDevice::buffer_get_data, DEFVAL(0), DEFVAL(0));
	ClassDB::bind_method(D_METHOD("buffer_get_data_async", "buffer", "callback", "offset_bytes", "size_bytes"), &RenderingDevice::buffer_get_data_async, DEFVAL(0), DEFVAL(0));
	ClassDB::bind_method(D_METHOD("render_pipeline_create", "shader", "framebuffer_format", "vertex_format", "primitive", "rasterization_state", "multisample_state", "stencil_state", "color_blend_state", "dynamic_state_flags", "for_render_pass", "specialization_constants"), &RenderingDevice::_render_pipeline_create, DEFVAL(0), DEFVAL(0), DEFVAL(TypedArray<RDPipelineSpecializationConstant>()));
	ClassDB::bind_method(D_METHOD("render_pipeline_is_valid", "render_pipeline"), &RenderingDevice::render_pipeline_is_valid);
	ClassDB::bind_method(D_METHOD("compute_pipeline_create", "shader", "specialization_constants"), &RenderingDevice::_compute_pipeline_create, DEFVAL(TypedArray<RDPipelineSpecializationConstant>()));
	ClassDB::bind_method(D_METHOD("compute_pipeline_is_valid", "compute_pipeline"), &RenderingDevice::compute_pipeline_is_valid);
	ClassDB::bind_method(D_METHOD("screen_get_width", "screen"), &RenderingDevice::screen_get_width, DEFVAL(DisplayServer::MAIN_WINDOW_ID));
	ClassDB::bind_method(D_METHOD("screen_get_height", "screen"), &RenderingDevice::screen_get_height, DEFVAL(DisplayServer::MAIN_WINDOW_ID));
	ClassDB::bind_method(D_METHOD("screen_get_framebuffer_format", "screen"), &RenderingDevice::screen_get_framebuffer_format, DEFVAL(DisplayServer::MAIN_WINDOW_ID));
	ClassDB::bind_method(D_METHOD("draw_list_begin_for_screen", "screen", "clear_color"), &RenderingDevice::draw_list_begin_for_screen, DEFVAL(DisplayServer::MAIN_WINDOW_ID), DEFVAL(Color()));
	ClassDB::bind_method(D_METHOD("draw_list_begin", "framebuffer", "draw_flags", "clear_color_values", "clear_depth_value", "clear_stencil_value", "region", "breadcrumb"), &RenderingDevice::draw_list_begin, DEFVAL(DRAW_DEFAULT_ALL), DEFVAL(Vector<Color>()), DEFVAL(1.0), DEFVAL(0), DEFVAL(Rect2()), DEFVAL(0));
#ifndef DISABLE_DEPRECATED
	ClassDB::bind_method(D_METHOD("draw_list_begin_split", "framebuffer", "splits", "initial_color_action", "final_color_action", "initial_depth_action", "final_depth_action", "clear_color_values", "clear_depth", "clear_stencil", "region", "storage_textures"), &RenderingDevice::_draw_list_begin_split, DEFVAL(Vector<Color>()), DEFVAL(1.0), DEFVAL(0), DEFVAL(Rect2()), DEFVAL(TypedArray<RID>()));
#endif
	ClassDB::bind_method(D_METHOD("draw_list_set_blend_constants", "draw_list", "color"), &RenderingDevice::draw_list_set_blend_constants);
	ClassDB::bind_method(D_METHOD("draw_list_bind_render_pipeline", "draw_list", "render_pipeline"), &RenderingDevice::draw_list_bind_render_pipeline);
	ClassDB::bind_method(D_METHOD("draw_list_bind_uniform_set", "draw_list", "uniform_set", "set_index"), &RenderingDevice::draw_list_bind_uniform_set);
	ClassDB::bind_method(D_METHOD("draw_list_bind_vertex_array", "draw_list", "vertex_array"), &RenderingDevice::draw_list_bind_vertex_array);
	ClassDB::bind_method(D_METHOD("draw_list_bind_index_array", "draw_list", "index_array"), &RenderingDevice::draw_list_bind_index_array);
	ClassDB::bind_method(D_METHOD("draw_list_set_push_constant", "draw_list", "buffer", "size_bytes"), &RenderingDevice::_draw_list_set_push_constant);
	ClassDB::bind_method(D_METHOD("draw_list_draw", "draw_list", "use_indices", "instances", "procedural_vertex_count"), &RenderingDevice::draw_list_draw, DEFVAL(0));
	ClassDB::bind_method(D_METHOD("draw_list_draw_indirect", "draw_list", "use_indices", "buffer", "offset", "draw_count", "stride"), &RenderingDevice::draw_list_draw_indirect, DEFVAL(0), DEFVAL(1), DEFVAL(0));
	ClassDB::bind_method(D_METHOD("draw_list_enable_scissor", "draw_list", "rect"), &RenderingDevice::draw_list_enable_scissor, DEFVAL(Rect2()));
	ClassDB::bind_method(D_METHOD("draw_list_disable_scissor", "draw_list"), &RenderingDevice::draw_list_disable_scissor);
	ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass"), &RenderingDevice::draw_list_switch_to_next_pass);
#ifndef DISABLE_DEPRECATED
	ClassDB::bind_method(D_METHOD("draw_list_switch_to_next_pass_split", "splits"), &RenderingDevice::_draw_list_switch_to_next_pass_split);
#endif
	ClassDB::bind_method(D_METHOD("draw_list_end"), &RenderingDevice::draw_list_end);
	ClassDB::bind_method(D_METHOD("compute_list_begin"), &RenderingDevice::compute_list_begin);
	ClassDB::bind_method(D_METHOD("compute_list_bind_compute_pipeline", "compute_list", "compute_pipeline"), &RenderingDevice::compute_list_bind_compute_pipeline);
	ClassDB::bind_method(D_METHOD("compute_list_set_push_constant", "compute_list", "buffer", "size_bytes"), &RenderingDevice::_compute_list_set_push_constant);
	ClassDB::bind_method(D_METHOD("compute_list_bind_uniform_set", "compute_list", "uniform_set", "set_index"), &RenderingDevice::compute_list_bind_uniform_set);
	ClassDB::bind_method(D_METHOD("compute_list_dispatch", "compute_list", "x_groups", "y_groups", "z_groups"), &RenderingDevice::compute_list_dispatch);
	ClassDB::bind_method(D_METHOD("compute_list_dispatch_indirect", "compute_list", "buffer", "offset"), &RenderingDevice::compute_list_dispatch_indirect);
	ClassDB::bind_method(D_METHOD("compute_list_add_barrier", "compute_list"), &RenderingDevice::compute_list_add_barrier);
	ClassDB::bind_method(D_METHOD("compute_list_end"), &RenderingDevice::compute_list_end);
	ClassDB::bind_method(D_METHOD("free_rid", "rid"), &RenderingDevice::free);
	ClassDB::bind_method(D_METHOD("capture_timestamp", "name"), &RenderingDevice::capture_timestamp);
	ClassDB::bind_method(D_METHOD("get_captured_timestamps_count"), &RenderingDevice::get_captured_timestamps_count);
	ClassDB::bind_method(D_METHOD("get_captured_timestamps_frame"), &RenderingDevice::get_captured_timestamps_frame);
	ClassDB::bind_method(D_METHOD("get_captured_timestamp_gpu_time", "index"), &RenderingDevice::get_captured_timestamp_gpu_time);
	ClassDB::bind_method(D_METHOD("get_captured_timestamp_cpu_time", "index"), &RenderingDevice::get_captured_timestamp_cpu_time);
	ClassDB::bind_method(D_METHOD("get_captured_timestamp_name", "index"), &RenderingDevice::get_captured_timestamp_name);
	ClassDB::bind_method(D_METHOD("limit_get", "limit"), &RenderingDevice::limit_get);
	ClassDB::bind_method(D_METHOD("get_frame_delay"), &RenderingDevice::get_frame_delay);
	ClassDB::bind_method(D_METHOD("submit"), &RenderingDevice::submit);
	ClassDB::bind_method(D_METHOD("sync"), &RenderingDevice::sync);
#ifndef DISABLE_DEPRECATED
	ClassDB::bind_method(D_METHOD("barrier", "from", "to"), &RenderingDevice::barrier, DEFVAL(BARRIER_MASK_ALL_BARRIERS), DEFVAL(BARRIER_MASK_ALL_BARRIERS));
	ClassDB::bind_method(D_METHOD("full_barrier"), &RenderingDevice::full_barrier);
#endif
	ClassDB::bind_method(D_METHOD("create_local_device"), &RenderingDevice::create_local_device);
	ClassDB::bind_method(D_METHOD("set_resource_name", "id", "name"), &RenderingDevice::set_resource_name);
	ClassDB::bind_method(D_METHOD("draw_command_begin_label", "name", "color"), &RenderingDevice::draw_command_begin_label);
#ifndef DISABLE_DEPRECATED
	ClassDB::bind_method(D_METHOD("draw_command_insert_label", "name", "color"), &RenderingDevice::draw_command_insert_label);
#endif
	ClassDB::bind_method(D_METHOD("draw_command_end_label"), &RenderingDevice::draw_command_end_label);
	ClassDB::bind_method(D_METHOD("get_device_vendor_name"), &RenderingDevice::get_device_vendor_name);
	ClassDB::bind_method(D_METHOD("get_device_name"), &RenderingDevice::get_device_name);
	ClassDB::bind_method(D_METHOD("get_device_pipeline_cache_uuid"), &RenderingDevice::get_device_pipeline_cache_uuid);
	ClassDB::bind_method(D_METHOD("get_memory_usage", "type"), &RenderingDevice::get_memory_usage);
	ClassDB::bind_method(D_METHOD("get_driver_resource", "resource", "rid", "index"), &RenderingDevice::get_driver_resource);
	ClassDB::bind_method(D_METHOD("get_perf_report"), &RenderingDevice::get_perf_report);
	ClassDB::bind_method(D_METHOD("get_driver_and_device_memory_report"), &RenderingDevice::get_driver_and_device_memory_report);
	ClassDB::bind_method(D_METHOD("get_tracked_object_name", "type_index"), &RenderingDevice::get_tracked_object_name);
	ClassDB::bind_method(D_METHOD("get_tracked_object_type_count"), &RenderingDevice::get_tracked_object_type_count);
	ClassDB::bind_method(D_METHOD("get_driver_total_memory"), &RenderingDevice::get_driver_total_memory);
	ClassDB::bind_method(D_METHOD("get_driver_allocation_count"), &RenderingDevice::get_driver_allocation_count);
	ClassDB::bind_method(D_METHOD("get_driver_memory_by_object_type", "type"), &RenderingDevice::get_driver_memory_by_object_type);
	ClassDB::bind_method(D_METHOD("get_driver_allocs_by_object_type", "type"), &RenderingDevice::get_driver_allocs_by_object_type);
	ClassDB::bind_method(D_METHOD("get_device_total_memory"), &RenderingDevice::get_device_total_memory);
	ClassDB::bind_method(D_METHOD("get_device_allocation_count"), &RenderingDevice::get_device_allocation_count);
	ClassDB::bind_method(D_METHOD("get_device_memory_by_object_type", "type"), &RenderingDevice::get_device_memory_by_object_type);
	ClassDB::bind_method(D_METHOD("get_device_allocs_by_object_type", "type"), &RenderingDevice::get_device_allocs_by_object_type);
	BIND_ENUM_CONSTANT(DEVICE_TYPE_OTHER);
	BIND_ENUM_CONSTANT(DEVICE_TYPE_INTEGRATED_GPU);
	BIND_ENUM_CONSTANT(DEVICE_TYPE_DISCRETE_GPU);
	BIND_ENUM_CONSTANT(DEVICE_TYPE_VIRTUAL_GPU);
	BIND_ENUM_CONSTANT(DEVICE_TYPE_CPU);
	BIND_ENUM_CONSTANT(DEVICE_TYPE_MAX);

	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_LOGICAL_DEVICE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_PHYSICAL_DEVICE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TOPMOST_OBJECT);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_COMMAND_QUEUE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_QUEUE_FAMILY);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TEXTURE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TEXTURE_VIEW);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_TEXTURE_DATA_FORMAT);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_SAMPLER);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_UNIFORM_SET);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_BUFFER);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_COMPUTE_PIPELINE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_RENDER_PIPELINE);
#ifndef DISABLE_DEPRECATED
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_DEVICE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_PHYSICAL_DEVICE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_INSTANCE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_QUEUE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_QUEUE_FAMILY_INDEX);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_IMAGE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_IMAGE_VIEW);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_IMAGE_NATIVE_TEXTURE_FORMAT);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_SAMPLER);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_DESCRIPTOR_SET);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_BUFFER);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_COMPUTE_PIPELINE);
	BIND_ENUM_CONSTANT(DRIVER_RESOURCE_VULKAN_RENDER_PIPELINE);
#endif

	BIND_ENUM_CONSTANT(DATA_FORMAT_R4G4_UNORM_PACK8);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R4G4B4A4_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B4G4R4A4_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R5G6B5_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B5G6R5_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R5G5B5A1_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B5G5R5A1_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A1R5G5B5_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8_SRGB);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8_SRGB);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8_SRGB);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8_SRGB);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R8G8B8A8_SRGB);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8A8_SRGB);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_UNORM_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SNORM_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_USCALED_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SSCALED_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_UINT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SINT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A8B8G8R8_SRGB_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_UNORM_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_SNORM_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_USCALED_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_SSCALED_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_UINT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2R10G10B10_SINT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_UNORM_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_SNORM_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_USCALED_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_SSCALED_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_UINT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_A2B10G10R10_SINT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_USCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SSCALED);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R16G16B16A16_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32A32_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32A32_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R32G32B32A32_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64A64_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64A64_SINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R64G64B64A64_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B10G11R11_UFLOAT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_E5B9G9R9_UFLOAT_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_D16_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_X8_D24_UNORM_PACK32);
	BIND_ENUM_CONSTANT(DATA_FORMAT_D32_SFLOAT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_S8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_D16_UNORM_S8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_D24_UNORM_S8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_D32_SFLOAT_S8_UINT);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGB_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGB_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGBA_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC1_RGBA_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC2_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC2_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC3_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC3_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC4_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC4_SNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC5_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC5_SNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC6H_UFLOAT_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC6H_SFLOAT_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC7_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_BC7_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11_SNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11G11_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_EAC_R11G11_SNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_4x4_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_4x4_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x4_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x4_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x5_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_5x5_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x5_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x5_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x6_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_6x6_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x5_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x5_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x6_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x6_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x8_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_8x8_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x5_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x5_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x6_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x6_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x8_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x8_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x10_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_10x10_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x10_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x10_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x12_UNORM_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_ASTC_12x12_SRGB_BLOCK);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G8B8G8R8_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B8G8R8G8_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8_R8_3PLANE_420_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8R8_2PLANE_420_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8_R8_3PLANE_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8R8_2PLANE_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G8_B8_R8_3PLANE_444_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R10X6_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R10X6G10X6_UNORM_2PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R12X4_UNORM_PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R12X4G12X4_UNORM_2PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G16B16G16R16_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_B16G16R16G16_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_420_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16R16_2PLANE_420_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16R16_2PLANE_422_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_G16_B16_R16_3PLANE_444_UNORM);
	BIND_ENUM_CONSTANT(DATA_FORMAT_MAX);

#ifndef DISABLE_DEPRECATED
	BIND_BITFIELD_FLAG(BARRIER_MASK_VERTEX);
	BIND_BITFIELD_FLAG(BARRIER_MASK_FRAGMENT);
	BIND_BITFIELD_FLAG(BARRIER_MASK_COMPUTE);
	BIND_BITFIELD_FLAG(BARRIER_MASK_TRANSFER);
	BIND_BITFIELD_FLAG(BARRIER_MASK_RASTER);
	BIND_BITFIELD_FLAG(BARRIER_MASK_ALL_BARRIERS);
	BIND_BITFIELD_FLAG(BARRIER_MASK_NO_BARRIER);
#endif

	BIND_ENUM_CONSTANT(TEXTURE_TYPE_1D);
	BIND_ENUM_CONSTANT(TEXTURE_TYPE_2D);
	BIND_ENUM_CONSTANT(TEXTURE_TYPE_3D);
	BIND_ENUM_CONSTANT(TEXTURE_TYPE_CUBE);
	BIND_ENUM_CONSTANT(TEXTURE_TYPE_1D_ARRAY);
	BIND_ENUM_CONSTANT(TEXTURE_TYPE_2D_ARRAY);
	BIND_ENUM_CONSTANT(TEXTURE_TYPE_CUBE_ARRAY);
	BIND_ENUM_CONSTANT(TEXTURE_TYPE_MAX);

	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_1);
	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_2);
	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_4);
	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_8);
	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_16);
	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_32);
	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_64);
	BIND_ENUM_CONSTANT(TEXTURE_SAMPLES_MAX);

	BIND_BITFIELD_FLAG(TEXTURE_USAGE_SAMPLING_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_COLOR_ATTACHMENT_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_STORAGE_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_STORAGE_ATOMIC_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_CPU_READ_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_CAN_UPDATE_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_CAN_COPY_FROM_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_CAN_COPY_TO_BIT);
	BIND_BITFIELD_FLAG(TEXTURE_USAGE_INPUT_ATTACHMENT_BIT);

	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_IDENTITY);
	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_ZERO);
	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_ONE);
	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_R);
	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_G);
	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_B);
	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_A);
	BIND_ENUM_CONSTANT(TEXTURE_SWIZZLE_MAX);

	BIND_ENUM_CONSTANT(TEXTURE_SLICE_2D);
	BIND_ENUM_CONSTANT(TEXTURE_SLICE_CUBEMAP);
	BIND_ENUM_CONSTANT(TEXTURE_SLICE_3D);

	BIND_ENUM_CONSTANT(SAMPLER_FILTER_NEAREST);
	BIND_ENUM_CONSTANT(SAMPLER_FILTER_LINEAR);
	BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_REPEAT);
	BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_MIRRORED_REPEAT);
	BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_CLAMP_TO_EDGE);
	BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_CLAMP_TO_BORDER);
	BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_MIRROR_CLAMP_TO_EDGE);
	BIND_ENUM_CONSTANT(SAMPLER_REPEAT_MODE_MAX);

	BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK);
	BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_INT_TRANSPARENT_BLACK);
	BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_FLOAT_OPAQUE_BLACK);
	BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_INT_OPAQUE_BLACK);
	BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_FLOAT_OPAQUE_WHITE);
	BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_INT_OPAQUE_WHITE);
	BIND_ENUM_CONSTANT(SAMPLER_BORDER_COLOR_MAX);

	BIND_ENUM_CONSTANT(VERTEX_FREQUENCY_VERTEX);
	BIND_ENUM_CONSTANT(VERTEX_FREQUENCY_INSTANCE);

	BIND_ENUM_CONSTANT(INDEX_BUFFER_FORMAT_UINT16);
	BIND_ENUM_CONSTANT(INDEX_BUFFER_FORMAT_UINT32);

	BIND_BITFIELD_FLAG(STORAGE_BUFFER_USAGE_DISPATCH_INDIRECT);
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_SAMPLER); // For sampling only (sampler GLSL type).
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_SAMPLER_WITH_TEXTURE); // For sampling only, but includes a texture (samplerXX GLSL type); first a sampler, then a texture.
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_TEXTURE); // Only a texture (textureXX GLSL type).
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_IMAGE); // Storage image (imageXX GLSL type), mostly used for compute.
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_TEXTURE_BUFFER); // Buffer texture (or TBO; textureBuffer type).
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER); // Buffer texture with a sampler (or TBO; samplerBuffer type).
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_IMAGE_BUFFER); // Texel buffer (imageBuffer type), mostly used for compute.
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_UNIFORM_BUFFER); // Regular uniform buffer (or UBO).
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_STORAGE_BUFFER); // Storage buffer ("buffer" qualifier); like a UBO, but supports storage, mostly used for compute.
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_INPUT_ATTACHMENT); // Used for subpass read/write, mostly used on mobile.
	BIND_ENUM_CONSTANT(UNIFORM_TYPE_MAX);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_POINTS);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINES);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINES_WITH_ADJACENCY);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINESTRIPS);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_LINESTRIPS_WITH_ADJACENCY);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLES);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLES_WITH_ADJACENCY);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLE_STRIPS);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_AJACENCY);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_TESSELATION_PATCH);
	BIND_ENUM_CONSTANT(RENDER_PRIMITIVE_MAX);
	BIND_ENUM_CONSTANT(POLYGON_CULL_DISABLED);
	BIND_ENUM_CONSTANT(POLYGON_CULL_FRONT);
	BIND_ENUM_CONSTANT(POLYGON_CULL_BACK);
	BIND_ENUM_CONSTANT(POLYGON_FRONT_FACE_CLOCKWISE);
	BIND_ENUM_CONSTANT(POLYGON_FRONT_FACE_COUNTER_CLOCKWISE);
	BIND_ENUM_CONSTANT(STENCIL_OP_KEEP);
	BIND_ENUM_CONSTANT(STENCIL_OP_ZERO);
	BIND_ENUM_CONSTANT(STENCIL_OP_REPLACE);
	BIND_ENUM_CONSTANT(STENCIL_OP_INCREMENT_AND_CLAMP);
	BIND_ENUM_CONSTANT(STENCIL_OP_DECREMENT_AND_CLAMP);
	BIND_ENUM_CONSTANT(STENCIL_OP_INVERT);
	BIND_ENUM_CONSTANT(STENCIL_OP_INCREMENT_AND_WRAP);
	BIND_ENUM_CONSTANT(STENCIL_OP_DECREMENT_AND_WRAP);
	BIND_ENUM_CONSTANT(STENCIL_OP_MAX); // Not an actual operator; just the number of operators. :D
	BIND_ENUM_CONSTANT(COMPARE_OP_NEVER);
	BIND_ENUM_CONSTANT(COMPARE_OP_LESS);
	BIND_ENUM_CONSTANT(COMPARE_OP_EQUAL);
	BIND_ENUM_CONSTANT(COMPARE_OP_LESS_OR_EQUAL);
	BIND_ENUM_CONSTANT(COMPARE_OP_GREATER);
	BIND_ENUM_CONSTANT(COMPARE_OP_NOT_EQUAL);
	BIND_ENUM_CONSTANT(COMPARE_OP_GREATER_OR_EQUAL);
	BIND_ENUM_CONSTANT(COMPARE_OP_ALWAYS);
	BIND_ENUM_CONSTANT(COMPARE_OP_MAX);
	BIND_ENUM_CONSTANT(LOGIC_OP_CLEAR);
	BIND_ENUM_CONSTANT(LOGIC_OP_AND);
	BIND_ENUM_CONSTANT(LOGIC_OP_AND_REVERSE);
	BIND_ENUM_CONSTANT(LOGIC_OP_COPY);
	BIND_ENUM_CONSTANT(LOGIC_OP_AND_INVERTED);
	BIND_ENUM_CONSTANT(LOGIC_OP_NO_OP);
	BIND_ENUM_CONSTANT(LOGIC_OP_XOR);
	BIND_ENUM_CONSTANT(LOGIC_OP_OR);
	BIND_ENUM_CONSTANT(LOGIC_OP_NOR);
	BIND_ENUM_CONSTANT(LOGIC_OP_EQUIVALENT);
	BIND_ENUM_CONSTANT(LOGIC_OP_INVERT);
	BIND_ENUM_CONSTANT(LOGIC_OP_OR_REVERSE);
	BIND_ENUM_CONSTANT(LOGIC_OP_COPY_INVERTED);
	BIND_ENUM_CONSTANT(LOGIC_OP_OR_INVERTED);
	BIND_ENUM_CONSTANT(LOGIC_OP_NAND);
	BIND_ENUM_CONSTANT(LOGIC_OP_SET);
	BIND_ENUM_CONSTANT(LOGIC_OP_MAX); // Not an actual operator; just the number of operators. :D
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ZERO);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_DST_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_DST_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_DST_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_DST_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_CONSTANT_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_CONSTANT_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC_ALPHA_SATURATE);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC1_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC1_COLOR);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_SRC1_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);
	BIND_ENUM_CONSTANT(BLEND_FACTOR_MAX);
	BIND_ENUM_CONSTANT(BLEND_OP_ADD);
	BIND_ENUM_CONSTANT(BLEND_OP_SUBTRACT);
	BIND_ENUM_CONSTANT(BLEND_OP_REVERSE_SUBTRACT);
	BIND_ENUM_CONSTANT(BLEND_OP_MINIMUM);
	BIND_ENUM_CONSTANT(BLEND_OP_MAXIMUM);
	BIND_ENUM_CONSTANT(BLEND_OP_MAX);
	BIND_BITFIELD_FLAG(DYNAMIC_STATE_LINE_WIDTH);
	BIND_BITFIELD_FLAG(DYNAMIC_STATE_DEPTH_BIAS);
	BIND_BITFIELD_FLAG(DYNAMIC_STATE_BLEND_CONSTANTS);
	BIND_BITFIELD_FLAG(DYNAMIC_STATE_DEPTH_BOUNDS);
	BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_COMPARE_MASK);
	BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_WRITE_MASK);
	BIND_BITFIELD_FLAG(DYNAMIC_STATE_STENCIL_REFERENCE);
#ifndef DISABLE_DEPRECATED
	BIND_ENUM_CONSTANT(INITIAL_ACTION_LOAD);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_DISCARD);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_MAX);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_CLEAR_REGION_CONTINUE);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_KEEP);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_DROP);
	BIND_ENUM_CONSTANT(INITIAL_ACTION_CONTINUE);
	BIND_ENUM_CONSTANT(FINAL_ACTION_STORE);
	BIND_ENUM_CONSTANT(FINAL_ACTION_DISCARD);
	BIND_ENUM_CONSTANT(FINAL_ACTION_MAX);
	BIND_ENUM_CONSTANT(FINAL_ACTION_READ);
	BIND_ENUM_CONSTANT(FINAL_ACTION_CONTINUE);
#endif
	BIND_ENUM_CONSTANT(SHADER_STAGE_VERTEX);
	BIND_ENUM_CONSTANT(SHADER_STAGE_FRAGMENT);
	BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_CONTROL);
	BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_EVALUATION);
	BIND_ENUM_CONSTANT(SHADER_STAGE_COMPUTE);
	BIND_ENUM_CONSTANT(SHADER_STAGE_MAX);
	BIND_ENUM_CONSTANT(SHADER_STAGE_VERTEX_BIT);
	BIND_ENUM_CONSTANT(SHADER_STAGE_FRAGMENT_BIT);
	BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_CONTROL_BIT);
	BIND_ENUM_CONSTANT(SHADER_STAGE_TESSELATION_EVALUATION_BIT);
	BIND_ENUM_CONSTANT(SHADER_STAGE_COMPUTE_BIT);
	BIND_ENUM_CONSTANT(SHADER_LANGUAGE_GLSL);
	BIND_ENUM_CONSTANT(SHADER_LANGUAGE_HLSL);
	BIND_ENUM_CONSTANT(PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL);
	BIND_ENUM_CONSTANT(PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT);
	BIND_ENUM_CONSTANT(PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT);
	BIND_ENUM_CONSTANT(LIMIT_MAX_BOUND_UNIFORM_SETS);
	BIND_ENUM_CONSTANT(LIMIT_MAX_FRAMEBUFFER_COLOR_ATTACHMENTS);
	BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURES_PER_UNIFORM_SET);
	BIND_ENUM_CONSTANT(LIMIT_MAX_SAMPLERS_PER_UNIFORM_SET);
	BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_BUFFERS_PER_UNIFORM_SET);
	BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_IMAGES_PER_UNIFORM_SET);
	BIND_ENUM_CONSTANT(LIMIT_MAX_UNIFORM_BUFFERS_PER_UNIFORM_SET);
	BIND_ENUM_CONSTANT(LIMIT_MAX_DRAW_INDEXED_INDEX);
	BIND_ENUM_CONSTANT(LIMIT_MAX_FRAMEBUFFER_HEIGHT);
	BIND_ENUM_CONSTANT(LIMIT_MAX_FRAMEBUFFER_WIDTH);
	BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_ARRAY_LAYERS);
	BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_1D);
	BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_2D);
	BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_3D);
	BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURE_SIZE_CUBE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_TEXTURES_PER_SHADER_STAGE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_SAMPLERS_PER_SHADER_STAGE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_BUFFERS_PER_SHADER_STAGE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_STORAGE_IMAGES_PER_SHADER_STAGE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_UNIFORM_BUFFERS_PER_SHADER_STAGE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_PUSH_CONSTANT_SIZE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_UNIFORM_BUFFER_SIZE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_ATTRIBUTE_OFFSET);
	BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_ATTRIBUTES);
	BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_BINDINGS);
	BIND_ENUM_CONSTANT(LIMIT_MAX_VERTEX_INPUT_BINDING_STRIDE);
	BIND_ENUM_CONSTANT(LIMIT_MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_SHARED_MEMORY_SIZE);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_INVOCATIONS);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_X);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Y);
	BIND_ENUM_CONSTANT(LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Z);
	BIND_ENUM_CONSTANT(LIMIT_MAX_VIEWPORT_DIMENSIONS_X);
	BIND_ENUM_CONSTANT(LIMIT_MAX_VIEWPORT_DIMENSIONS_Y);
	BIND_ENUM_CONSTANT(MEMORY_TEXTURES);
	BIND_ENUM_CONSTANT(MEMORY_BUFFERS);
	BIND_ENUM_CONSTANT(MEMORY_TOTAL);
	BIND_CONSTANT(INVALID_ID);
	BIND_CONSTANT(INVALID_FORMAT_ID);
	BIND_ENUM_CONSTANT(NONE);
	BIND_ENUM_CONSTANT(REFLECTION_PROBES);
	BIND_ENUM_CONSTANT(SKY_PASS);
	BIND_ENUM_CONSTANT(LIGHTMAPPER_PASS);
	BIND_ENUM_CONSTANT(SHADOW_PASS_DIRECTIONAL);
	BIND_ENUM_CONSTANT(SHADOW_PASS_CUBE);
	BIND_ENUM_CONSTANT(OPAQUE_PASS);
	BIND_ENUM_CONSTANT(ALPHA_PASS);
	BIND_ENUM_CONSTANT(TRANSPARENT_PASS);
	BIND_ENUM_CONSTANT(POST_PROCESSING_PASS);
	BIND_ENUM_CONSTANT(BLIT_PASS);
	BIND_ENUM_CONSTANT(UI_PASS);
	BIND_ENUM_CONSTANT(DEBUG_PASS);
	BIND_BITFIELD_FLAG(DRAW_DEFAULT_ALL);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_0);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_1);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_2);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_3);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_4);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_5);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_6);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_7);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_MASK);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_COLOR_ALL);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_0);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_1);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_2);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_3);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_4);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_5);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_6);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_7);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_MASK);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_COLOR_ALL);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_DEPTH);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_DEPTH);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_STENCIL);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_STENCIL);
	BIND_BITFIELD_FLAG(DRAW_CLEAR_ALL);
	BIND_BITFIELD_FLAG(DRAW_IGNORE_ALL);
}
void RenderingDevice::make_current() {
	render_thread_id = Thread::get_caller_id();
}

RenderingDevice::~RenderingDevice() {
	finalize();
	if (singleton == this) {
		singleton = nullptr;
	}
}

RenderingDevice::RenderingDevice() {
	if (singleton == nullptr) {
		singleton = this;
	}
	render_thread_id = Thread::get_caller_id();
}
/*****************/
/**** BINDERS ****/
/*****************/

RID RenderingDevice::_texture_create(const Ref<RDTextureFormat> &p_format, const Ref<RDTextureView> &p_view, const TypedArray<PackedByteArray> &p_data) {
	ERR_FAIL_COND_V(p_format.is_null(), RID());
	ERR_FAIL_COND_V(p_view.is_null(), RID());
	Vector<Vector<uint8_t>> data;
	for (int i = 0; i < p_data.size(); i++) {
		Vector<uint8_t> byte_slice = p_data[i];
		ERR_FAIL_COND_V(byte_slice.is_empty(), RID());
		data.push_back(byte_slice);
	}
	return texture_create(p_format->base, p_view->base, data);
}
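
// The binders in this section bridge the script-facing wrappers (Ref<RD*>
// objects, TypedArray parameters) to the native structs and Vector containers
// the core API consumes; each RD* wrapper exposes its native struct as `base`.
// Illustrative sketch only (the configuration step is elided):
//
//	Ref<RDTextureFormat> fmt;
//	fmt.instantiate(); // Script-side wrapper...
//	// ...fmt->base is the TextureFormat struct handed to texture_create().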
RID RenderingDevice::_texture_create_shared(const Ref<RDTextureView> &p_view, RID p_with_texture) {
	ERR_FAIL_COND_V(p_view.is_null(), RID());
	return texture_create_shared(p_view->base, p_with_texture);
}

RID RenderingDevice::_texture_create_shared_from_slice(const Ref<RDTextureView> &p_view, RID p_with_texture, uint32_t p_layer, uint32_t p_mipmap, uint32_t p_mipmaps, TextureSliceType p_slice_type) {
	ERR_FAIL_COND_V(p_view.is_null(), RID());
	return texture_create_shared_from_slice(p_view->base, p_with_texture, p_layer, p_mipmap, p_mipmaps, p_slice_type);
}

Ref<RDTextureFormat> RenderingDevice::_texture_get_format(RID p_rd_texture) {
	Ref<RDTextureFormat> rtf;
	rtf.instantiate();
	rtf->base = texture_get_format(p_rd_texture);
	return rtf;
}

RenderingDevice::FramebufferFormatID RenderingDevice::_framebuffer_format_create(const TypedArray<RDAttachmentFormat> &p_attachments, uint32_t p_view_count) {
	Vector<AttachmentFormat> attachments;
	attachments.resize(p_attachments.size());
	for (int i = 0; i < p_attachments.size(); i++) {
		Ref<RDAttachmentFormat> af = p_attachments[i];
		ERR_FAIL_COND_V(af.is_null(), INVALID_FORMAT_ID);
		attachments.write[i] = af->base;
	}
	return framebuffer_format_create(attachments, p_view_count);
}

RenderingDevice::FramebufferFormatID RenderingDevice::_framebuffer_format_create_multipass(const TypedArray<RDAttachmentFormat> &p_attachments, const TypedArray<RDFramebufferPass> &p_passes, uint32_t p_view_count) {
	Vector<AttachmentFormat> attachments;
	attachments.resize(p_attachments.size());
	for (int i = 0; i < p_attachments.size(); i++) {
		Ref<RDAttachmentFormat> af = p_attachments[i];
		ERR_FAIL_COND_V(af.is_null(), INVALID_FORMAT_ID);
		attachments.write[i] = af->base;
	}
	Vector<FramebufferPass> passes;
	for (int i = 0; i < p_passes.size(); i++) {
		Ref<RDFramebufferPass> pass = p_passes[i];
		ERR_CONTINUE(pass.is_null());
		passes.push_back(pass->base);
	}
	return framebuffer_format_create_multipass(attachments, passes, p_view_count);
}
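
// Note the differing null handling above: a null attachment aborts the whole
// call (attachments are written by index, so the list must be index-exact),
// while a null pass is merely skipped via ERR_CONTINUE, since passes are
// appended with push_back().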
RID RenderingDevice::_framebuffer_create(const TypedArray<RID> &p_textures, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	Vector<RID> textures = Variant(p_textures);
	return framebuffer_create(textures, p_format_check, p_view_count);
}

RID RenderingDevice::_framebuffer_create_multipass(const TypedArray<RID> &p_textures, const TypedArray<RDFramebufferPass> &p_passes, FramebufferFormatID p_format_check, uint32_t p_view_count) {
	Vector<RID> textures = Variant(p_textures);
	Vector<FramebufferPass> passes;
	for (int i = 0; i < p_passes.size(); i++) {
		Ref<RDFramebufferPass> pass = p_passes[i];
		ERR_CONTINUE(pass.is_null());
		passes.push_back(pass->base);
	}
	return framebuffer_create_multipass(textures, passes, p_format_check, p_view_count);
}

RID RenderingDevice::_sampler_create(const Ref<RDSamplerState> &p_state) {
	ERR_FAIL_COND_V(p_state.is_null(), RID());
	return sampler_create(p_state->base);
}

RenderingDevice::VertexFormatID RenderingDevice::_vertex_format_create(const TypedArray<RDVertexAttribute> &p_vertex_formats) {
	Vector<VertexAttribute> descriptions;
	descriptions.resize(p_vertex_formats.size());
	for (int i = 0; i < p_vertex_formats.size(); i++) {
		Ref<RDVertexAttribute> af = p_vertex_formats[i];
		ERR_FAIL_COND_V(af.is_null(), INVALID_FORMAT_ID);
		descriptions.write[i] = af->base;
	}
	return vertex_format_create(descriptions);
}

RID RenderingDevice::_vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const TypedArray<RID> &p_src_buffers, const Vector<int64_t> &p_offsets) {
	Vector<RID> buffers = Variant(p_src_buffers);
	Vector<uint64_t> offsets;
	offsets.resize(p_offsets.size());
	for (int i = 0; i < p_offsets.size(); i++) {
		offsets.write[i] = p_offsets[i];
	}
	return vertex_array_create(p_vertex_count, p_vertex_format, buffers, offsets);
}
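
// _vertex_array_create receives its offsets as Vector<int64_t> (scripts have
// no unsigned 64-bit packed array) and widens them element-wise into the
// Vector<uint64_t> the native call expects. The same conversion in isolation,
// as a sketch (values are hypothetical and assumed non-negative):
//
//	Vector<int64_t> in = { 0, 1024 };
//	Vector<uint64_t> out;
//	out.resize(in.size());
//	for (int i = 0; i < in.size(); i++) {
//		out.write[i] = uint64_t(in[i]);
//	}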
Ref<RDShaderSPIRV> RenderingDevice::_shader_compile_spirv_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache) {
	ERR_FAIL_COND_V(p_source.is_null(), Ref<RDShaderSPIRV>());
	Ref<RDShaderSPIRV> bytecode;
	bytecode.instantiate();
	for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
		String error;
		ShaderStage stage = ShaderStage(i);
		String source = p_source->get_stage_source(stage);
		if (!source.is_empty()) {
			Vector<uint8_t> spirv = shader_compile_spirv_from_source(stage, source, p_source->get_language(), &error, p_allow_cache);
			bytecode->set_stage_bytecode(stage, spirv);
			bytecode->set_stage_compile_error(stage, error);
		}
	}
	return bytecode;
}
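
// Together with the two binders that follow, this forms the script-visible
// shader path: GLSL/HLSL source -> per-stage SPIR-V -> shader RID (optionally
// via a portable binary). A hedged sketch of the flow (`rd` is hypothetical;
// scripts reach these through the bound names without the leading underscore):
//
//	Ref<RDShaderSource> src; // Assume stages were filled via set_stage_source().
//	Ref<RDShaderSPIRV> spirv = rd->_shader_compile_spirv_from_source(src, true);
//	RID shader = rd->_shader_create_from_spirv(spirv, "my_shader");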
Vector<uint8_t> RenderingDevice::_shader_compile_binary_from_spirv(const Ref<RDShaderSPIRV> &p_spirv, const String &p_shader_name) {
	ERR_FAIL_COND_V(p_spirv.is_null(), Vector<uint8_t>());
	Vector<ShaderStageSPIRVData> stage_data;
	for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
		ShaderStage stage = ShaderStage(i);
		ShaderStageSPIRVData sd;
		sd.shader_stage = stage;
		String error = p_spirv->get_stage_compile_error(stage);
		ERR_FAIL_COND_V_MSG(!error.is_empty(), Vector<uint8_t>(), "Can't create a shader from an errored bytecode. Check errors in source bytecode.");
		sd.spirv = p_spirv->get_stage_bytecode(stage);
		if (sd.spirv.is_empty()) {
			continue;
		}
		stage_data.push_back(sd);
	}
	return shader_compile_binary_from_spirv(stage_data, p_shader_name);
}

RID RenderingDevice::_shader_create_from_spirv(const Ref<RDShaderSPIRV> &p_spirv, const String &p_shader_name) {
	ERR_FAIL_COND_V(p_spirv.is_null(), RID());
	Vector<ShaderStageSPIRVData> stage_data;
	for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
		ShaderStage stage = ShaderStage(i);
		ShaderStageSPIRVData sd;
		sd.shader_stage = stage;
		String error = p_spirv->get_stage_compile_error(stage);
		ERR_FAIL_COND_V_MSG(!error.is_empty(), RID(), "Can't create a shader from an errored bytecode. Check errors in source bytecode.");
		sd.spirv = p_spirv->get_stage_bytecode(stage);
		if (sd.spirv.is_empty()) {
			continue;
		}
		stage_data.push_back(sd);
	}
	return shader_create_from_spirv(stage_data);
}
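
// The two functions above intentionally share their stage-collection loop:
// one emits a portable binary for caching, the other creates a live shader
// RID. Both fail hard on any stage that recorded a compile error and silently
// skip stages with no bytecode at all.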
RID RenderingDevice::_uniform_set_create(const TypedArray<RDUniform> &p_uniforms, RID p_shader, uint32_t p_shader_set) {
	Vector<Uniform> uniforms;
	uniforms.resize(p_uniforms.size());
	for (int i = 0; i < p_uniforms.size(); i++) {
		Ref<RDUniform> uniform = p_uniforms[i];
		ERR_FAIL_COND_V(!uniform.is_valid(), RID());
		uniforms.write[i] = uniform->base;
	}
	return uniform_set_create(uniforms, p_shader, p_shader_set);
}

Error RenderingDevice::_buffer_update_bind(RID p_buffer, uint32_t p_offset, uint32_t p_size, const Vector<uint8_t> &p_data) {
	return buffer_update(p_buffer, p_offset, p_size, p_data.ptr());
}

static Vector<RenderingDevice::PipelineSpecializationConstant> _get_spec_constants(const TypedArray<RDPipelineSpecializationConstant> &p_constants) {
	Vector<RenderingDevice::PipelineSpecializationConstant> ret;
	ret.resize(p_constants.size());
	for (int i = 0; i < p_constants.size(); i++) {
		Ref<RDPipelineSpecializationConstant> c = p_constants[i];
		ERR_CONTINUE(c.is_null());
		RenderingDevice::PipelineSpecializationConstant &sc = ret.write[i];
		Variant value = c->get_value();
		switch (value.get_type()) {
			case Variant::BOOL: {
				sc.type = RD::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
				sc.bool_value = value;
			} break;
			case Variant::INT: {
				sc.type = RD::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
				sc.int_value = value;
			} break;
			case Variant::FLOAT: {
				sc.type = RD::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
				sc.float_value = value;
			} break;
			default: {
			}
		}
		sc.constant_id = c->get_constant_id();
	}
	return ret;
}
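
// _get_spec_constants maps Variant::BOOL / INT / FLOAT onto the three
// pipeline specialization constant types; any other Variant type leaves the
// entry default-initialized. Illustrative use with a hypothetical constant:
//
//	Ref<RDPipelineSpecializationConstant> c;
//	c.instantiate();
//	c->set_constant_id(0);
//	c->set_value(true); // Becomes PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL.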
RID RenderingDevice::_render_pipeline_create(RID p_shader, FramebufferFormatID p_framebuffer_format, VertexFormatID p_vertex_format, RenderPrimitive p_render_primitive, const Ref<RDPipelineRasterizationState> &p_rasterization_state, const Ref<RDPipelineMultisampleState> &p_multisample_state, const Ref<RDPipelineDepthStencilState> &p_depth_stencil_state, const Ref<RDPipelineColorBlendState> &p_blend_state, BitField<PipelineDynamicStateFlags> p_dynamic_state_flags, uint32_t p_for_render_pass, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants) {
	PipelineRasterizationState rasterization_state;
	if (p_rasterization_state.is_valid()) {
		rasterization_state = p_rasterization_state->base;
	}
	PipelineMultisampleState multisample_state;
	if (p_multisample_state.is_valid()) {
		multisample_state = p_multisample_state->base;
		for (int i = 0; i < p_multisample_state->sample_masks.size(); i++) {
			int64_t mask = p_multisample_state->sample_masks[i];
			multisample_state.sample_mask.push_back(mask);
		}
	}
	PipelineDepthStencilState depth_stencil_state;
	if (p_depth_stencil_state.is_valid()) {
		depth_stencil_state = p_depth_stencil_state->base;
	}
	PipelineColorBlendState color_blend_state;
	if (p_blend_state.is_valid()) {
		color_blend_state = p_blend_state->base;
		for (int i = 0; i < p_blend_state->attachments.size(); i++) {
			Ref<RDPipelineColorBlendStateAttachment> attachment = p_blend_state->attachments[i];
			if (attachment.is_valid()) {
				color_blend_state.attachments.push_back(attachment->base);
			}
		}
	}
	return render_pipeline_create(p_shader, p_framebuffer_format, p_vertex_format, p_render_primitive, rasterization_state, multisample_state, depth_stencil_state, color_blend_state, p_dynamic_state_flags, p_for_render_pass, _get_spec_constants(p_specialization_constants));
}
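
// All four pipeline state objects above are optional: a null Ref falls back
// to a default-constructed native state struct, so callers only configure
// what they need. Sample masks and blend attachments are flattened into the
// native structs here rather than inside the RD* wrappers.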
RID RenderingDevice::_compute_pipeline_create(RID p_shader, const TypedArray<RDPipelineSpecializationConstant> &p_specialization_constants) {
	return compute_pipeline_create(p_shader, _get_spec_constants(p_specialization_constants));
}
#ifndef DISABLE_DEPRECATED
Vector<int64_t> RenderingDevice::_draw_list_begin_split(RID p_framebuffer, uint32_t p_splits, InitialAction p_initial_color_action, FinalAction p_final_color_action, InitialAction p_initial_depth_action, FinalAction p_final_depth_action, const Vector<Color> &p_clear_color_values, float p_clear_depth, uint32_t p_clear_stencil, const Rect2 &p_region, const TypedArray<RID> &p_storage_textures) {
	ERR_FAIL_V_MSG(Vector<int64_t>(), "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}

Vector<int64_t> RenderingDevice::_draw_list_switch_to_next_pass_split(uint32_t p_splits) {
	ERR_FAIL_V_MSG(Vector<int64_t>(), "Deprecated. Split draw lists are used automatically by RenderingDevice.");
}
#endif

void RenderingDevice::_draw_list_set_push_constant(DrawListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size) {
	ERR_FAIL_COND(p_data_size > (uint32_t)p_data.size());
	draw_list_set_push_constant(p_list, p_data.ptr(), p_data_size);
}

void RenderingDevice::_compute_list_set_push_constant(ComputeListID p_list, const Vector<uint8_t> &p_data, uint32_t p_data_size) {
	ERR_FAIL_COND(p_data_size > (uint32_t)p_data.size());
	compute_list_set_push_constant(p_list, p_data.ptr(), p_data_size);
}