/**************************************************************************/
/*  rendering_device_graph.cpp                                            */
/**************************************************************************/
/* This file is part of:                                                  */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#include "rendering_device_graph.h"
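
// Debug switches for this file; set any of them to 1 to enable. The PRINT_*
// switches turn on verbose debug output for the corresponding recording and
// processing stages, while FORCE_FULL_ACCESS_BITS replaces the computed
// per-usage access masks with full memory read/write bits, which helps rule
// out access-mask bugs when debugging synchronization issues.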
#define PRINT_RENDER_GRAPH 0
#define FORCE_FULL_ACCESS_BITS 0
#define PRINT_RESOURCE_TRACKER_TOTAL 0
#define PRINT_COMMAND_RECORDING 0

RenderingDeviceGraph::RenderingDeviceGraph() {
	driver_honors_barriers = false;
	driver_clears_with_copy_engine = false;
}

RenderingDeviceGraph::~RenderingDeviceGraph() {
}
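
// Classifies a resource usage as read-only or read-write. Write usages are
// what force ordering in the graph: a command that writes must be made
// adjacent to every earlier command that touched the same resource region.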
bool RenderingDeviceGraph::_is_write_usage(ResourceUsage p_usage) {
	switch (p_usage) {
		case RESOURCE_USAGE_COPY_FROM:
		case RESOURCE_USAGE_RESOLVE_FROM:
		case RESOURCE_USAGE_UNIFORM_BUFFER_READ:
		case RESOURCE_USAGE_INDIRECT_BUFFER_READ:
		case RESOURCE_USAGE_TEXTURE_BUFFER_READ:
		case RESOURCE_USAGE_STORAGE_BUFFER_READ:
		case RESOURCE_USAGE_VERTEX_BUFFER_READ:
		case RESOURCE_USAGE_INDEX_BUFFER_READ:
		case RESOURCE_USAGE_TEXTURE_SAMPLE:
		case RESOURCE_USAGE_STORAGE_IMAGE_READ:
			return false;
		case RESOURCE_USAGE_COPY_TO:
		case RESOURCE_USAGE_RESOLVE_TO:
		case RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE:
		case RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE:
		case RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE:
		case RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE:
		case RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE:
			return true;
		default:
			DEV_ASSERT(false && "Invalid resource tracker usage.");
			return false;
	}
}
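
// Maps a resource usage to the image layout a texture must be in for that
// usage. RESOURCE_USAGE_NONE maps to TEXTURE_LAYOUT_UNDEFINED, which allows
// the first transition away from it to discard any previous contents.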
RDD::TextureLayout RenderingDeviceGraph::_usage_to_image_layout(ResourceUsage p_usage) {
	switch (p_usage) {
		case RESOURCE_USAGE_COPY_FROM:
			return RDD::TEXTURE_LAYOUT_COPY_SRC_OPTIMAL;
		case RESOURCE_USAGE_COPY_TO:
			return RDD::TEXTURE_LAYOUT_COPY_DST_OPTIMAL;
		case RESOURCE_USAGE_RESOLVE_FROM:
			return RDD::TEXTURE_LAYOUT_RESOLVE_SRC_OPTIMAL;
		case RESOURCE_USAGE_RESOLVE_TO:
			return RDD::TEXTURE_LAYOUT_RESOLVE_DST_OPTIMAL;
		case RESOURCE_USAGE_TEXTURE_SAMPLE:
			return RDD::TEXTURE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
		case RESOURCE_USAGE_STORAGE_IMAGE_READ:
		case RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE:
			return RDD::TEXTURE_LAYOUT_STORAGE_OPTIMAL;
		case RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE:
			return RDD::TEXTURE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
		case RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE:
			return RDD::TEXTURE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
		case RESOURCE_USAGE_NONE:
			return RDD::TEXTURE_LAYOUT_UNDEFINED;
		default:
			DEV_ASSERT(false && "Invalid resource tracker usage or not an image usage.");
			return RDD::TEXTURE_LAYOUT_UNDEFINED;
	}
}
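
// Maps a resource usage to the barrier access bits used as the source or
// destination access masks when emitting memory, buffer and texture barriers.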
RDD::BarrierAccessBits RenderingDeviceGraph::_usage_to_access_bits(ResourceUsage p_usage) {
#if FORCE_FULL_ACCESS_BITS
	return RDD::BarrierAccessBits(RDD::BARRIER_ACCESS_MEMORY_READ_BIT | RDD::BARRIER_ACCESS_MEMORY_WRITE_BIT);
#else
	switch (p_usage) {
		case RESOURCE_USAGE_NONE:
			return RDD::BarrierAccessBits(0);
		case RESOURCE_USAGE_COPY_FROM:
			return RDD::BARRIER_ACCESS_COPY_READ_BIT;
		case RESOURCE_USAGE_COPY_TO:
			return RDD::BARRIER_ACCESS_COPY_WRITE_BIT;
		case RESOURCE_USAGE_RESOLVE_FROM:
			return RDD::BARRIER_ACCESS_RESOLVE_READ_BIT;
		case RESOURCE_USAGE_RESOLVE_TO:
			return RDD::BARRIER_ACCESS_RESOLVE_WRITE_BIT;
		case RESOURCE_USAGE_UNIFORM_BUFFER_READ:
			return RDD::BARRIER_ACCESS_UNIFORM_READ_BIT;
		case RESOURCE_USAGE_INDIRECT_BUFFER_READ:
			return RDD::BARRIER_ACCESS_INDIRECT_COMMAND_READ_BIT;
		case RESOURCE_USAGE_STORAGE_BUFFER_READ:
		case RESOURCE_USAGE_STORAGE_IMAGE_READ:
		case RESOURCE_USAGE_TEXTURE_BUFFER_READ:
		case RESOURCE_USAGE_TEXTURE_SAMPLE:
			return RDD::BARRIER_ACCESS_SHADER_READ_BIT;
		case RESOURCE_USAGE_TEXTURE_BUFFER_READ_WRITE:
		case RESOURCE_USAGE_STORAGE_BUFFER_READ_WRITE:
		case RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE:
			return RDD::BarrierAccessBits(RDD::BARRIER_ACCESS_SHADER_READ_BIT | RDD::BARRIER_ACCESS_SHADER_WRITE_BIT);
		case RESOURCE_USAGE_VERTEX_BUFFER_READ:
			return RDD::BARRIER_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
		case RESOURCE_USAGE_INDEX_BUFFER_READ:
			return RDD::BARRIER_ACCESS_INDEX_READ_BIT;
		case RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE:
			return RDD::BarrierAccessBits(RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_READ_BIT | RDD::BARRIER_ACCESS_COLOR_ATTACHMENT_WRITE_BIT);
		case RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE:
			return RDD::BarrierAccessBits(RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | RDD::BARRIER_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT);
		default:
			DEV_ASSERT(false && "Invalid usage.");
			return RDD::BarrierAccessBits(0);
	}
#endif
}
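
// Returns whether two commands writing to the same attachment actually need a
// dependency. Two draw lists that render to disjoint regions of the same
// attachment can safely overlap, so this only reports an intersection when
// their regions overlap; any other combination of commands is conservatively
// treated as intersecting.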
bool RenderingDeviceGraph::_check_command_intersection(ResourceTracker *p_resource_tracker, int32_t p_previous_command_index, int32_t p_command_index) const {
	if (p_resource_tracker->usage != RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE && p_resource_tracker->usage != RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE) {
		// We don't check possible intersections for usages that aren't consecutive color or depth writes.
		return true;
	}

	const uint32_t previous_command_data_offset = command_data_offsets[p_previous_command_index];
	const uint32_t current_command_data_offset = command_data_offsets[p_command_index];
	const RecordedDrawListCommand &previous_draw_list_command = *reinterpret_cast<const RecordedDrawListCommand *>(&command_data[previous_command_data_offset]);
	const RecordedDrawListCommand &current_draw_list_command = *reinterpret_cast<const RecordedDrawListCommand *>(&command_data[current_command_data_offset]);
	if (previous_draw_list_command.type != RecordedCommand::TYPE_DRAW_LIST || current_draw_list_command.type != RecordedCommand::TYPE_DRAW_LIST) {
		// We don't check possible intersections unless both commands are draw lists.
		return true;
	}

	// Check whether the regions used by the two draw lists intersect.
	return previous_draw_list_command.region.intersects(current_draw_list_command.region);
}
bool RenderingDeviceGraph::_check_command_partial_coverage(ResourceTracker *p_resource_tracker, int32_t p_command_index) const {
	if (p_resource_tracker->usage != RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE && p_resource_tracker->usage != RESOURCE_USAGE_ATTACHMENT_DEPTH_STENCIL_READ_WRITE) {
		// We don't check for partial coverage in usages that aren't attachment writes.
		return false;
	}

	const uint32_t command_data_offset = command_data_offsets[p_command_index];
	const RecordedDrawListCommand &draw_list_command = *reinterpret_cast<const RecordedDrawListCommand *>(&command_data[command_data_offset]);
	if (draw_list_command.type != RecordedCommand::TYPE_DRAW_LIST) {
		// We don't check for partial coverage on commands that aren't draw lists.
		return false;
	}

	Rect2i texture_region(Point2i(0, 0), p_resource_tracker->texture_size);
	return !draw_list_command.region.encloses(texture_region);
}
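
// The adjacency, read and write lists below are singly linked lists stored
// inside growable vectors: each node holds a command index and the vector
// index of the next node (-1 terminates the list). Adding an element returns
// the index of the new head, so lists are built by prepending, which keeps
// insertion O(1) and avoids per-node allocations.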
int32_t RenderingDeviceGraph::_add_to_command_list(int32_t p_command_index, int32_t p_list_index) {
	DEV_ASSERT(p_command_index < int32_t(command_count));
	DEV_ASSERT(p_list_index < int32_t(command_list_nodes.size()));

	int32_t next_index = int32_t(command_list_nodes.size());
	command_list_nodes.resize(next_index + 1);

	RecordedCommandListNode &new_node = command_list_nodes[next_index];
	new_node.command_index = p_command_index;
	new_node.next_list_index = p_list_index;
	return next_index;
}

void RenderingDeviceGraph::_add_adjacent_command(int32_t p_previous_command_index, int32_t p_command_index, RecordedCommand *r_command) {
	const uint32_t previous_command_data_offset = command_data_offsets[p_previous_command_index];
	RecordedCommand &previous_command = *reinterpret_cast<RecordedCommand *>(&command_data[previous_command_data_offset]);
	previous_command.adjacent_command_list_index = _add_to_command_list(p_command_index, previous_command.adjacent_command_list_index);
	previous_command.next_stages = previous_command.next_stages | r_command->self_stages;
	r_command->previous_stages = r_command->previous_stages | previous_command.self_stages;
}

int32_t RenderingDeviceGraph::_add_to_slice_read_list(int32_t p_command_index, Rect2i p_subresources, int32_t p_list_index) {
	DEV_ASSERT(p_command_index < int32_t(command_count));
	DEV_ASSERT(p_list_index < int32_t(read_slice_list_nodes.size()));

	int32_t next_index = int32_t(read_slice_list_nodes.size());
	read_slice_list_nodes.resize(next_index + 1);

	RecordedSliceListNode &new_node = read_slice_list_nodes[next_index];
	new_node.command_index = p_command_index;
	new_node.next_list_index = p_list_index;
	new_node.subresources = p_subresources;
	return next_index;
}

int32_t RenderingDeviceGraph::_add_to_write_list(int32_t p_command_index, Rect2i p_subresources, int32_t p_list_index, bool p_partial_coverage) {
	DEV_ASSERT(p_command_index < int32_t(command_count));
	DEV_ASSERT(p_list_index < int32_t(write_slice_list_nodes.size()));

	int32_t next_index = int32_t(write_slice_list_nodes.size());
	write_slice_list_nodes.resize(next_index + 1);

	RecordedSliceListNode &new_node = write_slice_list_nodes[next_index];
	new_node.command_index = p_command_index;
	new_node.next_list_index = p_list_index;
	new_node.subresources = p_subresources;
	new_node.partial_coverage = p_partial_coverage;
	return next_index;
}
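
// Recorded commands are variable-sized structures packed back to back into a
// single byte vector; command_data_offsets remembers where each command
// starts so it can later be recovered with a reinterpret_cast. The same
// packing scheme is used for draw and compute list instructions below.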
RenderingDeviceGraph::RecordedCommand *RenderingDeviceGraph::_allocate_command(uint32_t p_command_size, int32_t &r_command_index) {
	uint32_t command_data_offset = command_data.size();
	command_data_offsets.push_back(command_data_offset);
	command_data.resize(command_data_offset + p_command_size);
	r_command_index = command_count++;

	RecordedCommand *new_command = reinterpret_cast<RecordedCommand *>(&command_data[command_data_offset]);
	*new_command = RecordedCommand();
	return new_command;
}

RenderingDeviceGraph::DrawListInstruction *RenderingDeviceGraph::_allocate_draw_list_instruction(uint32_t p_instruction_size) {
	uint32_t draw_list_data_offset = draw_instruction_list.data.size();
	draw_instruction_list.data.resize(draw_list_data_offset + p_instruction_size);
	return reinterpret_cast<DrawListInstruction *>(&draw_instruction_list.data[draw_list_data_offset]);
}

RenderingDeviceGraph::ComputeListInstruction *RenderingDeviceGraph::_allocate_compute_list_instruction(uint32_t p_instruction_size) {
	uint32_t compute_list_data_offset = compute_instruction_list.data.size();
	compute_instruction_list.data.resize(compute_list_data_offset + p_instruction_size);
	return reinterpret_cast<ComputeListInstruction *>(&compute_instruction_list.data[compute_list_data_offset]);
}
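
// Discardable attachments normally end their draw lists with
// ATTACHMENT_STORE_OP_DONT_CARE. When a later command turns out to depend on
// the attachment's contents, the store op of the previous draw list that
// produced them must be upgraded to ATTACHMENT_STORE_OP_STORE, unless the
// dependent command clears the attachment and never reads the old contents.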
void RenderingDeviceGraph::_check_discardable_attachment_dependency(ResourceTracker *p_resource_tracker, int32_t p_previous_command_index, int32_t p_command_index) {
	if (!p_resource_tracker->is_discardable) {
		return;
	}

	// Check if the command is a draw list that clears the attachment completely. If it is, we don't need to modify the previous draw list.
	uint32_t command_offset = command_data_offsets[p_command_index];
	RecordedDrawListCommand *draw_list_command = reinterpret_cast<RecordedDrawListCommand *>(&command_data[command_offset]);
	if (draw_list_command->type == RecordedCommand::TYPE_DRAW_LIST) {
		ResourceTracker **trackers = draw_list_command->trackers();
		for (uint32_t i = 0; i < draw_list_command->trackers_count; i++) {
			if (trackers[i] == p_resource_tracker && draw_list_command->load_ops()[i] == RDD::ATTACHMENT_LOAD_OP_CLEAR) {
				return;
			}
		}
	}

	// Check if the previous command is a draw list.
	uint32_t previous_command_offset = command_data_offsets[p_previous_command_index];
	RecordedDrawListCommand *previous_draw_list_command = reinterpret_cast<RecordedDrawListCommand *>(&command_data[previous_command_offset]);
	if (previous_draw_list_command->type != RecordedCommand::TYPE_DRAW_LIST) {
		return;
	}

	// Search for the tracker inside the draw list command and modify the store operation accordingly.
	ResourceTracker **trackers = previous_draw_list_command->trackers();
	for (uint32_t i = 0; i < previous_draw_list_command->trackers_count; i++) {
		if (trackers[i] == p_resource_tracker) {
			previous_draw_list_command->store_ops()[i] = RDD::ATTACHMENT_STORE_OP_STORE;
			return;
		}
	}
}
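
// Core of the graph builder: registers a freshly recorded command against
// every resource it touches, emitting layout transition barriers where the
// usage changes and linking the command to the previous readers and writers
// it depends on. Dependencies are stored as adjacency lists on the commands
// themselves so the graph can later be traversed and the commands reordered
// at submission.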
void RenderingDeviceGraph::_add_command_to_graph(ResourceTracker **p_resource_trackers, ResourceUsage *p_resource_usages, uint32_t p_resource_count, int32_t p_command_index, RecordedCommand *r_command) {
	// Assign the next stages derived from the stages the command requires first.
	r_command->next_stages = r_command->self_stages;

	if (command_label_index >= 0) {
		// If a label is active, tag the command with the label.
		r_command->label_index = command_label_index;
	}

	if (r_command->type == RecordedCommand::TYPE_CAPTURE_TIMESTAMP) {
		// All previous commands starting from the previous timestamp should be adjacent to this command.
		int32_t start_command_index = uint32_t(MAX(command_timestamp_index, 0));
		for (int32_t i = start_command_index; i < p_command_index; i++) {
			_add_adjacent_command(i, p_command_index, r_command);
		}

		// Make this command the new active timestamp command.
		command_timestamp_index = p_command_index;
	} else if (command_timestamp_index >= 0) {
		// The active timestamp command should be adjacent to this command.
		_add_adjacent_command(command_timestamp_index, p_command_index, r_command);
	}

	if (command_synchronization_pending) {
		// All previous commands should be adjacent to this command.
		int32_t start_command_index = uint32_t(MAX(command_synchronization_index, 0));
		for (int32_t i = start_command_index; i < p_command_index; i++) {
			_add_adjacent_command(i, p_command_index, r_command);
		}

		command_synchronization_index = p_command_index;
		command_synchronization_pending = false;
	} else if (command_synchronization_index >= 0) {
		// The synchronization command should be adjacent to this command.
		_add_adjacent_command(command_synchronization_index, p_command_index, r_command);
	}
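
	// Walk every resource the command uses. Slices (trackers that have a
	// parent) need extra bookkeeping: the parent keeps a dirty list of slices
	// whose layout diverged from its own, and that list is reconciled below
	// before the dependencies and barriers for this command are computed.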
	for (uint32_t i = 0; i < p_resource_count; i++) {
		ResourceTracker *resource_tracker = p_resource_trackers[i];
		DEV_ASSERT(resource_tracker != nullptr);

		resource_tracker->reset_if_outdated(tracking_frame);

		const RDD::TextureSubresourceRange &subresources = resource_tracker->texture_subresources;
		const Rect2i resource_tracker_rect(subresources.base_mipmap, subresources.base_layer, subresources.mipmap_count, subresources.layer_count);
		Rect2i search_tracker_rect = resource_tracker_rect;

		ResourceUsage new_resource_usage = p_resource_usages[i];
		bool write_usage = _is_write_usage(new_resource_usage);
		BitField<RDD::BarrierAccessBits> new_usage_access = _usage_to_access_bits(new_resource_usage);
		bool is_resource_a_slice = resource_tracker->parent != nullptr;
		if (is_resource_a_slice) {
			// This resource depends on a parent resource.
			resource_tracker->parent->reset_if_outdated(tracking_frame);

			if (resource_tracker->texture_slice_command_index != p_command_index) {
				// Indicate this slice has been used by this command.
				resource_tracker->texture_slice_command_index = p_command_index;
			}

			if (resource_tracker->parent->usage == RESOURCE_USAGE_NONE) {
				if (resource_tracker->parent->texture_driver_id.id != 0) {
					// If the resource is a texture, we transition it entirely to the layout determined by the first slice that uses it.
					_add_texture_barrier_to_command(resource_tracker->parent->texture_driver_id, RDD::BarrierAccessBits(0), new_usage_access, RDG::RESOURCE_USAGE_NONE, new_resource_usage, resource_tracker->parent->texture_subresources, command_normalization_barriers, r_command->normalization_barrier_index, r_command->normalization_barrier_count);
				}

				// If the parent hasn't been used yet, we assign the usage of the slice to the entire resource.
				resource_tracker->parent->usage = new_resource_usage;

				// Also assign the usage to the slice and consider it a write operation. Consider the parent's current usage access as its own.
				resource_tracker->usage = new_resource_usage;
				resource_tracker->usage_access = resource_tracker->parent->usage_access;
				write_usage = true;

				// Indicate the area that should be tracked is the entire resource.
				const RDD::TextureSubresourceRange &parent_subresources = resource_tracker->parent->texture_subresources;
				search_tracker_rect = Rect2i(parent_subresources.base_mipmap, parent_subresources.base_layer, parent_subresources.mipmap_count, parent_subresources.layer_count);
			} else if (resource_tracker->in_parent_dirty_list) {
				if (resource_tracker->parent->usage == new_resource_usage) {
					// The slice will be transitioned to the usage of the parent and can be deleted from the dirty list.
					ResourceTracker *previous_tracker = nullptr;
					ResourceTracker *current_tracker = resource_tracker->parent->dirty_shared_list;
					bool initialized_dirty_rect = false;
					while (current_tracker != nullptr) {
						current_tracker->reset_if_outdated(tracking_frame);

						if (current_tracker == resource_tracker) {
							current_tracker->in_parent_dirty_list = false;

							if (previous_tracker != nullptr) {
								previous_tracker->next_shared = current_tracker->next_shared;
							} else {
								resource_tracker->parent->dirty_shared_list = current_tracker->next_shared;
							}

							current_tracker = current_tracker->next_shared;
						} else {
							if (initialized_dirty_rect) {
								resource_tracker->parent->texture_slice_or_dirty_rect = resource_tracker->parent->texture_slice_or_dirty_rect.merge(current_tracker->texture_slice_or_dirty_rect);
							} else {
								resource_tracker->parent->texture_slice_or_dirty_rect = current_tracker->texture_slice_or_dirty_rect;
								initialized_dirty_rect = true;
							}

							previous_tracker = current_tracker;
							current_tracker = current_tracker->next_shared;
						}
					}
				}
			}
			} else {
				if (resource_tracker->parent->dirty_shared_list != nullptr && resource_tracker->parent->texture_slice_or_dirty_rect.intersects(resource_tracker->texture_slice_or_dirty_rect)) {
					// There's an intersection between the parent's current dirty area and the slice. We must verify whether the intersection is against a slice
					// that was used in this command or not. Any slice we can find that wasn't used by this command must be reverted to the layout of the parent.
					ResourceTracker *previous_tracker = nullptr;
					ResourceTracker *current_tracker = resource_tracker->parent->dirty_shared_list;
					bool initialized_dirty_rect = false;
					while (current_tracker != nullptr) {
						current_tracker->reset_if_outdated(tracking_frame);

						if (current_tracker->texture_slice_or_dirty_rect.intersects(resource_tracker->texture_slice_or_dirty_rect)) {
							if (current_tracker->command_frame == tracking_frame && current_tracker->texture_slice_command_index == p_command_index) {
								ERR_FAIL_MSG("Texture slices that overlap can't be used in the same command.");
							} else {
								// Delete the slice from the dirty list and revert it to the usage of the parent.
								if (current_tracker->texture_driver_id.id != 0) {
									_add_texture_barrier_to_command(current_tracker->texture_driver_id, current_tracker->usage_access, new_usage_access, current_tracker->usage, resource_tracker->parent->usage, current_tracker->texture_subresources, command_normalization_barriers, r_command->normalization_barrier_index, r_command->normalization_barrier_count);

									// Merge the area of the slice with the current tracking area of the command and indicate it's a write usage as well.
									search_tracker_rect = search_tracker_rect.merge(current_tracker->texture_slice_or_dirty_rect);
									write_usage = true;
								}

								current_tracker->in_parent_dirty_list = false;

								if (previous_tracker != nullptr) {
									previous_tracker->next_shared = current_tracker->next_shared;
								} else {
									resource_tracker->parent->dirty_shared_list = current_tracker->next_shared;
								}

								current_tracker = current_tracker->next_shared;
							}
						} else {
							// Recalculate the dirty rect of the parent so the deleted slices are excluded.
							if (initialized_dirty_rect) {
								resource_tracker->parent->texture_slice_or_dirty_rect = resource_tracker->parent->texture_slice_or_dirty_rect.merge(current_tracker->texture_slice_or_dirty_rect);
							} else {
								resource_tracker->parent->texture_slice_or_dirty_rect = current_tracker->texture_slice_or_dirty_rect;
								initialized_dirty_rect = true;
							}

							previous_tracker = current_tracker;
							current_tracker = current_tracker->next_shared;
						}
					}
				}

				// If it wasn't in the list, assume the usage is the same as the parent. Consider the parent's current usage access as its own.
				resource_tracker->usage = resource_tracker->parent->usage;
				resource_tracker->usage_access = resource_tracker->parent->usage_access;

				if (resource_tracker->usage != new_resource_usage) {
					// Insert to the dirty list if the requested usage is different.
					resource_tracker->next_shared = resource_tracker->parent->dirty_shared_list;
					resource_tracker->parent->dirty_shared_list = resource_tracker;
					resource_tracker->in_parent_dirty_list = true;
					if (resource_tracker->parent->dirty_shared_list != nullptr) {
						resource_tracker->parent->texture_slice_or_dirty_rect = resource_tracker->parent->texture_slice_or_dirty_rect.merge(resource_tracker->texture_slice_or_dirty_rect);
					} else {
						resource_tracker->parent->texture_slice_or_dirty_rect = resource_tracker->texture_slice_or_dirty_rect;
					}
				}
			}
		} else {
			ResourceTracker *current_tracker = resource_tracker->dirty_shared_list;
			if (current_tracker != nullptr) {
				// Consider the usage as write if we must transition any of the slices.
				write_usage = true;
			}

			while (current_tracker != nullptr) {
				current_tracker->reset_if_outdated(tracking_frame);

				if (current_tracker->texture_driver_id.id != 0) {
					// Transition all slices to the layout of the parent resource.
					_add_texture_barrier_to_command(current_tracker->texture_driver_id, current_tracker->usage_access, new_usage_access, current_tracker->usage, resource_tracker->usage, current_tracker->texture_subresources, command_normalization_barriers, r_command->normalization_barrier_index, r_command->normalization_barrier_count);
				}

				current_tracker->in_parent_dirty_list = false;
				current_tracker = current_tracker->next_shared;
			}

			resource_tracker->dirty_shared_list = nullptr;
		}

		// Use the resource's parent tracker directly for all search operations.
		bool resource_has_parent = resource_tracker->parent != nullptr;
		ResourceTracker *search_tracker = resource_has_parent ? resource_tracker->parent : resource_tracker;
		bool different_usage = resource_tracker->usage != new_resource_usage;
		bool write_usage_after_write = (write_usage && search_tracker->write_command_or_list_index >= 0);
		if (different_usage || write_usage_after_write) {
			// A barrier must be pushed if the usage is different, or if it's a write usage and a previous command already wrote to this resource.
			if (resource_tracker->texture_driver_id.id != 0) {
				if (resource_tracker->usage_access.is_empty()) {
					// FIXME: If the tracker does not know the previous type of usage, assume the generic memory write one.
					// Tracking access bits across texture slices can be tricky, so this failsafe can be removed once that's improved.
					resource_tracker->usage_access = RDD::BARRIER_ACCESS_MEMORY_WRITE_BIT;
				}

				_add_texture_barrier_to_command(resource_tracker->texture_driver_id, resource_tracker->usage_access, new_usage_access, resource_tracker->usage, new_resource_usage, resource_tracker->texture_subresources, command_transition_barriers, r_command->transition_barrier_index, r_command->transition_barrier_count);
			} else if (resource_tracker->buffer_driver_id.id != 0) {
#if USE_BUFFER_BARRIERS
				_add_buffer_barrier_to_command(resource_tracker->buffer_driver_id, resource_tracker->usage_access, new_usage_access, r_command->buffer_barrier_index, r_command->buffer_barrier_count);
#endif
				// Memory barriers are pushed regardless of whether buffer barriers are used.
				r_command->memory_barrier.src_access = r_command->memory_barrier.src_access | resource_tracker->usage_access;
				r_command->memory_barrier.dst_access = r_command->memory_barrier.dst_access | new_usage_access;
			} else {
				DEV_ASSERT(false && "Resource tracker does not contain a valid buffer or texture ID.");
			}
		}

		// Always update the access of the tracker according to the latest usage.
		resource_tracker->usage_access = new_usage_access;

		// Always accumulate the stages of the tracker with the commands that use it.
		search_tracker->current_frame_stages = search_tracker->current_frame_stages | r_command->self_stages;

		if (!search_tracker->previous_frame_stages.is_empty()) {
			// Add to the command the stages the tracker was used on in the previous frame.
			r_command->previous_stages = r_command->previous_stages | search_tracker->previous_frame_stages;
			search_tracker->previous_frame_stages.clear();
		}

		if (different_usage) {
			// Even if the usage of the resource isn't explicitly a write usage, a different usage implies a transition and should therefore be considered a write.
			// For buffers, however, this is only necessary if the driver considers different buffer usages to be different states.
			write_usage = write_usage || bool(resource_tracker->texture_driver_id) || driver_buffers_require_transitions;
			resource_tracker->usage = new_resource_usage;
		}

		bool write_usage_has_partial_coverage = !different_usage && _check_command_partial_coverage(resource_tracker, p_command_index);
		if (search_tracker->write_command_or_list_index >= 0) {
			if (search_tracker->write_command_list_enabled) {
				// Make this command adjacent to any commands that wrote to this resource and intersect with the slice if it applies.
				// For buffers or textures that never use slices, this list will only be one element long at most.
				int32_t previous_write_list_index = -1;
				int32_t write_list_index = search_tracker->write_command_or_list_index;
				while (write_list_index >= 0) {
					const RecordedSliceListNode &write_list_node = write_slice_list_nodes[write_list_index];
					if (!resource_has_parent || search_tracker_rect.intersects(write_list_node.subresources)) {
						if (write_list_node.command_index == p_command_index) {
							ERR_FAIL_COND_MSG(!resource_has_parent, "Command can't have itself as a dependency.");
						} else if (!write_list_node.partial_coverage || _check_command_intersection(resource_tracker, write_list_node.command_index, p_command_index)) {
							_check_discardable_attachment_dependency(search_tracker, write_list_node.command_index, p_command_index);

							// This command depends on the write command. Add this command to the adjacency list of the write command.
							_add_adjacent_command(write_list_node.command_index, p_command_index, r_command);

							if (resource_has_parent && write_usage && search_tracker_rect.encloses(write_list_node.subresources) && !write_usage_has_partial_coverage) {
								// Eliminate redundant writes from the list.
								if (previous_write_list_index >= 0) {
									RecordedSliceListNode &previous_list_node = write_slice_list_nodes[previous_write_list_index];
									previous_list_node.next_list_index = write_list_node.next_list_index;
								} else {
									search_tracker->write_command_or_list_index = write_list_node.next_list_index;
								}

								write_list_index = write_list_node.next_list_index;
								continue;
							}
						}
					}

					previous_write_list_index = write_list_index;
					write_list_index = write_list_node.next_list_index;
				}
			} else {
				// The index is just the latest command index that wrote to the resource.
				if (search_tracker->write_command_or_list_index == p_command_index) {
					ERR_FAIL_MSG("Command can't have itself as a dependency.");
				} else {
					_check_discardable_attachment_dependency(search_tracker, search_tracker->write_command_or_list_index, p_command_index);
					_add_adjacent_command(search_tracker->write_command_or_list_index, p_command_index, r_command);
				}
			}
		}

		if (write_usage) {
			bool use_write_list = resource_has_parent || write_usage_has_partial_coverage;
			if (use_write_list) {
				if (!search_tracker->write_command_list_enabled && search_tracker->write_command_or_list_index >= 0) {
					// Write command list was not being used but there was a write command recorded. Add a new node with the entire parent resource's subresources and the recorded command index to the list.
					const RDD::TextureSubresourceRange &tracker_subresources = search_tracker->texture_subresources;
					Rect2i tracker_rect(tracker_subresources.base_mipmap, tracker_subresources.base_layer, tracker_subresources.mipmap_count, tracker_subresources.layer_count);
					search_tracker->write_command_or_list_index = _add_to_write_list(search_tracker->write_command_or_list_index, tracker_rect, -1, false);
				}

				search_tracker->write_command_or_list_index = _add_to_write_list(p_command_index, search_tracker_rect, search_tracker->write_command_or_list_index, write_usage_has_partial_coverage);
				search_tracker->write_command_list_enabled = true;
			} else {
				search_tracker->write_command_or_list_index = p_command_index;
				search_tracker->write_command_list_enabled = false;
			}

			// We add this command to the adjacency list of all commands that were reading from the entire resource.
			int32_t read_full_command_list_index = search_tracker->read_full_command_list_index;
			while (read_full_command_list_index >= 0) {
				int32_t read_full_command_index = command_list_nodes[read_full_command_list_index].command_index;
				int32_t read_full_next_index = command_list_nodes[read_full_command_list_index].next_list_index;
				if (read_full_command_index == p_command_index) {
					if (!resource_has_parent) {
						// Only slices are allowed to be in different usages in the same command, as they are guaranteed not to overlap within it.
						ERR_FAIL_MSG("Command can't have itself as a dependency.");
					}
				} else {
					// Add this command to the adjacency list of each command that was reading this resource.
					_add_adjacent_command(read_full_command_index, p_command_index, r_command);
				}

				read_full_command_list_index = read_full_next_index;
			}

			if (!use_write_list) {
				// Clear the full list if this resource is not a slice.
				search_tracker->read_full_command_list_index = -1;
			}

			// We add this command to the adjacency list of all commands that were reading from resource slices.
			int32_t previous_slice_command_list_index = -1;
			int32_t read_slice_command_list_index = search_tracker->read_slice_command_list_index;
			while (read_slice_command_list_index >= 0) {
				const RecordedSliceListNode &read_list_node = read_slice_list_nodes[read_slice_command_list_index];
				if (!use_write_list || search_tracker_rect.encloses(read_list_node.subresources)) {
					if (previous_slice_command_list_index >= 0) {
						// Erase this element and connect the previous one to the next element.
						read_slice_list_nodes[previous_slice_command_list_index].next_list_index = read_list_node.next_list_index;
					} else {
						// Erase this element from the head of the list.
						DEV_ASSERT(search_tracker->read_slice_command_list_index == read_slice_command_list_index);
						search_tracker->read_slice_command_list_index = read_list_node.next_list_index;
					}

					// Advance to the next element.
					read_slice_command_list_index = read_list_node.next_list_index;
				} else {
					previous_slice_command_list_index = read_slice_command_list_index;
					read_slice_command_list_index = read_list_node.next_list_index;
				}

				if (!resource_has_parent || search_tracker_rect.intersects(read_list_node.subresources)) {
					// Add this command to the adjacency list of each command that was reading this resource.
					// We only add the dependency if there's an intersection between slices or this resource isn't a slice.
					_add_adjacent_command(read_list_node.command_index, p_command_index, r_command);
				}
			}
		} else if (resource_has_parent) {
			// We add a read dependency to the tracker to indicate this command reads from the resource slice.
			search_tracker->read_slice_command_list_index = _add_to_slice_read_list(p_command_index, resource_tracker_rect, search_tracker->read_slice_command_list_index);
		} else {
			// We add a read dependency to the tracker to indicate this command reads from the entire resource.
			search_tracker->read_full_command_list_index = _add_to_command_list(p_command_index, search_tracker->read_full_command_list_index);
		}
	}
}
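
// Appends a texture barrier to the given vector. Each command stores its
// barriers as a contiguous range (start index plus count) inside the shared
// vector, so r_barrier_index is only initialized by the first barrier the
// command pushes and r_barrier_count grows with each one after that.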
void RenderingDeviceGraph::_add_texture_barrier_to_command(RDD::TextureID p_texture_id, BitField<RDD::BarrierAccessBits> p_src_access, BitField<RDD::BarrierAccessBits> p_dst_access, ResourceUsage p_prev_usage, ResourceUsage p_next_usage, RDD::TextureSubresourceRange p_subresources, LocalVector<RDD::TextureBarrier> &r_barrier_vector, int32_t &r_barrier_index, int32_t &r_barrier_count) {
	if (!driver_honors_barriers) {
		return;
	}

	if (r_barrier_index < 0) {
		r_barrier_index = r_barrier_vector.size();
	}

	RDD::TextureBarrier texture_barrier;
	texture_barrier.texture = p_texture_id;
	texture_barrier.src_access = p_src_access;
	texture_barrier.dst_access = p_dst_access;
	texture_barrier.prev_layout = _usage_to_image_layout(p_prev_usage);
	texture_barrier.next_layout = _usage_to_image_layout(p_next_usage);
	texture_barrier.subresources = p_subresources;
	r_barrier_vector.push_back(texture_barrier);
	r_barrier_count++;
}

#if USE_BUFFER_BARRIERS
void RenderingDeviceGraph::_add_buffer_barrier_to_command(RDD::BufferID p_buffer_id, BitField<RDD::BarrierAccessBits> p_src_access, BitField<RDD::BarrierAccessBits> p_dst_access, int32_t &r_barrier_index, int32_t &r_barrier_count) {
	if (!driver_honors_barriers) {
		return;
	}

	if (r_barrier_index < 0) {
		r_barrier_index = command_buffer_barriers.size();
	}

	RDD::BufferBarrier buffer_barrier;
	buffer_barrier.buffer = p_buffer_id;
	buffer_barrier.src_access = p_src_access;
	buffer_barrier.dst_access = p_dst_access;
	buffer_barrier.offset = 0;
	buffer_barrier.size = RDD::BUFFER_WHOLE_SIZE;
	command_buffer_barriers.push_back(buffer_barrier);
	r_barrier_count++;
}
#endif
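
// Replays a recorded compute instruction stream on an actual command buffer.
// Instructions are variable-sized and tightly packed, so the cursor advances
// by the size of each instruction (plus any trailing payload, such as the
// uniform set IDs or push constant data) until the stream is exhausted.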
void RenderingDeviceGraph::_run_compute_list_command(RDD::CommandBufferID p_command_buffer, const uint8_t *p_instruction_data, uint32_t p_instruction_data_size) {
	uint32_t instruction_data_cursor = 0;
	while (instruction_data_cursor < p_instruction_data_size) {
		DEV_ASSERT((instruction_data_cursor + sizeof(ComputeListInstruction)) <= p_instruction_data_size);

		const ComputeListInstruction *instruction = reinterpret_cast<const ComputeListInstruction *>(&p_instruction_data[instruction_data_cursor]);
		switch (instruction->type) {
			case ComputeListInstruction::TYPE_BIND_PIPELINE: {
				const ComputeListBindPipelineInstruction *bind_pipeline_instruction = reinterpret_cast<const ComputeListBindPipelineInstruction *>(instruction);
				driver->command_bind_compute_pipeline(p_command_buffer, bind_pipeline_instruction->pipeline);
				instruction_data_cursor += sizeof(ComputeListBindPipelineInstruction);
			} break;
			case ComputeListInstruction::TYPE_BIND_UNIFORM_SETS: {
				const ComputeListBindUniformSetsInstruction *bind_uniform_sets_instruction = reinterpret_cast<const ComputeListBindUniformSetsInstruction *>(instruction);
				driver->command_bind_compute_uniform_sets(p_command_buffer, VectorView<RDD::UniformSetID>(bind_uniform_sets_instruction->uniform_set_ids(), bind_uniform_sets_instruction->set_count), bind_uniform_sets_instruction->shader, bind_uniform_sets_instruction->first_set_index, bind_uniform_sets_instruction->set_count);
				instruction_data_cursor += sizeof(ComputeListBindUniformSetsInstruction) + sizeof(RDD::UniformSetID) * bind_uniform_sets_instruction->set_count;
			} break;
			case ComputeListInstruction::TYPE_DISPATCH: {
				const ComputeListDispatchInstruction *dispatch_instruction = reinterpret_cast<const ComputeListDispatchInstruction *>(instruction);
				driver->command_compute_dispatch(p_command_buffer, dispatch_instruction->x_groups, dispatch_instruction->y_groups, dispatch_instruction->z_groups);
				instruction_data_cursor += sizeof(ComputeListDispatchInstruction);
			} break;
			case ComputeListInstruction::TYPE_DISPATCH_INDIRECT: {
				const ComputeListDispatchIndirectInstruction *dispatch_indirect_instruction = reinterpret_cast<const ComputeListDispatchIndirectInstruction *>(instruction);
				driver->command_compute_dispatch_indirect(p_command_buffer, dispatch_indirect_instruction->buffer, dispatch_indirect_instruction->offset);
				instruction_data_cursor += sizeof(ComputeListDispatchIndirectInstruction);
			} break;
			case ComputeListInstruction::TYPE_SET_PUSH_CONSTANT: {
				const ComputeListSetPushConstantInstruction *set_push_constant_instruction = reinterpret_cast<const ComputeListSetPushConstantInstruction *>(instruction);
				const VectorView push_constant_data_view(reinterpret_cast<const uint32_t *>(set_push_constant_instruction->data()), set_push_constant_instruction->size / sizeof(uint32_t));
				driver->command_bind_push_constants(p_command_buffer, set_push_constant_instruction->shader, 0, push_constant_data_view);
				instruction_data_cursor += sizeof(ComputeListSetPushConstantInstruction);
				instruction_data_cursor += set_push_constant_instruction->size;
			} break;
			case ComputeListInstruction::TYPE_UNIFORM_SET_PREPARE_FOR_USE: {
				const ComputeListUniformSetPrepareForUseInstruction *uniform_set_prepare_for_use_instruction = reinterpret_cast<const ComputeListUniformSetPrepareForUseInstruction *>(instruction);
				driver->command_uniform_set_prepare_for_use(p_command_buffer, uniform_set_prepare_for_use_instruction->uniform_set, uniform_set_prepare_for_use_instruction->shader, uniform_set_prepare_for_use_instruction->set_index);
				instruction_data_cursor += sizeof(ComputeListUniformSetPrepareForUseInstruction);
			} break;
			default:
				DEV_ASSERT(false && "Unknown compute list instruction type.");
				return;
		}
	}
}
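
// Resolves (and lazily creates) the render pass and framebuffer for a draw
// list. Render passes are cached per combination of load and store ops: each
// attachment contributes three bits to a 64-bit key (the load op shifted to
// bit i * 3, the store op to bit i * 3 + 2), which is why the encoding caps
// out at 21 attachments.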
void RenderingDeviceGraph::_get_draw_list_render_pass_and_framebuffer(const RecordedDrawListCommand *p_draw_list_command, RDD::RenderPassID &r_render_pass, RDD::FramebufferID &r_framebuffer) {
	DEV_ASSERT(p_draw_list_command->trackers_count <= 21 && "Max number of attachments that can be encoded into the key.");

	// Build a unique key from the load and store ops for each attachment.
	const RDD::AttachmentLoadOp *load_ops = p_draw_list_command->load_ops();
	const RDD::AttachmentStoreOp *store_ops = p_draw_list_command->store_ops();
	uint64_t key = 0;
	for (uint32_t i = 0; i < p_draw_list_command->trackers_count; i++) {
		key |= uint64_t(load_ops[i]) << (i * 3);
		key |= uint64_t(store_ops[i]) << (i * 3 + 2);
	}

	// Check the storage map to see if the render pass and framebuffer need to be created.
	FramebufferCache *framebuffer_cache = p_draw_list_command->framebuffer_cache;
	HashMap<uint64_t, FramebufferStorage>::Iterator it = framebuffer_cache->storage_map.find(key);
	if (it == framebuffer_cache->storage_map.end()) {
		FramebufferStorage storage;
		VectorView<RDD::AttachmentLoadOp> load_ops_view(load_ops, p_draw_list_command->trackers_count);
		VectorView<RDD::AttachmentStoreOp> store_ops_view(store_ops, p_draw_list_command->trackers_count);
		storage.render_pass = render_pass_creation_function(driver, load_ops_view, store_ops_view, framebuffer_cache->render_pass_creation_user_data);
		ERR_FAIL_COND(!storage.render_pass);

		storage.framebuffer = driver->framebuffer_create(storage.render_pass, framebuffer_cache->textures, framebuffer_cache->width, framebuffer_cache->height);
		ERR_FAIL_COND(!storage.framebuffer);

		it = framebuffer_cache->storage_map.insert(key, storage);
	}

	r_render_pass = it->value.render_pass;
	r_framebuffer = it->value.framebuffer;
}
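
// Replays a recorded draw instruction stream on an actual command buffer,
// decoding variable-sized instructions the same way as the compute path.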
void RenderingDeviceGraph::_run_draw_list_command(RDD::CommandBufferID p_command_buffer, const uint8_t *p_instruction_data, uint32_t p_instruction_data_size) {
	uint32_t instruction_data_cursor = 0;
	while (instruction_data_cursor < p_instruction_data_size) {
		DEV_ASSERT((instruction_data_cursor + sizeof(DrawListInstruction)) <= p_instruction_data_size);

		const DrawListInstruction *instruction = reinterpret_cast<const DrawListInstruction *>(&p_instruction_data[instruction_data_cursor]);
		switch (instruction->type) {
			case DrawListInstruction::TYPE_BIND_INDEX_BUFFER: {
				const DrawListBindIndexBufferInstruction *bind_index_buffer_instruction = reinterpret_cast<const DrawListBindIndexBufferInstruction *>(instruction);
				driver->command_render_bind_index_buffer(p_command_buffer, bind_index_buffer_instruction->buffer, bind_index_buffer_instruction->format, bind_index_buffer_instruction->offset);
				instruction_data_cursor += sizeof(DrawListBindIndexBufferInstruction);
			} break;
			case DrawListInstruction::TYPE_BIND_PIPELINE: {
				const DrawListBindPipelineInstruction *bind_pipeline_instruction = reinterpret_cast<const DrawListBindPipelineInstruction *>(instruction);
				driver->command_bind_render_pipeline(p_command_buffer, bind_pipeline_instruction->pipeline);
				instruction_data_cursor += sizeof(DrawListBindPipelineInstruction);
			} break;
			case DrawListInstruction::TYPE_BIND_UNIFORM_SETS: {
				const DrawListBindUniformSetsInstruction *bind_uniform_sets_instruction = reinterpret_cast<const DrawListBindUniformSetsInstruction *>(instruction);
				driver->command_bind_render_uniform_sets(p_command_buffer, VectorView<RDD::UniformSetID>(bind_uniform_sets_instruction->uniform_set_ids(), bind_uniform_sets_instruction->set_count), bind_uniform_sets_instruction->shader, bind_uniform_sets_instruction->first_set_index, bind_uniform_sets_instruction->set_count);
				instruction_data_cursor += sizeof(DrawListBindUniformSetsInstruction) + sizeof(RDD::UniformSetID) * bind_uniform_sets_instruction->set_count;
			} break;
			case DrawListInstruction::TYPE_BIND_VERTEX_BUFFERS: {
				const DrawListBindVertexBuffersInstruction *bind_vertex_buffers_instruction = reinterpret_cast<const DrawListBindVertexBuffersInstruction *>(instruction);
				driver->command_render_bind_vertex_buffers(p_command_buffer, bind_vertex_buffers_instruction->vertex_buffers_count, bind_vertex_buffers_instruction->vertex_buffers(), bind_vertex_buffers_instruction->vertex_buffer_offsets());
				instruction_data_cursor += sizeof(DrawListBindVertexBuffersInstruction);
				instruction_data_cursor += sizeof(RDD::BufferID) * bind_vertex_buffers_instruction->vertex_buffers_count;
				instruction_data_cursor += sizeof(uint64_t) * bind_vertex_buffers_instruction->vertex_buffers_count;
			} break;
			case DrawListInstruction::TYPE_CLEAR_ATTACHMENTS: {
				const DrawListClearAttachmentsInstruction *clear_attachments_instruction = reinterpret_cast<const DrawListClearAttachmentsInstruction *>(instruction);
				const VectorView attachments_clear_view(clear_attachments_instruction->attachments_clear(), clear_attachments_instruction->attachments_clear_count);
				const VectorView attachments_clear_rect_view(clear_attachments_instruction->attachments_clear_rect(), clear_attachments_instruction->attachments_clear_rect_count);
				driver->command_render_clear_attachments(p_command_buffer, attachments_clear_view, attachments_clear_rect_view);
				instruction_data_cursor += sizeof(DrawListClearAttachmentsInstruction);
				instruction_data_cursor += sizeof(RDD::AttachmentClear) * clear_attachments_instruction->attachments_clear_count;
				instruction_data_cursor += sizeof(Rect2i) * clear_attachments_instruction->attachments_clear_rect_count;
			} break;
			case DrawListInstruction::TYPE_DRAW: {
				const DrawListDrawInstruction *draw_instruction = reinterpret_cast<const DrawListDrawInstruction *>(instruction);
				driver->command_render_draw(p_command_buffer, draw_instruction->vertex_count, draw_instruction->instance_count, 0, 0);
				instruction_data_cursor += sizeof(DrawListDrawInstruction);
			} break;
			case DrawListInstruction::TYPE_DRAW_INDEXED: {
				const DrawListDrawIndexedInstruction *draw_indexed_instruction = reinterpret_cast<const DrawListDrawIndexedInstruction *>(instruction);
				driver->command_render_draw_indexed(p_command_buffer, draw_indexed_instruction->index_count, draw_indexed_instruction->instance_count, draw_indexed_instruction->first_index, 0, 0);
				instruction_data_cursor += sizeof(DrawListDrawIndexedInstruction);
			} break;
			case DrawListInstruction::TYPE_DRAW_INDIRECT: {
				const DrawListDrawIndirectInstruction *draw_indirect_instruction = reinterpret_cast<const DrawListDrawIndirectInstruction *>(instruction);
				driver->command_render_draw_indirect(p_command_buffer, draw_indirect_instruction->buffer, draw_indirect_instruction->offset, draw_indirect_instruction->draw_count, draw_indirect_instruction->stride);
				instruction_data_cursor += sizeof(DrawListDrawIndirectInstruction);
			} break;
			case DrawListInstruction::TYPE_DRAW_INDEXED_INDIRECT: {
				const DrawListDrawIndexedIndirectInstruction *draw_indexed_indirect_instruction = reinterpret_cast<const DrawListDrawIndexedIndirectInstruction *>(instruction);
				driver->command_render_draw_indexed_indirect(p_command_buffer, draw_indexed_indirect_instruction->buffer, draw_indexed_indirect_instruction->offset, draw_indexed_indirect_instruction->draw_count, draw_indexed_indirect_instruction->stride);
				instruction_data_cursor += sizeof(DrawListDrawIndexedIndirectInstruction);
			} break;
			case DrawListInstruction::TYPE_EXECUTE_COMMANDS: {
				const DrawListExecuteCommandsInstruction *execute_commands_instruction = reinterpret_cast<const DrawListExecuteCommandsInstruction *>(instruction);
				driver->command_buffer_execute_secondary(p_command_buffer, execute_commands_instruction->command_buffer);
				instruction_data_cursor += sizeof(DrawListExecuteCommandsInstruction);
			} break;
			case DrawListInstruction::TYPE_NEXT_SUBPASS: {
				const DrawListNextSubpassInstruction *next_subpass_instruction = reinterpret_cast<const DrawListNextSubpassInstruction *>(instruction);
				driver->command_next_render_subpass(p_command_buffer, next_subpass_instruction->command_buffer_type);
				instruction_data_cursor += sizeof(DrawListNextSubpassInstruction);
			} break;
			case DrawListInstruction::TYPE_SET_BLEND_CONSTANTS: {
				const DrawListSetBlendConstantsInstruction *set_blend_constants_instruction = reinterpret_cast<const DrawListSetBlendConstantsInstruction *>(instruction);
				driver->command_render_set_blend_constants(p_command_buffer, set_blend_constants_instruction->color);
				instruction_data_cursor += sizeof(DrawListSetBlendConstantsInstruction);
			} break;
			case DrawListInstruction::TYPE_SET_LINE_WIDTH: {
				const DrawListSetLineWidthInstruction *set_line_width_instruction = reinterpret_cast<const DrawListSetLineWidthInstruction *>(instruction);
				driver->command_render_set_line_width(p_command_buffer, set_line_width_instruction->width);
				instruction_data_cursor += sizeof(DrawListSetLineWidthInstruction);
			} break;
			case DrawListInstruction::TYPE_SET_PUSH_CONSTANT: {
				const DrawListSetPushConstantInstruction *set_push_constant_instruction = reinterpret_cast<const DrawListSetPushConstantInstruction *>(instruction);
  754. const VectorView push_constant_data_view(reinterpret_cast<const uint32_t *>(set_push_constant_instruction->data()), set_push_constant_instruction->size / sizeof(uint32_t));
  755. driver->command_bind_push_constants(p_command_buffer, set_push_constant_instruction->shader, 0, push_constant_data_view);
  756. instruction_data_cursor += sizeof(DrawListSetPushConstantInstruction);
  757. instruction_data_cursor += set_push_constant_instruction->size;
  758. } break;
  759. case DrawListInstruction::TYPE_SET_SCISSOR: {
  760. const DrawListSetScissorInstruction *set_scissor_instruction = reinterpret_cast<const DrawListSetScissorInstruction *>(instruction);
  761. driver->command_render_set_scissor(p_command_buffer, set_scissor_instruction->rect);
  762. instruction_data_cursor += sizeof(DrawListSetScissorInstruction);
  763. } break;
  764. case DrawListInstruction::TYPE_SET_VIEWPORT: {
  765. const DrawListSetViewportInstruction *set_viewport_instruction = reinterpret_cast<const DrawListSetViewportInstruction *>(instruction);
  766. driver->command_render_set_viewport(p_command_buffer, set_viewport_instruction->rect);
  767. instruction_data_cursor += sizeof(DrawListSetViewportInstruction);
  768. } break;
  769. case DrawListInstruction::TYPE_UNIFORM_SET_PREPARE_FOR_USE: {
  770. const DrawListUniformSetPrepareForUseInstruction *uniform_set_prepare_for_use_instruction = reinterpret_cast<const DrawListUniformSetPrepareForUseInstruction *>(instruction);
  771. driver->command_uniform_set_prepare_for_use(p_command_buffer, uniform_set_prepare_for_use_instruction->uniform_set, uniform_set_prepare_for_use_instruction->shader, uniform_set_prepare_for_use_instruction->set_index);
  772. instruction_data_cursor += sizeof(DrawListUniformSetPrepareForUseInstruction);
  773. } break;
  774. default:
  775. DEV_ASSERT(false && "Unknown draw list instruction type.");
  776. return;
  777. }
  778. }
  779. }
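// Shared implementation behind both add_draw_list_begin() overloads: resets the
// draw instruction list and stores the pass state (render pass, framebuffer,
// region, attachment operations and clear values) for the draw list being recorded.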
void RenderingDeviceGraph::_add_draw_list_begin(FramebufferCache *p_framebuffer_cache, RDD::RenderPassID p_render_pass, RDD::FramebufferID p_framebuffer, Rect2i p_region, VectorView<AttachmentOperation> p_attachment_operations, VectorView<RDD::RenderPassClearValue> p_attachment_clear_values, bool p_uses_color, bool p_uses_depth, uint32_t p_breadcrumb, bool p_split_cmd_buffer) {
	DEV_ASSERT(p_attachment_operations.size() == p_attachment_clear_values.size());
	draw_instruction_list.clear();
	draw_instruction_list.index++;
	draw_instruction_list.framebuffer_cache = p_framebuffer_cache;
	draw_instruction_list.render_pass = p_render_pass;
	draw_instruction_list.framebuffer = p_framebuffer;
	draw_instruction_list.region = p_region;
	draw_instruction_list.attachment_operations.resize(p_attachment_operations.size());
	draw_instruction_list.attachment_clear_values.resize(p_attachment_clear_values.size());
	for (uint32_t i = 0; i < p_attachment_operations.size(); i++) {
		draw_instruction_list.attachment_operations[i] = p_attachment_operations[i];
		draw_instruction_list.attachment_clear_values[i] = p_attachment_clear_values[i];
	}
	if (p_uses_color) {
		draw_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
	}
	if (p_uses_depth) {
		draw_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT);
		draw_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
	}
	draw_instruction_list.split_cmd_buffer = p_split_cmd_buffer;
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
	draw_instruction_list.breadcrumb = p_breadcrumb;
#endif
}

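// Records a secondary command buffer by replaying the stored draw list
// instructions. Run as a WorkerThreadPool task (see the task waits below).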
void RenderingDeviceGraph::_run_secondary_command_buffer_task(const SecondaryCommandBuffer *p_secondary) {
	driver->command_buffer_begin_secondary(p_secondary->command_buffer, p_secondary->render_pass, 0, p_secondary->framebuffer);
	_run_draw_list_command(p_secondary->command_buffer, p_secondary->instruction_data.ptr(), p_secondary->instruction_data.size());
	driver->command_buffer_end(p_secondary->command_buffer);
}

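// Blocks until every secondary command buffer recording task for the current
// frame has finished, then invalidates the task IDs so they aren't waited on twice.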
void RenderingDeviceGraph::_wait_for_secondary_command_buffer_tasks() {
	for (uint32_t i = 0; i < frames[frame].secondary_command_buffers_used; i++) {
		WorkerThreadPool::TaskID &task = frames[frame].secondary_command_buffers[i].task;
		if (task != WorkerThreadPool::INVALID_TASK_ID) {
			WorkerThreadPool::get_singleton()->wait_for_task_completion(task);
			task = WorkerThreadPool::INVALID_TASK_ID;
		}
	}
}

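// Translates the sorted, recorded commands of a single level into driver calls
// on the active command buffer.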
void RenderingDeviceGraph::_run_render_commands(int32_t p_level, const RecordedCommandSort *p_sorted_commands, uint32_t p_sorted_commands_count, RDD::CommandBufferID &r_command_buffer, CommandBufferPool &r_command_buffer_pool, int32_t &r_current_label_index, int32_t &r_current_label_level) {
	for (uint32_t i = 0; i < p_sorted_commands_count; i++) {
		const uint32_t command_index = p_sorted_commands[i].index;
		const uint32_t command_data_offset = command_data_offsets[command_index];
		const RecordedCommand *command = reinterpret_cast<const RecordedCommand *>(&command_data[command_data_offset]);
		_run_label_command_change(r_command_buffer, command->label_index, p_level, false, true, &p_sorted_commands[i], p_sorted_commands_count - i, r_current_label_index, r_current_label_level);
		switch (command->type) {
			case RecordedCommand::TYPE_BUFFER_CLEAR: {
				const RecordedBufferClearCommand *buffer_clear_command = reinterpret_cast<const RecordedBufferClearCommand *>(command);
				driver->command_clear_buffer(r_command_buffer, buffer_clear_command->buffer, buffer_clear_command->offset, buffer_clear_command->size);
			} break;
			case RecordedCommand::TYPE_BUFFER_COPY: {
				const RecordedBufferCopyCommand *buffer_copy_command = reinterpret_cast<const RecordedBufferCopyCommand *>(command);
				driver->command_copy_buffer(r_command_buffer, buffer_copy_command->source, buffer_copy_command->destination, buffer_copy_command->region);
			} break;
			case RecordedCommand::TYPE_BUFFER_GET_DATA: {
				const RecordedBufferGetDataCommand *buffer_get_data_command = reinterpret_cast<const RecordedBufferGetDataCommand *>(command);
				driver->command_copy_buffer(r_command_buffer, buffer_get_data_command->source, buffer_get_data_command->destination, buffer_get_data_command->region);
			} break;
			case RecordedCommand::TYPE_BUFFER_UPDATE: {
				const RecordedBufferUpdateCommand *buffer_update_command = reinterpret_cast<const RecordedBufferUpdateCommand *>(command);
				const RecordedBufferCopy *command_buffer_copies = buffer_update_command->buffer_copies();
				for (uint32_t j = 0; j < buffer_update_command->buffer_copies_count; j++) {
					driver->command_copy_buffer(r_command_buffer, command_buffer_copies[j].source, buffer_update_command->destination, command_buffer_copies[j].region);
				}
			} break;
			case RecordedCommand::TYPE_COMPUTE_LIST: {
				if (device.workarounds.avoid_compute_after_draw && workarounds_state.draw_list_found) {
					// Avoid compute after draw workaround. Refer to the comment that enables this in the Vulkan driver for more information.
					workarounds_state.draw_list_found = false;

					// Create or reuse a command buffer and finish recording the current one.
					driver->command_buffer_end(r_command_buffer);
					while (r_command_buffer_pool.buffers_used >= r_command_buffer_pool.buffers.size()) {
						RDD::CommandBufferID command_buffer = driver->command_buffer_create(r_command_buffer_pool.pool);
						RDD::SemaphoreID command_semaphore = driver->semaphore_create();
						r_command_buffer_pool.buffers.push_back(command_buffer);
						r_command_buffer_pool.semaphores.push_back(command_semaphore);
					}

					// Start recording on the next usable command buffer from the pool.
					uint32_t command_buffer_index = r_command_buffer_pool.buffers_used++;
					r_command_buffer = r_command_buffer_pool.buffers[command_buffer_index];
					driver->command_buffer_begin(r_command_buffer);
				}
				const RecordedComputeListCommand *compute_list_command = reinterpret_cast<const RecordedComputeListCommand *>(command);
				_run_compute_list_command(r_command_buffer, compute_list_command->instruction_data(), compute_list_command->instruction_data_size);
			} break;
			case RecordedCommand::TYPE_DRAW_LIST: {
				if (device.workarounds.avoid_compute_after_draw) {
					// Indicate that a draw list was encountered for the workaround.
					workarounds_state.draw_list_found = true;
				}
				const RecordedDrawListCommand *draw_list_command = reinterpret_cast<const RecordedDrawListCommand *>(command);
				if (draw_list_command->split_cmd_buffer) {
					// Create or reuse a command buffer and finish recording the current one.
					driver->command_buffer_end(r_command_buffer);
					while (r_command_buffer_pool.buffers_used >= r_command_buffer_pool.buffers.size()) {
						RDD::CommandBufferID command_buffer = driver->command_buffer_create(r_command_buffer_pool.pool);
						RDD::SemaphoreID command_semaphore = driver->semaphore_create();
						r_command_buffer_pool.buffers.push_back(command_buffer);
						r_command_buffer_pool.semaphores.push_back(command_semaphore);
					}

					// Start recording on the next usable command buffer from the pool.
					uint32_t command_buffer_index = r_command_buffer_pool.buffers_used++;
					r_command_buffer = r_command_buffer_pool.buffers[command_buffer_index];
					driver->command_buffer_begin(r_command_buffer);
				}
				const VectorView clear_values(draw_list_command->clear_values(), draw_list_command->clear_values_count);
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
				driver->command_insert_breadcrumb(r_command_buffer, draw_list_command->breadcrumb);
#endif
				RDD::RenderPassID render_pass;
				RDD::FramebufferID framebuffer;
				if (draw_list_command->framebuffer_cache != nullptr) {
					_get_draw_list_render_pass_and_framebuffer(draw_list_command, render_pass, framebuffer);
				} else {
					render_pass = draw_list_command->render_pass;
					framebuffer = draw_list_command->framebuffer;
				}
				if (framebuffer && render_pass) {
					driver->command_begin_render_pass(r_command_buffer, render_pass, framebuffer, draw_list_command->command_buffer_type, draw_list_command->region, clear_values);
					_run_draw_list_command(r_command_buffer, draw_list_command->instruction_data(), draw_list_command->instruction_data_size);
					driver->command_end_render_pass(r_command_buffer);
				}
			} break;
			case RecordedCommand::TYPE_TEXTURE_CLEAR: {
				const RecordedTextureClearCommand *texture_clear_command = reinterpret_cast<const RecordedTextureClearCommand *>(command);
				driver->command_clear_color_texture(r_command_buffer, texture_clear_command->texture, RDD::TEXTURE_LAYOUT_COPY_DST_OPTIMAL, texture_clear_command->color, texture_clear_command->range);
			} break;
			case RecordedCommand::TYPE_TEXTURE_COPY: {
				const RecordedTextureCopyCommand *texture_copy_command = reinterpret_cast<const RecordedTextureCopyCommand *>(command);
				const VectorView<RDD::TextureCopyRegion> command_texture_copy_regions_view(texture_copy_command->texture_copy_regions(), texture_copy_command->texture_copy_regions_count);
				driver->command_copy_texture(r_command_buffer, texture_copy_command->from_texture, RDD::TEXTURE_LAYOUT_COPY_SRC_OPTIMAL, texture_copy_command->to_texture, RDD::TEXTURE_LAYOUT_COPY_DST_OPTIMAL, command_texture_copy_regions_view);
			} break;
			case RecordedCommand::TYPE_TEXTURE_GET_DATA: {
				const RecordedTextureGetDataCommand *texture_get_data_command = reinterpret_cast<const RecordedTextureGetDataCommand *>(command);
				const VectorView<RDD::BufferTextureCopyRegion> command_buffer_texture_copy_regions_view(texture_get_data_command->buffer_texture_copy_regions(), texture_get_data_command->buffer_texture_copy_regions_count);
				driver->command_copy_texture_to_buffer(r_command_buffer, texture_get_data_command->from_texture, RDD::TEXTURE_LAYOUT_COPY_SRC_OPTIMAL, texture_get_data_command->to_buffer, command_buffer_texture_copy_regions_view);
			} break;
			case RecordedCommand::TYPE_TEXTURE_RESOLVE: {
				const RecordedTextureResolveCommand *texture_resolve_command = reinterpret_cast<const RecordedTextureResolveCommand *>(command);
				driver->command_resolve_texture(r_command_buffer, texture_resolve_command->from_texture, RDD::TEXTURE_LAYOUT_RESOLVE_SRC_OPTIMAL, texture_resolve_command->src_layer, texture_resolve_command->src_mipmap, texture_resolve_command->to_texture, RDD::TEXTURE_LAYOUT_RESOLVE_DST_OPTIMAL, texture_resolve_command->dst_layer, texture_resolve_command->dst_mipmap);
			} break;
			case RecordedCommand::TYPE_TEXTURE_UPDATE: {
				const RecordedTextureUpdateCommand *texture_update_command = reinterpret_cast<const RecordedTextureUpdateCommand *>(command);
				const RecordedBufferToTextureCopy *command_buffer_to_texture_copies = texture_update_command->buffer_to_texture_copies();
				for (uint32_t j = 0; j < texture_update_command->buffer_to_texture_copies_count; j++) {
					driver->command_copy_buffer_to_texture(r_command_buffer, command_buffer_to_texture_copies[j].from_buffer, texture_update_command->to_texture, RDD::TEXTURE_LAYOUT_COPY_DST_OPTIMAL, command_buffer_to_texture_copies[j].region);
				}
			} break;
			case RecordedCommand::TYPE_CAPTURE_TIMESTAMP: {
				const RecordedCaptureTimestampCommand *texture_capture_timestamp_command = reinterpret_cast<const RecordedCaptureTimestampCommand *>(command);
				driver->command_timestamp_write(r_command_buffer, texture_capture_timestamp_command->pool, texture_capture_timestamp_command->index);
			} break;
			default: {
				DEV_ASSERT(false && "Unknown recorded command type.");
				return;
			}
		}
	}
}

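// Begins and ends debug labels as the label index or level changes. The label
// name gets the level appended and, when commands are available for analysis,
// a summary of the operation types (Copy/Compute/Draw) that share the label.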
void RenderingDeviceGraph::_run_label_command_change(RDD::CommandBufferID p_command_buffer, int32_t p_new_label_index, int32_t p_new_level, bool p_ignore_previous_value, bool p_use_label_for_empty, const RecordedCommandSort *p_sorted_commands, uint32_t p_sorted_commands_count, int32_t &r_current_label_index, int32_t &r_current_label_level) {
	if (command_label_count == 0) {
		// Ignore any label operations if no labels were pushed.
		return;
	}
	if (p_ignore_previous_value || p_new_label_index != r_current_label_index || p_new_level != r_current_label_level) {
		if (!p_ignore_previous_value && (p_use_label_for_empty || r_current_label_index >= 0 || r_current_label_level >= 0)) {
			// End the current label.
			driver->command_end_label(p_command_buffer);
		}
		String label_name;
		Color label_color;
		if (p_new_label_index >= 0) {
			const char *label_chars = &command_label_chars[command_label_offsets[p_new_label_index]];
			label_name.parse_utf8(label_chars);
			label_color = command_label_colors[p_new_label_index];
		} else if (p_use_label_for_empty) {
			label_name = "Command graph";
			label_color = Color(1, 1, 1, 1);
		} else {
			return;
		}
		// Add the level to the name.
		label_name += " (L" + itos(p_new_level) + ")";
		if (p_sorted_commands != nullptr && p_sorted_commands_count > 0) {
			// Analyze the commands in the level that have the same label to detect what type of operations are performed.
			bool copy_commands = false;
			bool compute_commands = false;
			bool draw_commands = false;
			for (uint32_t i = 0; i < p_sorted_commands_count; i++) {
				const uint32_t command_index = p_sorted_commands[i].index;
				const uint32_t command_data_offset = command_data_offsets[command_index];
				const RecordedCommand *command = reinterpret_cast<RecordedCommand *>(&command_data[command_data_offset]);
				if (command->label_index != p_new_label_index) {
					break;
				}
				switch (command->type) {
					case RecordedCommand::TYPE_BUFFER_CLEAR:
					case RecordedCommand::TYPE_BUFFER_COPY:
					case RecordedCommand::TYPE_BUFFER_GET_DATA:
					case RecordedCommand::TYPE_BUFFER_UPDATE:
					case RecordedCommand::TYPE_TEXTURE_CLEAR:
					case RecordedCommand::TYPE_TEXTURE_COPY:
					case RecordedCommand::TYPE_TEXTURE_GET_DATA:
					case RecordedCommand::TYPE_TEXTURE_RESOLVE:
					case RecordedCommand::TYPE_TEXTURE_UPDATE: {
						copy_commands = true;
					} break;
					case RecordedCommand::TYPE_COMPUTE_LIST: {
						compute_commands = true;
					} break;
					case RecordedCommand::TYPE_DRAW_LIST: {
						draw_commands = true;
					} break;
					default: {
						// Ignore command.
					} break;
				}
				if (copy_commands && compute_commands && draw_commands) {
					// There are no more command types to find.
					break;
				}
			}
			if (copy_commands || compute_commands || draw_commands) {
				// Add the operations to the name.
				bool plus_after_copy = copy_commands && (compute_commands || draw_commands);
				bool plus_after_compute = compute_commands && draw_commands;
				label_name += " (";
				label_name += copy_commands ? "Copy" : "";
				label_name += plus_after_copy ? "+" : "";
				label_name += compute_commands ? "Compute" : "";
				label_name += plus_after_compute ? "+" : "";
				label_name += draw_commands ? "Draw" : "";
				label_name += ")";
			}
		}
		// Start the new label.
		CharString label_name_utf8 = label_name.utf8();
		driver->command_begin_label(p_command_buffer, label_name_utf8.get_data(), label_color);
		r_current_label_index = p_new_label_index;
		r_current_label_level = p_new_level;
	}
}

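// Promotes commands that match the previously boosted priority to the highest
// priority (0) and re-sorts, then remembers the priority of the last command
// so it can be boosted on a later call.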
void RenderingDeviceGraph::_boost_priority_for_render_commands(RecordedCommandSort *p_sorted_commands, uint32_t p_sorted_commands_count, uint32_t &r_boosted_priority) {
	if (p_sorted_commands_count == 0) {
		return;
	}
	const uint32_t boosted_priority_value = 0;
	if (r_boosted_priority > 0) {
		bool perform_sort = false;
		for (uint32_t j = 0; j < p_sorted_commands_count; j++) {
			if (p_sorted_commands[j].priority == r_boosted_priority) {
				p_sorted_commands[j].priority = boosted_priority_value;
				perform_sort = true;
			}
		}
		if (perform_sort) {
			SortArray<RecordedCommandSort> command_sorter;
			command_sorter.sort(p_sorted_commands, p_sorted_commands_count);
		}
	}
	if (p_sorted_commands[p_sorted_commands_count - 1].priority != boosted_priority_value) {
		r_boosted_priority = p_sorted_commands[p_sorted_commands_count - 1].priority;
	}
}

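// Merges the stages, memory barriers and texture/buffer barriers of all sorted
// commands into as few pipeline barriers as possible: one barrier normally, or
// two when both normalization and transition texture barriers are present.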
void RenderingDeviceGraph::_group_barriers_for_render_commands(RDD::CommandBufferID p_command_buffer, const RecordedCommandSort *p_sorted_commands, uint32_t p_sorted_commands_count, bool p_full_memory_barrier) {
	if (!driver_honors_barriers) {
		return;
	}
	barrier_group.clear();
	barrier_group.src_stages = RDD::PIPELINE_STAGE_TOP_OF_PIPE_BIT;
	barrier_group.dst_stages = RDD::PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
	for (uint32_t i = 0; i < p_sorted_commands_count; i++) {
		const uint32_t command_index = p_sorted_commands[i].index;
		const uint32_t command_data_offset = command_data_offsets[command_index];
		const RecordedCommand *command = reinterpret_cast<RecordedCommand *>(&command_data[command_data_offset]);
#if PRINT_COMMAND_RECORDING
		print_line(vformat("Grouping barriers for #%d", command_index));
#endif
		// Merge command's stage bits with the barrier group.
		barrier_group.src_stages = barrier_group.src_stages | command->previous_stages;
		barrier_group.dst_stages = barrier_group.dst_stages | command->next_stages;
		// Merge command's memory barrier bits with the barrier group.
		barrier_group.memory_barrier.src_access = barrier_group.memory_barrier.src_access | command->memory_barrier.src_access;
		barrier_group.memory_barrier.dst_access = barrier_group.memory_barrier.dst_access | command->memory_barrier.dst_access;
		// Gather texture barriers.
		for (int32_t j = 0; j < command->normalization_barrier_count; j++) {
			const RDD::TextureBarrier &recorded_barrier = command_normalization_barriers[command->normalization_barrier_index + j];
			barrier_group.normalization_barriers.push_back(recorded_barrier);
#if PRINT_COMMAND_RECORDING
			print_line(vformat("Normalization Barrier #%d", barrier_group.normalization_barriers.size() - 1));
#endif
		}
		for (int32_t j = 0; j < command->transition_barrier_count; j++) {
			const RDD::TextureBarrier &recorded_barrier = command_transition_barriers[command->transition_barrier_index + j];
			barrier_group.transition_barriers.push_back(recorded_barrier);
#if PRINT_COMMAND_RECORDING
			print_line(vformat("Transition Barrier #%d", barrier_group.transition_barriers.size() - 1));
#endif
		}
#if USE_BUFFER_BARRIERS
		// Gather buffer barriers.
		for (int32_t j = 0; j < command->buffer_barrier_count; j++) {
			const RDD::BufferBarrier &recorded_barrier = command_buffer_barriers[command->buffer_barrier_index + j];
			barrier_group.buffer_barriers.push_back(recorded_barrier);
		}
#endif
	}
	if (p_full_memory_barrier) {
		barrier_group.src_stages = RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT;
		barrier_group.dst_stages = RDD::PIPELINE_STAGE_ALL_COMMANDS_BIT;
		barrier_group.memory_barrier.src_access = RDD::BARRIER_ACCESS_MEMORY_READ_BIT | RDD::BARRIER_ACCESS_MEMORY_WRITE_BIT;
		barrier_group.memory_barrier.dst_access = RDD::BARRIER_ACCESS_MEMORY_READ_BIT | RDD::BARRIER_ACCESS_MEMORY_WRITE_BIT;
	}
	const bool is_memory_barrier_empty = barrier_group.memory_barrier.src_access.is_empty() && barrier_group.memory_barrier.dst_access.is_empty();
	const bool are_texture_barriers_empty = barrier_group.normalization_barriers.is_empty() && barrier_group.transition_barriers.is_empty();
#if USE_BUFFER_BARRIERS
	const bool are_buffer_barriers_empty = barrier_group.buffer_barriers.is_empty();
#else
	const bool are_buffer_barriers_empty = true;
#endif
	if (is_memory_barrier_empty && are_texture_barriers_empty && are_buffer_barriers_empty) {
		// Commands don't require synchronization.
		return;
	}
	const VectorView<RDD::MemoryBarrier> memory_barriers = !is_memory_barrier_empty ? barrier_group.memory_barrier : VectorView<RDD::MemoryBarrier>();
	const VectorView<RDD::TextureBarrier> texture_barriers = barrier_group.normalization_barriers.is_empty() ? barrier_group.transition_barriers : barrier_group.normalization_barriers;
#if USE_BUFFER_BARRIERS
	const VectorView<RDD::BufferBarrier> buffer_barriers = !are_buffer_barriers_empty ? barrier_group.buffer_barriers : VectorView<RDD::BufferBarrier>();
#else
	const VectorView<RDD::BufferBarrier> buffer_barriers = VectorView<RDD::BufferBarrier>();
#endif
	driver->command_pipeline_barrier(p_command_buffer, barrier_group.src_stages, barrier_group.dst_stages, memory_barriers, buffer_barriers, texture_barriers);
	bool separate_texture_barriers = !barrier_group.normalization_barriers.is_empty() && !barrier_group.transition_barriers.is_empty();
	if (separate_texture_barriers) {
		driver->command_pipeline_barrier(p_command_buffer, barrier_group.src_stages, barrier_group.dst_stages, VectorView<RDD::MemoryBarrier>(), VectorView<RDD::BufferBarrier>(), barrier_group.transition_barriers);
	}
}

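// Debug helper: prints a one-line summary of every sorted command.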
void RenderingDeviceGraph::_print_render_commands(const RecordedCommandSort *p_sorted_commands, uint32_t p_sorted_commands_count) {
	for (uint32_t i = 0; i < p_sorted_commands_count; i++) {
		const uint32_t command_index = p_sorted_commands[i].index;
		const uint32_t command_level = p_sorted_commands[i].level;
		const uint32_t command_data_offset = command_data_offsets[command_index];
		const RecordedCommand *command = reinterpret_cast<RecordedCommand *>(&command_data[command_data_offset]);
		switch (command->type) {
			case RecordedCommand::TYPE_BUFFER_CLEAR: {
				const RecordedBufferClearCommand *buffer_clear_command = reinterpret_cast<const RecordedBufferClearCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "BUFFER CLEAR DESTINATION", itos(buffer_clear_command->buffer.id));
			} break;
			case RecordedCommand::TYPE_BUFFER_COPY: {
				const RecordedBufferCopyCommand *buffer_copy_command = reinterpret_cast<const RecordedBufferCopyCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "BUFFER COPY SOURCE", itos(buffer_copy_command->source.id), "DESTINATION", itos(buffer_copy_command->destination.id));
			} break;
			case RecordedCommand::TYPE_BUFFER_GET_DATA: {
				const RecordedBufferGetDataCommand *buffer_get_data_command = reinterpret_cast<const RecordedBufferGetDataCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "BUFFER GET DATA DESTINATION", itos(buffer_get_data_command->destination.id));
			} break;
			case RecordedCommand::TYPE_BUFFER_UPDATE: {
				const RecordedBufferUpdateCommand *buffer_update_command = reinterpret_cast<const RecordedBufferUpdateCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "BUFFER UPDATE DESTINATION", itos(buffer_update_command->destination.id), "COPIES", buffer_update_command->buffer_copies_count);
			} break;
			case RecordedCommand::TYPE_COMPUTE_LIST: {
				const RecordedComputeListCommand *compute_list_command = reinterpret_cast<const RecordedComputeListCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "COMPUTE LIST SIZE", compute_list_command->instruction_data_size);
			} break;
			case RecordedCommand::TYPE_DRAW_LIST: {
				const RecordedDrawListCommand *draw_list_command = reinterpret_cast<const RecordedDrawListCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "DRAW LIST SIZE", draw_list_command->instruction_data_size);
			} break;
			case RecordedCommand::TYPE_TEXTURE_CLEAR: {
				const RecordedTextureClearCommand *texture_clear_command = reinterpret_cast<const RecordedTextureClearCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "TEXTURE CLEAR", itos(texture_clear_command->texture.id), "COLOR", texture_clear_command->color);
			} break;
			case RecordedCommand::TYPE_TEXTURE_COPY: {
				const RecordedTextureCopyCommand *texture_copy_command = reinterpret_cast<const RecordedTextureCopyCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "TEXTURE COPY FROM", itos(texture_copy_command->from_texture.id), "TO", itos(texture_copy_command->to_texture.id));
			} break;
			case RecordedCommand::TYPE_TEXTURE_GET_DATA: {
				print_line(command_index, "LEVEL", command_level, "TEXTURE GET DATA");
			} break;
			case RecordedCommand::TYPE_TEXTURE_RESOLVE: {
				const RecordedTextureResolveCommand *texture_resolve_command = reinterpret_cast<const RecordedTextureResolveCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "TEXTURE RESOLVE FROM", itos(texture_resolve_command->from_texture.id), "TO", itos(texture_resolve_command->to_texture.id));
			} break;
			case RecordedCommand::TYPE_TEXTURE_UPDATE: {
				const RecordedTextureUpdateCommand *texture_update_command = reinterpret_cast<const RecordedTextureUpdateCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "TEXTURE UPDATE TO", itos(texture_update_command->to_texture.id));
			} break;
			case RecordedCommand::TYPE_CAPTURE_TIMESTAMP: {
				const RecordedCaptureTimestampCommand *texture_capture_timestamp_command = reinterpret_cast<const RecordedCaptureTimestampCommand *>(command);
				print_line(command_index, "LEVEL", command_level, "CAPTURE TIMESTAMP POOL", itos(texture_capture_timestamp_command->pool.id), "INDEX", texture_capture_timestamp_command->index);
			} break;
			default:
				DEV_ASSERT(false && "Unknown recorded command type.");
				return;
		}
	}
}

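// Debug helper: walks the serialized draw list data and prints each instruction,
// advancing the cursor by the instruction size plus any variable-sized payload
// (uniform set IDs, vertex buffers, clear rects, push constant data).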
void RenderingDeviceGraph::_print_draw_list(const uint8_t *p_instruction_data, uint32_t p_instruction_data_size) {
	uint32_t instruction_data_cursor = 0;
	while (instruction_data_cursor < p_instruction_data_size) {
		DEV_ASSERT((instruction_data_cursor + sizeof(DrawListInstruction)) <= p_instruction_data_size);
		const DrawListInstruction *instruction = reinterpret_cast<const DrawListInstruction *>(&p_instruction_data[instruction_data_cursor]);
		switch (instruction->type) {
			case DrawListInstruction::TYPE_BIND_INDEX_BUFFER: {
				const DrawListBindIndexBufferInstruction *bind_index_buffer_instruction = reinterpret_cast<const DrawListBindIndexBufferInstruction *>(instruction);
				print_line("\tBIND INDEX BUFFER ID", itos(bind_index_buffer_instruction->buffer.id), "FORMAT", bind_index_buffer_instruction->format, "OFFSET", bind_index_buffer_instruction->offset);
				instruction_data_cursor += sizeof(DrawListBindIndexBufferInstruction);
			} break;
			case DrawListInstruction::TYPE_BIND_PIPELINE: {
				const DrawListBindPipelineInstruction *bind_pipeline_instruction = reinterpret_cast<const DrawListBindPipelineInstruction *>(instruction);
				print_line("\tBIND PIPELINE ID", itos(bind_pipeline_instruction->pipeline.id));
				instruction_data_cursor += sizeof(DrawListBindPipelineInstruction);
			} break;
			case DrawListInstruction::TYPE_BIND_UNIFORM_SETS: {
				const DrawListBindUniformSetsInstruction *bind_uniform_sets_instruction = reinterpret_cast<const DrawListBindUniformSetsInstruction *>(instruction);
				print_line("\tBIND UNIFORM SETS COUNT", bind_uniform_sets_instruction->set_count);
				for (uint32_t i = 0; i < bind_uniform_sets_instruction->set_count; i++) {
					print_line("\tBIND UNIFORM SET ID", itos(bind_uniform_sets_instruction->uniform_set_ids()[i].id), "START INDEX", bind_uniform_sets_instruction->first_set_index);
				}
				instruction_data_cursor += sizeof(DrawListBindUniformSetsInstruction) + sizeof(RDD::UniformSetID) * bind_uniform_sets_instruction->set_count;
			} break;
			case DrawListInstruction::TYPE_BIND_VERTEX_BUFFERS: {
				const DrawListBindVertexBuffersInstruction *bind_vertex_buffers_instruction = reinterpret_cast<const DrawListBindVertexBuffersInstruction *>(instruction);
				print_line("\tBIND VERTEX BUFFERS COUNT", bind_vertex_buffers_instruction->vertex_buffers_count);
				instruction_data_cursor += sizeof(DrawListBindVertexBuffersInstruction);
				instruction_data_cursor += sizeof(RDD::BufferID) * bind_vertex_buffers_instruction->vertex_buffers_count;
				instruction_data_cursor += sizeof(uint64_t) * bind_vertex_buffers_instruction->vertex_buffers_count;
			} break;
			case DrawListInstruction::TYPE_CLEAR_ATTACHMENTS: {
				const DrawListClearAttachmentsInstruction *clear_attachments_instruction = reinterpret_cast<const DrawListClearAttachmentsInstruction *>(instruction);
				print_line("\tATTACHMENTS CLEAR COUNT", clear_attachments_instruction->attachments_clear_count, "RECT COUNT", clear_attachments_instruction->attachments_clear_rect_count);
				instruction_data_cursor += sizeof(DrawListClearAttachmentsInstruction);
				instruction_data_cursor += sizeof(RDD::AttachmentClear) * clear_attachments_instruction->attachments_clear_count;
				instruction_data_cursor += sizeof(Rect2i) * clear_attachments_instruction->attachments_clear_rect_count;
			} break;
			case DrawListInstruction::TYPE_DRAW: {
				const DrawListDrawInstruction *draw_instruction = reinterpret_cast<const DrawListDrawInstruction *>(instruction);
				print_line("\tDRAW VERTICES", draw_instruction->vertex_count, "INSTANCES", draw_instruction->instance_count);
				instruction_data_cursor += sizeof(DrawListDrawInstruction);
			} break;
			case DrawListInstruction::TYPE_DRAW_INDEXED: {
				const DrawListDrawIndexedInstruction *draw_indexed_instruction = reinterpret_cast<const DrawListDrawIndexedInstruction *>(instruction);
				print_line("\tDRAW INDICES", draw_indexed_instruction->index_count, "INSTANCES", draw_indexed_instruction->instance_count, "FIRST INDEX", draw_indexed_instruction->first_index);
				instruction_data_cursor += sizeof(DrawListDrawIndexedInstruction);
			} break;
			case DrawListInstruction::TYPE_DRAW_INDIRECT: {
				const DrawListDrawIndirectInstruction *draw_indirect_instruction = reinterpret_cast<const DrawListDrawIndirectInstruction *>(instruction);
				print_line("\tDRAW INDIRECT BUFFER ID", itos(draw_indirect_instruction->buffer.id), "OFFSET", draw_indirect_instruction->offset, "DRAW COUNT", draw_indirect_instruction->draw_count, "STRIDE", draw_indirect_instruction->stride);
				instruction_data_cursor += sizeof(DrawListDrawIndirectInstruction);
			} break;
			case DrawListInstruction::TYPE_DRAW_INDEXED_INDIRECT: {
				const DrawListDrawIndexedIndirectInstruction *draw_indexed_indirect_instruction = reinterpret_cast<const DrawListDrawIndexedIndirectInstruction *>(instruction);
				print_line("\tDRAW INDEXED INDIRECT BUFFER ID", itos(draw_indexed_indirect_instruction->buffer.id), "OFFSET", draw_indexed_indirect_instruction->offset, "DRAW COUNT", draw_indexed_indirect_instruction->draw_count, "STRIDE", draw_indexed_indirect_instruction->stride);
				instruction_data_cursor += sizeof(DrawListDrawIndexedIndirectInstruction);
			} break;
			case DrawListInstruction::TYPE_EXECUTE_COMMANDS: {
				print_line("\tEXECUTE COMMANDS");
				instruction_data_cursor += sizeof(DrawListExecuteCommandsInstruction);
			} break;
			case DrawListInstruction::TYPE_NEXT_SUBPASS: {
				print_line("\tNEXT SUBPASS");
				instruction_data_cursor += sizeof(DrawListNextSubpassInstruction);
			} break;
			case DrawListInstruction::TYPE_SET_BLEND_CONSTANTS: {
				const DrawListSetBlendConstantsInstruction *set_blend_constants_instruction = reinterpret_cast<const DrawListSetBlendConstantsInstruction *>(instruction);
				print_line("\tSET BLEND CONSTANTS COLOR", set_blend_constants_instruction->color);
				instruction_data_cursor += sizeof(DrawListSetBlendConstantsInstruction);
			} break;
			case DrawListInstruction::TYPE_SET_LINE_WIDTH: {
				const DrawListSetLineWidthInstruction *set_line_width_instruction = reinterpret_cast<const DrawListSetLineWidthInstruction *>(instruction);
				print_line("\tSET LINE WIDTH", set_line_width_instruction->width);
				instruction_data_cursor += sizeof(DrawListSetLineWidthInstruction);
			} break;
			case DrawListInstruction::TYPE_SET_PUSH_CONSTANT: {
				const DrawListSetPushConstantInstruction *set_push_constant_instruction = reinterpret_cast<const DrawListSetPushConstantInstruction *>(instruction);
				print_line("\tSET PUSH CONSTANT SIZE", set_push_constant_instruction->size);
				instruction_data_cursor += sizeof(DrawListSetPushConstantInstruction);
				instruction_data_cursor += set_push_constant_instruction->size;
			} break;
			case DrawListInstruction::TYPE_SET_SCISSOR: {
				const DrawListSetScissorInstruction *set_scissor_instruction = reinterpret_cast<const DrawListSetScissorInstruction *>(instruction);
				print_line("\tSET SCISSOR", set_scissor_instruction->rect);
				instruction_data_cursor += sizeof(DrawListSetScissorInstruction);
			} break;
			case DrawListInstruction::TYPE_SET_VIEWPORT: {
				const DrawListSetViewportInstruction *set_viewport_instruction = reinterpret_cast<const DrawListSetViewportInstruction *>(instruction);
				print_line("\tSET VIEWPORT", set_viewport_instruction->rect);
				instruction_data_cursor += sizeof(DrawListSetViewportInstruction);
			} break;
			case DrawListInstruction::TYPE_UNIFORM_SET_PREPARE_FOR_USE: {
				const DrawListUniformSetPrepareForUseInstruction *uniform_set_prepare_for_use_instruction = reinterpret_cast<const DrawListUniformSetPrepareForUseInstruction *>(instruction);
				print_line("\tUNIFORM SET PREPARE FOR USE ID", itos(uniform_set_prepare_for_use_instruction->uniform_set.id), "SHADER ID", itos(uniform_set_prepare_for_use_instruction->shader.id), "INDEX", uniform_set_prepare_for_use_instruction->set_index);
				instruction_data_cursor += sizeof(DrawListUniformSetPrepareForUseInstruction);
			} break;
			default:
				DEV_ASSERT(false && "Unknown draw list instruction type.");
				return;
		}
	}
}

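// Debug helper: the compute list equivalent of _print_draw_list().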
void RenderingDeviceGraph::_print_compute_list(const uint8_t *p_instruction_data, uint32_t p_instruction_data_size) {
	uint32_t instruction_data_cursor = 0;
	while (instruction_data_cursor < p_instruction_data_size) {
		DEV_ASSERT((instruction_data_cursor + sizeof(ComputeListInstruction)) <= p_instruction_data_size);
		const ComputeListInstruction *instruction = reinterpret_cast<const ComputeListInstruction *>(&p_instruction_data[instruction_data_cursor]);
		switch (instruction->type) {
			case ComputeListInstruction::TYPE_BIND_PIPELINE: {
				const ComputeListBindPipelineInstruction *bind_pipeline_instruction = reinterpret_cast<const ComputeListBindPipelineInstruction *>(instruction);
				print_line("\tBIND PIPELINE ID", itos(bind_pipeline_instruction->pipeline.id));
				instruction_data_cursor += sizeof(ComputeListBindPipelineInstruction);
			} break;
			case ComputeListInstruction::TYPE_BIND_UNIFORM_SETS: {
				const ComputeListBindUniformSetsInstruction *bind_uniform_sets_instruction = reinterpret_cast<const ComputeListBindUniformSetsInstruction *>(instruction);
				print_line("\tBIND UNIFORM SETS COUNT", bind_uniform_sets_instruction->set_count);
				for (uint32_t i = 0; i < bind_uniform_sets_instruction->set_count; i++) {
					print_line("\tBIND UNIFORM SET ID", itos(bind_uniform_sets_instruction->uniform_set_ids()[i].id), "START INDEX", bind_uniform_sets_instruction->first_set_index);
				}
				instruction_data_cursor += sizeof(ComputeListBindUniformSetsInstruction) + sizeof(RDD::UniformSetID) * bind_uniform_sets_instruction->set_count;
			} break;
			case ComputeListInstruction::TYPE_DISPATCH: {
				const ComputeListDispatchInstruction *dispatch_instruction = reinterpret_cast<const ComputeListDispatchInstruction *>(instruction);
				print_line("\tDISPATCH", dispatch_instruction->x_groups, dispatch_instruction->y_groups, dispatch_instruction->z_groups);
				instruction_data_cursor += sizeof(ComputeListDispatchInstruction);
			} break;
			case ComputeListInstruction::TYPE_DISPATCH_INDIRECT: {
				const ComputeListDispatchIndirectInstruction *dispatch_indirect_instruction = reinterpret_cast<const ComputeListDispatchIndirectInstruction *>(instruction);
				print_line("\tDISPATCH INDIRECT BUFFER ID", itos(dispatch_indirect_instruction->buffer.id), "OFFSET", dispatch_indirect_instruction->offset);
				instruction_data_cursor += sizeof(ComputeListDispatchIndirectInstruction);
			} break;
			case ComputeListInstruction::TYPE_SET_PUSH_CONSTANT: {
				const ComputeListSetPushConstantInstruction *set_push_constant_instruction = reinterpret_cast<const ComputeListSetPushConstantInstruction *>(instruction);
				print_line("\tSET PUSH CONSTANT SIZE", set_push_constant_instruction->size);
				instruction_data_cursor += sizeof(ComputeListSetPushConstantInstruction);
				instruction_data_cursor += set_push_constant_instruction->size;
			} break;
			case ComputeListInstruction::TYPE_UNIFORM_SET_PREPARE_FOR_USE: {
				const ComputeListUniformSetPrepareForUseInstruction *uniform_set_prepare_for_use_instruction = reinterpret_cast<const ComputeListUniformSetPrepareForUseInstruction *>(instruction);
				print_line("\tUNIFORM SET PREPARE FOR USE ID", itos(uniform_set_prepare_for_use_instruction->uniform_set.id), "SHADER ID", itos(uniform_set_prepare_for_use_instruction->shader.id), "INDEX", itos(uniform_set_prepare_for_use_instruction->set_index));
				instruction_data_cursor += sizeof(ComputeListUniformSetPrepareForUseInstruction);
			} break;
			default:
				DEV_ASSERT(false && "Unknown compute list instruction type.");
				return;
		}
	}
}

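// Creates the per-frame secondary command pools and buffers, and caches the
// driver traits the graph consults when recording barriers and clears.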
void RenderingDeviceGraph::initialize(RDD *p_driver, RenderingContextDriver::Device p_device, RenderPassCreationFunction p_render_pass_creation_function, uint32_t p_frame_count, RDD::CommandQueueFamilyID p_secondary_command_queue_family, uint32_t p_secondary_command_buffers_per_frame) {
	DEV_ASSERT(p_driver != nullptr);
	DEV_ASSERT(p_render_pass_creation_function != nullptr);
	DEV_ASSERT(p_frame_count > 0);
	driver = p_driver;
	device = p_device;
	render_pass_creation_function = p_render_pass_creation_function;
	frames.resize(p_frame_count);
	for (uint32_t i = 0; i < p_frame_count; i++) {
		frames[i].secondary_command_buffers.resize(p_secondary_command_buffers_per_frame);
		for (uint32_t j = 0; j < p_secondary_command_buffers_per_frame; j++) {
			SecondaryCommandBuffer &secondary = frames[i].secondary_command_buffers[j];
			secondary.command_pool = driver->command_pool_create(p_secondary_command_queue_family, RDD::COMMAND_BUFFER_TYPE_SECONDARY);
			secondary.command_buffer = driver->command_buffer_create(secondary.command_pool);
			secondary.task = WorkerThreadPool::INVALID_TASK_ID;
		}
	}
	driver_honors_barriers = driver->api_trait_get(RDD::API_TRAIT_HONORS_PIPELINE_BARRIERS);
	driver_clears_with_copy_engine = driver->api_trait_get(RDD::API_TRAIT_CLEARS_WITH_COPY_ENGINE);
	driver_buffers_require_transitions = driver->api_trait_get(RDD::API_TRAIT_BUFFERS_REQUIRE_TRANSITIONS);
}

void RenderingDeviceGraph::finalize() {
	_wait_for_secondary_command_buffer_tasks();
	for (Frame &f : frames) {
		for (SecondaryCommandBuffer &secondary : f.secondary_command_buffers) {
			if (secondary.command_pool.id != 0) {
				driver->command_pool_free(secondary.command_pool);
			}
		}
	}
	frames.clear();
}

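// Resets all recorded state so the graph can record a new set of commands.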
void RenderingDeviceGraph::begin() {
	command_data.clear();
	command_data_offsets.clear();
	command_normalization_barriers.clear();
	command_transition_barriers.clear();
	command_buffer_barriers.clear();
	command_label_chars.clear();
	command_label_colors.clear();
	command_label_offsets.clear();
	command_list_nodes.clear();
	read_slice_list_nodes.clear();
	write_slice_list_nodes.clear();
	command_count = 0;
	command_label_count = 0;
	command_timestamp_index = -1;
	command_synchronization_index = -1;
	command_synchronization_pending = false;
	command_label_index = -1;
	frames[frame].secondary_command_buffers_used = 0;
	draw_instruction_list.index = 0;
	compute_instruction_list.index = 0;
	tracking_frame++;
#ifdef DEV_ENABLED
	write_dependency_counters.clear();
#endif
}

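// The add_buffer_*() functions below allocate a recorded command, fill in the
// transfer parameters, and link it into the graph with copy usage on the
// affected resource trackers.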
void RenderingDeviceGraph::add_buffer_clear(RDD::BufferID p_dst, ResourceTracker *p_dst_tracker, uint32_t p_offset, uint32_t p_size) {
	DEV_ASSERT(p_dst_tracker != nullptr);
	int32_t command_index;
	RecordedBufferClearCommand *command = static_cast<RecordedBufferClearCommand *>(_allocate_command(sizeof(RecordedBufferClearCommand), command_index));
	command->type = RecordedCommand::TYPE_BUFFER_CLEAR;
	command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
	command->buffer = p_dst;
	command->offset = p_offset;
	command->size = p_size;
	ResourceUsage usage = RESOURCE_USAGE_COPY_TO;
	_add_command_to_graph(&p_dst_tracker, &usage, 1, command_index, command);
}

void RenderingDeviceGraph::add_buffer_copy(RDD::BufferID p_src, ResourceTracker *p_src_tracker, RDD::BufferID p_dst, ResourceTracker *p_dst_tracker, RDD::BufferCopyRegion p_region) {
	// Source tracker is allowed to be null as it could be a read-only buffer.
	DEV_ASSERT(p_dst_tracker != nullptr);
	int32_t command_index;
	RecordedBufferCopyCommand *command = static_cast<RecordedBufferCopyCommand *>(_allocate_command(sizeof(RecordedBufferCopyCommand), command_index));
	command->type = RecordedCommand::TYPE_BUFFER_COPY;
	command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
	command->source = p_src;
	command->destination = p_dst;
	command->region = p_region;
	ResourceTracker *trackers[2] = { p_dst_tracker, p_src_tracker };
	ResourceUsage usages[2] = { RESOURCE_USAGE_COPY_TO, RESOURCE_USAGE_COPY_FROM };
	_add_command_to_graph(trackers, usages, p_src_tracker != nullptr ? 2 : 1, command_index, command);
}

void RenderingDeviceGraph::add_buffer_get_data(RDD::BufferID p_src, ResourceTracker *p_src_tracker, RDD::BufferID p_dst, RDD::BufferCopyRegion p_region) {
	// Source tracker is allowed to be null as it could be a read-only buffer.
	int32_t command_index;
	RecordedBufferGetDataCommand *command = static_cast<RecordedBufferGetDataCommand *>(_allocate_command(sizeof(RecordedBufferGetDataCommand), command_index));
	command->type = RecordedCommand::TYPE_BUFFER_GET_DATA;
	command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
	command->source = p_src;
	command->destination = p_dst;
	command->region = p_region;
	if (p_src_tracker != nullptr) {
		ResourceUsage usage = RESOURCE_USAGE_COPY_FROM;
		_add_command_to_graph(&p_src_tracker, &usage, 1, command_index, command);
	} else {
		_add_command_to_graph(nullptr, nullptr, 0, command_index, command);
	}
}

void RenderingDeviceGraph::add_buffer_update(RDD::BufferID p_dst, ResourceTracker *p_dst_tracker, VectorView<RecordedBufferCopy> p_buffer_copies) {
	DEV_ASSERT(p_dst_tracker != nullptr);
	size_t buffer_copies_size = p_buffer_copies.size() * sizeof(RecordedBufferCopy);
	uint64_t command_size = sizeof(RecordedBufferUpdateCommand) + buffer_copies_size;
	int32_t command_index;
	RecordedBufferUpdateCommand *command = static_cast<RecordedBufferUpdateCommand *>(_allocate_command(command_size, command_index));
	command->type = RecordedCommand::TYPE_BUFFER_UPDATE;
	command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
	command->destination = p_dst;
	command->buffer_copies_count = p_buffer_copies.size();
	RecordedBufferCopy *buffer_copies = command->buffer_copies();
	for (uint32_t i = 0; i < command->buffer_copies_count; i++) {
		buffer_copies[i] = p_buffer_copies[i];
	}
	ResourceUsage buffer_usage = RESOURCE_USAGE_COPY_TO;
	_add_command_to_graph(&p_dst_tracker, &buffer_usage, 1, command_index, command);
}

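// The add_compute_list_*() functions serialize instructions into
// compute_instruction_list; nothing is added to the graph until
// add_compute_list_end() bakes them into a single recorded command.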
  1444. void RenderingDeviceGraph::add_compute_list_begin(RDD::BreadcrumbMarker p_phase, uint32_t p_breadcrumb_data) {
  1445. compute_instruction_list.clear();
  1446. #if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
  1447. compute_instruction_list.breadcrumb = p_breadcrumb_data | (p_phase & ((1 << 16) - 1));
  1448. #endif
  1449. compute_instruction_list.index++;
  1450. }
  1451. void RenderingDeviceGraph::add_compute_list_bind_pipeline(RDD::PipelineID p_pipeline) {
  1452. ComputeListBindPipelineInstruction *instruction = reinterpret_cast<ComputeListBindPipelineInstruction *>(_allocate_compute_list_instruction(sizeof(ComputeListBindPipelineInstruction)));
  1453. instruction->type = ComputeListInstruction::TYPE_BIND_PIPELINE;
  1454. instruction->pipeline = p_pipeline;
  1455. compute_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_COMPUTE_SHADER_BIT);
  1456. }
  1457. void RenderingDeviceGraph::add_compute_list_bind_uniform_set(RDD::ShaderID p_shader, RDD::UniformSetID p_uniform_set, uint32_t set_index) {
  1458. add_compute_list_bind_uniform_sets(p_shader, VectorView(&p_uniform_set, 1), set_index, 1);
  1459. }
  1460. void RenderingDeviceGraph::add_compute_list_bind_uniform_sets(RDD::ShaderID p_shader, VectorView<RDD::UniformSetID> p_uniform_sets, uint32_t p_first_set_index, uint32_t p_set_count) {
  1461. DEV_ASSERT(p_uniform_sets.size() >= p_set_count);
  1462. uint32_t instruction_size = sizeof(ComputeListBindUniformSetsInstruction) + sizeof(RDD::UniformSetID) * p_set_count;
  1463. ComputeListBindUniformSetsInstruction *instruction = reinterpret_cast<ComputeListBindUniformSetsInstruction *>(_allocate_compute_list_instruction(instruction_size));
  1464. instruction->type = ComputeListInstruction::TYPE_BIND_UNIFORM_SETS;
  1465. instruction->shader = p_shader;
  1466. instruction->first_set_index = p_first_set_index;
  1467. instruction->set_count = p_set_count;
  1468. RDD::UniformSetID *ids = instruction->uniform_set_ids();
  1469. for (uint32_t i = 0; i < p_set_count; i++) {
  1470. ids[i] = p_uniform_sets[i];
  1471. }
  1472. }
  1473. void RenderingDeviceGraph::add_compute_list_dispatch(uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
  1474. ComputeListDispatchInstruction *instruction = reinterpret_cast<ComputeListDispatchInstruction *>(_allocate_compute_list_instruction(sizeof(ComputeListDispatchInstruction)));
  1475. instruction->type = ComputeListInstruction::TYPE_DISPATCH;
  1476. instruction->x_groups = p_x_groups;
  1477. instruction->y_groups = p_y_groups;
  1478. instruction->z_groups = p_z_groups;
  1479. }
  1480. void RenderingDeviceGraph::add_compute_list_dispatch_indirect(RDD::BufferID p_buffer, uint32_t p_offset) {
  1481. ComputeListDispatchIndirectInstruction *instruction = reinterpret_cast<ComputeListDispatchIndirectInstruction *>(_allocate_compute_list_instruction(sizeof(ComputeListDispatchIndirectInstruction)));
  1482. instruction->type = ComputeListInstruction::TYPE_DISPATCH_INDIRECT;
  1483. instruction->buffer = p_buffer;
  1484. instruction->offset = p_offset;
  1485. compute_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
  1486. }
  1487. void RenderingDeviceGraph::add_compute_list_set_push_constant(RDD::ShaderID p_shader, const void *p_data, uint32_t p_data_size) {
  1488. uint32_t instruction_size = sizeof(ComputeListSetPushConstantInstruction) + p_data_size;
  1489. ComputeListSetPushConstantInstruction *instruction = reinterpret_cast<ComputeListSetPushConstantInstruction *>(_allocate_compute_list_instruction(instruction_size));
  1490. instruction->type = ComputeListInstruction::TYPE_SET_PUSH_CONSTANT;
  1491. instruction->size = p_data_size;
  1492. instruction->shader = p_shader;
  1493. memcpy(instruction->data(), p_data, p_data_size);
  1494. }
  1495. void RenderingDeviceGraph::add_compute_list_uniform_set_prepare_for_use(RDD::ShaderID p_shader, RDD::UniformSetID p_uniform_set, uint32_t set_index) {
  1496. ComputeListUniformSetPrepareForUseInstruction *instruction = reinterpret_cast<ComputeListUniformSetPrepareForUseInstruction *>(_allocate_compute_list_instruction(sizeof(ComputeListUniformSetPrepareForUseInstruction)));
  1497. instruction->type = ComputeListInstruction::TYPE_UNIFORM_SET_PREPARE_FOR_USE;
  1498. instruction->shader = p_shader;
  1499. instruction->uniform_set = p_uniform_set;
  1500. instruction->set_index = set_index;
  1501. }
void RenderingDeviceGraph::add_compute_list_usage(ResourceTracker *p_tracker, ResourceUsage p_usage) {
	DEV_ASSERT(p_tracker != nullptr);

	p_tracker->reset_if_outdated(tracking_frame);

	if (p_tracker->compute_list_index != compute_instruction_list.index) {
		compute_instruction_list.command_trackers.push_back(p_tracker);
		compute_instruction_list.command_tracker_usages.push_back(p_usage);
		p_tracker->compute_list_index = compute_instruction_list.index;
		p_tracker->compute_list_usage = p_usage;
	}
#ifdef DEV_ENABLED
	else if (p_tracker->compute_list_usage != p_usage) {
		ERR_FAIL_MSG(vformat("Tracker can't have more than one type of usage in the same compute list. Compute list usage is %d and the requested usage is %d.", p_tracker->compute_list_usage, p_usage));
	}
#endif
}

void RenderingDeviceGraph::add_compute_list_usages(VectorView<ResourceTracker *> p_trackers, VectorView<ResourceUsage> p_usages) {
	DEV_ASSERT(p_trackers.size() == p_usages.size());

	for (uint32_t i = 0; i < p_trackers.size(); i++) {
		add_compute_list_usage(p_trackers[i], p_usages[i]);
	}
}
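
// Ending the compute list packs the instruction stream accumulated by the calls above into a
// single recorded command, whose dependencies are resolved from the trackers gathered while recording.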
void RenderingDeviceGraph::add_compute_list_end() {
	int32_t command_index;
	uint32_t instruction_data_size = compute_instruction_list.data.size();
	uint32_t command_size = sizeof(RecordedComputeListCommand) + instruction_data_size;
	RecordedComputeListCommand *command = static_cast<RecordedComputeListCommand *>(_allocate_command(command_size, command_index));
	command->type = RecordedCommand::TYPE_COMPUTE_LIST;
	command->self_stages = compute_instruction_list.stages;
	command->instruction_data_size = instruction_data_size;
	memcpy(command->instruction_data(), compute_instruction_list.data.ptr(), instruction_data_size);
	_add_command_to_graph(compute_instruction_list.command_trackers.ptr(), compute_instruction_list.command_tracker_usages.ptr(), compute_instruction_list.command_trackers.size(), command_index, command);
}
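
// Draw lists can be started in two ways: from a framebuffer cache, in which case the render pass
// and framebuffer handles are left empty here and resolved from the cache later, or from explicit
// render pass and framebuffer handles. Both overloads share _add_draw_list_begin.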
void RenderingDeviceGraph::add_draw_list_begin(FramebufferCache *p_framebuffer_cache, Rect2i p_region, VectorView<AttachmentOperation> p_attachment_operations, VectorView<RDD::RenderPassClearValue> p_attachment_clear_values, bool p_uses_color, bool p_uses_depth, uint32_t p_breadcrumb, bool p_split_cmd_buffer) {
	_add_draw_list_begin(p_framebuffer_cache, RDD::RenderPassID(), RDD::FramebufferID(), p_region, p_attachment_operations, p_attachment_clear_values, p_uses_color, p_uses_depth, p_breadcrumb, p_split_cmd_buffer);
}

void RenderingDeviceGraph::add_draw_list_begin(RDD::RenderPassID p_render_pass, RDD::FramebufferID p_framebuffer, Rect2i p_region, VectorView<AttachmentOperation> p_attachment_operations, VectorView<RDD::RenderPassClearValue> p_attachment_clear_values, bool p_uses_color, bool p_uses_depth, uint32_t p_breadcrumb, bool p_split_cmd_buffer) {
	_add_draw_list_begin(nullptr, p_render_pass, p_framebuffer, p_region, p_attachment_operations, p_attachment_clear_values, p_uses_color, p_uses_depth, p_breadcrumb, p_split_cmd_buffer);
}

void RenderingDeviceGraph::add_draw_list_bind_index_buffer(RDD::BufferID p_buffer, RDD::IndexBufferFormat p_format, uint32_t p_offset) {
	DrawListBindIndexBufferInstruction *instruction = reinterpret_cast<DrawListBindIndexBufferInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListBindIndexBufferInstruction)));
	instruction->type = DrawListInstruction::TYPE_BIND_INDEX_BUFFER;
	instruction->buffer = p_buffer;
	instruction->format = p_format;
	instruction->offset = p_offset;

	if (instruction->buffer.id != 0) {
		draw_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT);
	}
}

void RenderingDeviceGraph::add_draw_list_bind_pipeline(RDD::PipelineID p_pipeline, BitField<RDD::PipelineStageBits> p_pipeline_stage_bits) {
	DrawListBindPipelineInstruction *instruction = reinterpret_cast<DrawListBindPipelineInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListBindPipelineInstruction)));
	instruction->type = DrawListInstruction::TYPE_BIND_PIPELINE;
	instruction->pipeline = p_pipeline;
	draw_instruction_list.stages = draw_instruction_list.stages | p_pipeline_stage_bits;
}

void RenderingDeviceGraph::add_draw_list_bind_uniform_set(RDD::ShaderID p_shader, RDD::UniformSetID p_uniform_set, uint32_t set_index) {
	add_draw_list_bind_uniform_sets(p_shader, VectorView(&p_uniform_set, 1), set_index, 1);
}
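
// As with the compute variant, the uniform set IDs are stored inline after the instruction
// header, so the instruction size depends on the set count.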
void RenderingDeviceGraph::add_draw_list_bind_uniform_sets(RDD::ShaderID p_shader, VectorView<RDD::UniformSetID> p_uniform_sets, uint32_t p_first_index, uint32_t p_set_count) {
	DEV_ASSERT(p_uniform_sets.size() >= p_set_count);

	uint32_t instruction_size = sizeof(DrawListBindUniformSetsInstruction) + sizeof(RDD::UniformSetID) * p_set_count;
	DrawListBindUniformSetsInstruction *instruction = reinterpret_cast<DrawListBindUniformSetsInstruction *>(_allocate_draw_list_instruction(instruction_size));
	instruction->type = DrawListInstruction::TYPE_BIND_UNIFORM_SETS;
	instruction->shader = p_shader;
	instruction->first_set_index = p_first_index;
	instruction->set_count = p_set_count;

	for (uint32_t i = 0; i < p_set_count; i++) {
		instruction->uniform_set_ids()[i] = p_uniform_sets[i];
	}
}

void RenderingDeviceGraph::add_draw_list_bind_vertex_buffers(VectorView<RDD::BufferID> p_vertex_buffers, VectorView<uint64_t> p_vertex_buffer_offsets) {
	DEV_ASSERT(p_vertex_buffers.size() == p_vertex_buffer_offsets.size());

	uint32_t instruction_size = sizeof(DrawListBindVertexBuffersInstruction) + sizeof(RDD::BufferID) * p_vertex_buffers.size() + sizeof(uint64_t) * p_vertex_buffer_offsets.size();
	DrawListBindVertexBuffersInstruction *instruction = reinterpret_cast<DrawListBindVertexBuffersInstruction *>(_allocate_draw_list_instruction(instruction_size));
	instruction->type = DrawListInstruction::TYPE_BIND_VERTEX_BUFFERS;
	instruction->vertex_buffers_count = p_vertex_buffers.size();

	RDD::BufferID *vertex_buffers = instruction->vertex_buffers();
	uint64_t *vertex_buffer_offsets = instruction->vertex_buffer_offsets();
	for (uint32_t i = 0; i < instruction->vertex_buffers_count; i++) {
		vertex_buffers[i] = p_vertex_buffers[i];
		vertex_buffer_offsets[i] = p_vertex_buffer_offsets[i];
	}

	if (instruction->vertex_buffers_count > 0) {
		draw_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_VERTEX_INPUT_BIT);
	}
}
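
// Two variable-length arrays (the attachment clears and their clear rects) are packed back to
// back after the instruction header; the accessors below return pointers into that trailing storage.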
void RenderingDeviceGraph::add_draw_list_clear_attachments(VectorView<RDD::AttachmentClear> p_attachments_clear, VectorView<Rect2i> p_attachments_clear_rect) {
	uint32_t instruction_size = sizeof(DrawListClearAttachmentsInstruction) + sizeof(RDD::AttachmentClear) * p_attachments_clear.size() + sizeof(Rect2i) * p_attachments_clear_rect.size();
	DrawListClearAttachmentsInstruction *instruction = reinterpret_cast<DrawListClearAttachmentsInstruction *>(_allocate_draw_list_instruction(instruction_size));
	instruction->type = DrawListInstruction::TYPE_CLEAR_ATTACHMENTS;
	instruction->attachments_clear_count = p_attachments_clear.size();
	instruction->attachments_clear_rect_count = p_attachments_clear_rect.size();

	RDD::AttachmentClear *attachments_clear = instruction->attachments_clear();
	Rect2i *attachments_clear_rect = instruction->attachments_clear_rect();
	for (uint32_t i = 0; i < instruction->attachments_clear_count; i++) {
		attachments_clear[i] = p_attachments_clear[i];
	}

	for (uint32_t i = 0; i < instruction->attachments_clear_rect_count; i++) {
		attachments_clear_rect[i] = p_attachments_clear_rect[i];
	}
}

void RenderingDeviceGraph::add_draw_list_draw(uint32_t p_vertex_count, uint32_t p_instance_count) {
	DrawListDrawInstruction *instruction = reinterpret_cast<DrawListDrawInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListDrawInstruction)));
	instruction->type = DrawListInstruction::TYPE_DRAW;
	instruction->vertex_count = p_vertex_count;
	instruction->instance_count = p_instance_count;
}

void RenderingDeviceGraph::add_draw_list_draw_indexed(uint32_t p_index_count, uint32_t p_instance_count, uint32_t p_first_index) {
	DrawListDrawIndexedInstruction *instruction = reinterpret_cast<DrawListDrawIndexedInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListDrawIndexedInstruction)));
	instruction->type = DrawListInstruction::TYPE_DRAW_INDEXED;
	instruction->index_count = p_index_count;
	instruction->instance_count = p_instance_count;
	instruction->first_index = p_first_index;
}
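
// Indirect draws (indexed or not) read their draw parameters from a GPU buffer, so the draw
// list must also synchronize against the indirect argument stage.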
void RenderingDeviceGraph::add_draw_list_draw_indirect(RDD::BufferID p_buffer, uint32_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DrawListDrawIndirectInstruction *instruction = reinterpret_cast<DrawListDrawIndirectInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListDrawIndirectInstruction)));
	instruction->type = DrawListInstruction::TYPE_DRAW_INDIRECT;
	instruction->buffer = p_buffer;
	instruction->offset = p_offset;
	instruction->draw_count = p_draw_count;
	instruction->stride = p_stride;
	draw_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
}

void RenderingDeviceGraph::add_draw_list_draw_indexed_indirect(RDD::BufferID p_buffer, uint32_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DrawListDrawIndexedIndirectInstruction *instruction = reinterpret_cast<DrawListDrawIndexedIndirectInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListDrawIndexedIndirectInstruction)));
	instruction->type = DrawListInstruction::TYPE_DRAW_INDEXED_INDIRECT;
	instruction->buffer = p_buffer;
	instruction->offset = p_offset;
	instruction->draw_count = p_draw_count;
	instruction->stride = p_stride;
	draw_instruction_list.stages.set_flag(RDD::PIPELINE_STAGE_DRAW_INDIRECT_BIT);
}

void RenderingDeviceGraph::add_draw_list_execute_commands(RDD::CommandBufferID p_command_buffer) {
	DrawListExecuteCommandsInstruction *instruction = reinterpret_cast<DrawListExecuteCommandsInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListExecuteCommandsInstruction)));
	instruction->type = DrawListInstruction::TYPE_EXECUTE_COMMANDS;
	instruction->command_buffer = p_command_buffer;
}

void RenderingDeviceGraph::add_draw_list_next_subpass(RDD::CommandBufferType p_command_buffer_type) {
	DrawListNextSubpassInstruction *instruction = reinterpret_cast<DrawListNextSubpassInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListNextSubpassInstruction)));
	instruction->type = DrawListInstruction::TYPE_NEXT_SUBPASS;
	instruction->command_buffer_type = p_command_buffer_type;
}

void RenderingDeviceGraph::add_draw_list_set_blend_constants(const Color &p_color) {
	DrawListSetBlendConstantsInstruction *instruction = reinterpret_cast<DrawListSetBlendConstantsInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListSetBlendConstantsInstruction)));
	instruction->type = DrawListInstruction::TYPE_SET_BLEND_CONSTANTS;
	instruction->color = p_color;
}

void RenderingDeviceGraph::add_draw_list_set_line_width(float p_width) {
	DrawListSetLineWidthInstruction *instruction = reinterpret_cast<DrawListSetLineWidthInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListSetLineWidthInstruction)));
	instruction->type = DrawListInstruction::TYPE_SET_LINE_WIDTH;
	instruction->width = p_width;
}

void RenderingDeviceGraph::add_draw_list_set_push_constant(RDD::ShaderID p_shader, const void *p_data, uint32_t p_data_size) {
	uint32_t instruction_size = sizeof(DrawListSetPushConstantInstruction) + p_data_size;
	DrawListSetPushConstantInstruction *instruction = reinterpret_cast<DrawListSetPushConstantInstruction *>(_allocate_draw_list_instruction(instruction_size));
	instruction->type = DrawListInstruction::TYPE_SET_PUSH_CONSTANT;
	instruction->size = p_data_size;
	instruction->shader = p_shader;
	memcpy(instruction->data(), p_data, p_data_size);
}

void RenderingDeviceGraph::add_draw_list_set_scissor(Rect2i p_rect) {
	DrawListSetScissorInstruction *instruction = reinterpret_cast<DrawListSetScissorInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListSetScissorInstruction)));
	instruction->type = DrawListInstruction::TYPE_SET_SCISSOR;
	instruction->rect = p_rect;
}

void RenderingDeviceGraph::add_draw_list_set_viewport(Rect2i p_rect) {
	DrawListSetViewportInstruction *instruction = reinterpret_cast<DrawListSetViewportInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListSetViewportInstruction)));
	instruction->type = DrawListInstruction::TYPE_SET_VIEWPORT;
	instruction->rect = p_rect;
}

void RenderingDeviceGraph::add_draw_list_uniform_set_prepare_for_use(RDD::ShaderID p_shader, RDD::UniformSetID p_uniform_set, uint32_t set_index) {
	DrawListUniformSetPrepareForUseInstruction *instruction = reinterpret_cast<DrawListUniformSetPrepareForUseInstruction *>(_allocate_draw_list_instruction(sizeof(DrawListUniformSetPrepareForUseInstruction)));
	instruction->type = DrawListInstruction::TYPE_UNIFORM_SET_PREPARE_FOR_USE;
	instruction->shader = p_shader;
	instruction->uniform_set = p_uniform_set;
	instruction->set_index = set_index;
}

void RenderingDeviceGraph::add_draw_list_usage(ResourceTracker *p_tracker, ResourceUsage p_usage) {
	p_tracker->reset_if_outdated(tracking_frame);

	if (p_tracker->draw_list_index != draw_instruction_list.index) {
		draw_instruction_list.command_trackers.push_back(p_tracker);
		draw_instruction_list.command_tracker_usages.push_back(p_usage);
		p_tracker->draw_list_index = draw_instruction_list.index;
		p_tracker->draw_list_usage = p_usage;
	}
#ifdef DEV_ENABLED
	else if (p_tracker->draw_list_usage != p_usage) {
		ERR_FAIL_MSG(vformat("Tracker can't have more than one type of usage in the same draw list. Draw list usage is %d and the requested usage is %d.", p_tracker->draw_list_usage, p_usage));
	}
#endif
}

void RenderingDeviceGraph::add_draw_list_usages(VectorView<ResourceTracker *> p_trackers, VectorView<ResourceUsage> p_usages) {
	DEV_ASSERT(p_trackers.size() == p_usages.size());

	for (uint32_t i = 0; i < p_trackers.size(); i++) {
		add_draw_list_usage(p_trackers[i], p_usages[i]);
	}
}
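
// Ending the draw list serializes everything gathered during recording into one command: the
// clear values, the attachment trackers with their derived load/store operations, and the raw
// instruction stream are all appended after the command header.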
void RenderingDeviceGraph::add_draw_list_end() {
	FramebufferCache *framebuffer_cache = draw_instruction_list.framebuffer_cache;
	int32_t command_index;
	uint32_t clear_values_size = sizeof(RDD::RenderPassClearValue) * draw_instruction_list.attachment_clear_values.size();
	uint32_t trackers_count = framebuffer_cache != nullptr ? framebuffer_cache->trackers.size() : 0;
	uint32_t trackers_and_ops_size = (sizeof(ResourceTracker *) + sizeof(RDD::AttachmentLoadOp) + sizeof(RDD::AttachmentStoreOp)) * trackers_count;
	uint32_t instruction_data_size = draw_instruction_list.data.size();
	uint32_t command_size = sizeof(RecordedDrawListCommand) + clear_values_size + trackers_and_ops_size + instruction_data_size;
	RecordedDrawListCommand *command = static_cast<RecordedDrawListCommand *>(_allocate_command(command_size, command_index));
	command->type = RecordedCommand::TYPE_DRAW_LIST;
	command->self_stages = draw_instruction_list.stages;
	command->framebuffer_cache = framebuffer_cache;
	command->render_pass = draw_instruction_list.render_pass;
	command->framebuffer = draw_instruction_list.framebuffer;
	command->instruction_data_size = instruction_data_size;
	command->command_buffer_type = RDD::COMMAND_BUFFER_TYPE_PRIMARY;
	command->region = draw_instruction_list.region;
#if defined(DEBUG_ENABLED) || defined(DEV_ENABLED)
	command->breadcrumb = draw_instruction_list.breadcrumb;
#endif
	command->split_cmd_buffer = draw_instruction_list.split_cmd_buffer;
	command->clear_values_count = draw_instruction_list.attachment_clear_values.size();
	command->trackers_count = trackers_count;

	// Initialize the load and store operations to their default behaviors. The store behavior will be modified if a command depends on the result of this render pass.
	uint32_t attachment_op_count = draw_instruction_list.attachment_operations.size();
	ResourceTracker **trackers = command->trackers();
	RDD::AttachmentLoadOp *load_ops = command->load_ops();
	RDD::AttachmentStoreOp *store_ops = command->store_ops();
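	// For each attachment: explicit clear/ignore operations pick the load op directly; discardable
	// attachments only load their previous contents if they were already written this frame, and
	// default to not being stored; everything else is loaded and stored normally.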
	for (uint32_t i = 0; i < command->trackers_count; i++) {
		ResourceTracker *resource_tracker = framebuffer_cache->trackers[i];
		if (resource_tracker != nullptr) {
			if (i < command->clear_values_count && i < attachment_op_count && draw_instruction_list.attachment_operations[i] == ATTACHMENT_OPERATION_CLEAR) {
				load_ops[i] = RDD::ATTACHMENT_LOAD_OP_CLEAR;
			} else if (i < attachment_op_count && draw_instruction_list.attachment_operations[i] == ATTACHMENT_OPERATION_IGNORE) {
				load_ops[i] = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
			} else if (resource_tracker->is_discardable) {
				bool resource_has_parent = resource_tracker->parent != nullptr;
				ResourceTracker *search_tracker = resource_has_parent ? resource_tracker->parent : resource_tracker;
				search_tracker->reset_if_outdated(tracking_frame);
				bool resource_was_modified_this_frame = search_tracker->write_command_or_list_index >= 0;
				load_ops[i] = resource_was_modified_this_frame ? RDD::ATTACHMENT_LOAD_OP_LOAD : RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
			} else {
				load_ops[i] = RDD::ATTACHMENT_LOAD_OP_LOAD;
			}

			store_ops[i] = resource_tracker->is_discardable ? RDD::ATTACHMENT_STORE_OP_DONT_CARE : RDD::ATTACHMENT_STORE_OP_STORE;
		} else {
			load_ops[i] = RDD::ATTACHMENT_LOAD_OP_DONT_CARE;
			store_ops[i] = RDD::ATTACHMENT_STORE_OP_DONT_CARE;
		}

		trackers[i] = resource_tracker;
	}

	RDD::RenderPassClearValue *clear_values = command->clear_values();
	for (uint32_t i = 0; i < command->clear_values_count; i++) {
		clear_values[i] = draw_instruction_list.attachment_clear_values[i];
	}

	memcpy(command->instruction_data(), draw_instruction_list.data.ptr(), instruction_data_size);
	_add_command_to_graph(draw_instruction_list.command_trackers.ptr(), draw_instruction_list.command_tracker_usages.ptr(), draw_instruction_list.command_trackers.size(), command_index, command);
}

void RenderingDeviceGraph::add_texture_clear(RDD::TextureID p_dst, ResourceTracker *p_dst_tracker, const Color &p_color, const RDD::TextureSubresourceRange &p_range) {
	DEV_ASSERT(p_dst_tracker != nullptr);

	int32_t command_index;
	RecordedTextureClearCommand *command = static_cast<RecordedTextureClearCommand *>(_allocate_command(sizeof(RecordedTextureClearCommand), command_index));
	command->type = RecordedCommand::TYPE_TEXTURE_CLEAR;
	command->texture = p_dst;
	command->color = p_color;
	command->range = p_range;

	ResourceUsage usage;
	if (driver_clears_with_copy_engine) {
		command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
		usage = RESOURCE_USAGE_COPY_TO;
	} else {
		// If the driver is incapable of using the copy engine to clear the image (e.g. D3D12), we must transition the
		// resource to either a render target or a storage image, as those are the only two ways it can perform the operation.
		if (p_dst_tracker->texture_usage & RDD::TEXTURE_USAGE_COLOR_ATTACHMENT_BIT) {
			command->self_stages = RDD::PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
			usage = RESOURCE_USAGE_ATTACHMENT_COLOR_READ_WRITE;
		} else {
			command->self_stages = RDD::PIPELINE_STAGE_CLEAR_STORAGE_BIT;
			usage = RESOURCE_USAGE_STORAGE_IMAGE_READ_WRITE;
		}
	}

	_add_command_to_graph(&p_dst_tracker, &usage, 1, command_index, command);
}

void RenderingDeviceGraph::add_texture_copy(RDD::TextureID p_src, ResourceTracker *p_src_tracker, RDD::TextureID p_dst, ResourceTracker *p_dst_tracker, VectorView<RDD::TextureCopyRegion> p_texture_copy_regions) {
	DEV_ASSERT(p_src_tracker != nullptr);
	DEV_ASSERT(p_dst_tracker != nullptr);

	int32_t command_index;
	uint64_t command_size = sizeof(RecordedTextureCopyCommand) + p_texture_copy_regions.size() * sizeof(RDD::TextureCopyRegion);
	RecordedTextureCopyCommand *command = static_cast<RecordedTextureCopyCommand *>(_allocate_command(command_size, command_index));
	command->type = RecordedCommand::TYPE_TEXTURE_COPY;
	command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
	command->from_texture = p_src;
	command->to_texture = p_dst;
	command->texture_copy_regions_count = p_texture_copy_regions.size();

	RDD::TextureCopyRegion *texture_copy_regions = command->texture_copy_regions();
	for (uint32_t i = 0; i < command->texture_copy_regions_count; i++) {
		texture_copy_regions[i] = p_texture_copy_regions[i];
	}

	ResourceTracker *trackers[2] = { p_dst_tracker, p_src_tracker };
	ResourceUsage usages[2] = { RESOURCE_USAGE_COPY_TO, RESOURCE_USAGE_COPY_FROM };
	_add_command_to_graph(trackers, usages, 2, command_index, command);
}

void RenderingDeviceGraph::add_texture_get_data(RDD::TextureID p_src, ResourceTracker *p_src_tracker, RDD::BufferID p_dst, VectorView<RDD::BufferTextureCopyRegion> p_buffer_texture_copy_regions, ResourceTracker *p_dst_tracker) {
	DEV_ASSERT(p_src_tracker != nullptr);

	int32_t command_index;
	uint64_t command_size = sizeof(RecordedTextureGetDataCommand) + p_buffer_texture_copy_regions.size() * sizeof(RDD::BufferTextureCopyRegion);
	RecordedTextureGetDataCommand *command = static_cast<RecordedTextureGetDataCommand *>(_allocate_command(command_size, command_index));
	command->type = RecordedCommand::TYPE_TEXTURE_GET_DATA;
	command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
	command->from_texture = p_src;
	command->to_buffer = p_dst;
	command->buffer_texture_copy_regions_count = p_buffer_texture_copy_regions.size();

	RDD::BufferTextureCopyRegion *buffer_texture_copy_regions = command->buffer_texture_copy_regions();
	for (uint32_t i = 0; i < command->buffer_texture_copy_regions_count; i++) {
		buffer_texture_copy_regions[i] = p_buffer_texture_copy_regions[i];
	}

	if (p_dst_tracker != nullptr) {
		// Add the optional destination tracker if it was provided.
		ResourceTracker *trackers[2] = { p_dst_tracker, p_src_tracker };
		ResourceUsage usages[2] = { RESOURCE_USAGE_COPY_TO, RESOURCE_USAGE_COPY_FROM };
		_add_command_to_graph(trackers, usages, 2, command_index, command);
	} else {
		ResourceUsage usage = RESOURCE_USAGE_COPY_FROM;
		_add_command_to_graph(&p_src_tracker, &usage, 1, command_index, command);
	}
}

void RenderingDeviceGraph::add_texture_resolve(RDD::TextureID p_src, ResourceTracker *p_src_tracker, RDD::TextureID p_dst, ResourceTracker *p_dst_tracker, uint32_t p_src_layer, uint32_t p_src_mipmap, uint32_t p_dst_layer, uint32_t p_dst_mipmap) {
	DEV_ASSERT(p_src_tracker != nullptr);
	DEV_ASSERT(p_dst_tracker != nullptr);

	int32_t command_index;
	RecordedTextureResolveCommand *command = static_cast<RecordedTextureResolveCommand *>(_allocate_command(sizeof(RecordedTextureResolveCommand), command_index));
	command->type = RecordedCommand::TYPE_TEXTURE_RESOLVE;
	command->self_stages = RDD::PIPELINE_STAGE_RESOLVE_BIT;
	command->from_texture = p_src;
	command->to_texture = p_dst;
	command->src_layer = p_src_layer;
	command->src_mipmap = p_src_mipmap;
	command->dst_layer = p_dst_layer;
	command->dst_mipmap = p_dst_mipmap;

	ResourceTracker *trackers[2] = { p_dst_tracker, p_src_tracker };
	ResourceUsage usages[2] = { RESOURCE_USAGE_RESOLVE_TO, RESOURCE_USAGE_RESOLVE_FROM };
	_add_command_to_graph(trackers, usages, 2, command_index, command);
}
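
// Texture updates copy from one or more staging buffers into the destination texture. When the
// caller provides trackers for those buffers, they are synchronized as copy sources alongside the
// destination; the thread-local scratch vectors are reused across calls to avoid reallocations.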
void RenderingDeviceGraph::add_texture_update(RDD::TextureID p_dst, ResourceTracker *p_dst_tracker, VectorView<RecordedBufferToTextureCopy> p_buffer_copies, VectorView<ResourceTracker *> p_buffer_trackers) {
	DEV_ASSERT(p_dst_tracker != nullptr);

	int32_t command_index;
	uint64_t command_size = sizeof(RecordedTextureUpdateCommand) + p_buffer_copies.size() * sizeof(RecordedBufferToTextureCopy);
	RecordedTextureUpdateCommand *command = static_cast<RecordedTextureUpdateCommand *>(_allocate_command(command_size, command_index));
	command->type = RecordedCommand::TYPE_TEXTURE_UPDATE;
	command->self_stages = RDD::PIPELINE_STAGE_COPY_BIT;
	command->to_texture = p_dst;
	command->buffer_to_texture_copies_count = p_buffer_copies.size();

	RecordedBufferToTextureCopy *buffer_to_texture_copies = command->buffer_to_texture_copies();
	for (uint32_t i = 0; i < command->buffer_to_texture_copies_count; i++) {
		buffer_to_texture_copies[i] = p_buffer_copies[i];
	}

	if (p_buffer_trackers.size() > 0) {
		// Add the optional buffer trackers if they were provided.
		thread_local LocalVector<ResourceTracker *> trackers;
		thread_local LocalVector<ResourceUsage> usages;
		trackers.clear();
		usages.clear();
		for (uint32_t i = 0; i < p_buffer_trackers.size(); i++) {
			trackers.push_back(p_buffer_trackers[i]);
			usages.push_back(RESOURCE_USAGE_COPY_FROM);
		}

		trackers.push_back(p_dst_tracker);
		usages.push_back(RESOURCE_USAGE_COPY_TO);
		_add_command_to_graph(trackers.ptr(), usages.ptr(), trackers.size(), command_index, command);
	} else {
		ResourceUsage usage = RESOURCE_USAGE_COPY_TO;
		_add_command_to_graph(&p_dst_tracker, &usage, 1, command_index, command);
	}
}

void RenderingDeviceGraph::add_capture_timestamp(RDD::QueryPoolID p_query_pool, uint32_t p_index) {
	int32_t command_index;
	RecordedCaptureTimestampCommand *command = static_cast<RecordedCaptureTimestampCommand *>(_allocate_command(sizeof(RecordedCaptureTimestampCommand), command_index));
	command->type = RecordedCommand::TYPE_CAPTURE_TIMESTAMP;
	command->self_stages = 0;
	command->pool = p_query_pool;
	command->index = p_index;
	_add_command_to_graph(nullptr, nullptr, 0, command_index, command);
}

void RenderingDeviceGraph::add_synchronization() {
	// Synchronization is only acknowledged if commands have been recorded on the graph already.
	if (command_count > 0) {
		command_synchronization_pending = true;
	}
}
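
// Labels are stored as null-terminated UTF-8 strings appended to a single shared character
// buffer; each label records its color and its offset into that buffer.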
void RenderingDeviceGraph::begin_label(const String &p_label_name, const Color &p_color) {
	uint32_t command_label_offset = command_label_chars.size();
	PackedByteArray command_label_utf8 = p_label_name.to_utf8_buffer();
	int command_label_utf8_size = command_label_utf8.size();
	command_label_chars.resize(command_label_offset + command_label_utf8_size + 1);
	memcpy(&command_label_chars[command_label_offset], command_label_utf8.ptr(), command_label_utf8.size());
	command_label_chars[command_label_offset + command_label_utf8_size] = '\0';
	command_label_colors.push_back(p_color);
	command_label_offsets.push_back(command_label_offset);
	command_label_index = command_label_count;
	command_label_count++;
}

void RenderingDeviceGraph::end_label() {
	command_label_index = -1;
}
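
// end() is where the recorded graph is executed. When reordering is enabled, commands are sorted
// topologically (Kahn's algorithm, using a stack instead of a queue) and grouped into dependency
// levels so that commands within a level can share a single set of barriers.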
void RenderingDeviceGraph::end(bool p_reorder_commands, bool p_full_barriers, RDD::CommandBufferID &r_command_buffer, CommandBufferPool &r_command_buffer_pool) {
	if (command_count == 0) {
		// No commands have been logged, do nothing.
		return;
	}

	thread_local LocalVector<RecordedCommandSort> commands_sorted;
	if (p_reorder_commands) {
		thread_local LocalVector<int64_t> command_stack;
		thread_local LocalVector<int32_t> sorted_command_indices;
		thread_local LocalVector<uint32_t> command_degrees;
		int32_t adjacency_list_index = 0;
		int32_t command_index;

		// Count all the incoming connections to every node by traversing their adjacency lists.
		command_degrees.resize(command_count);
		memset(command_degrees.ptr(), 0, sizeof(uint32_t) * command_degrees.size());
		for (uint32_t i = 0; i < command_count; i++) {
			const RecordedCommand &recorded_command = *reinterpret_cast<const RecordedCommand *>(&command_data[command_data_offsets[i]]);
			adjacency_list_index = recorded_command.adjacent_command_list_index;
			while (adjacency_list_index >= 0) {
				const RecordedCommandListNode &command_list_node = command_list_nodes[adjacency_list_index];
				DEV_ASSERT((command_list_node.command_index != int32_t(i)) && "Command can't have itself as a dependency.");
				command_degrees[command_list_node.command_index] += 1;
				adjacency_list_index = command_list_node.next_list_index;
			}
		}

		// Push to the stack all nodes that have no incoming connections.
		command_stack.clear();
		for (uint32_t i = 0; i < command_count; i++) {
			if (command_degrees[i] == 0) {
				command_stack.push_back(i);
			}
		}

		sorted_command_indices.clear();
		while (!command_stack.is_empty()) {
			// Pop command from the stack.
			command_index = command_stack[command_stack.size() - 1];
			command_stack.resize(command_stack.size() - 1);

			// Add it to the sorted commands.
			sorted_command_indices.push_back(command_index);

			// Visit its adjacent commands and lower their degrees. If a degree reaches zero, push that command to the stack.
			const uint32_t command_data_offset = command_data_offsets[command_index];
			const RecordedCommand &recorded_command = *reinterpret_cast<const RecordedCommand *>(&command_data[command_data_offset]);
			adjacency_list_index = recorded_command.adjacent_command_list_index;
			while (adjacency_list_index >= 0) {
				const RecordedCommandListNode &command_list_node = command_list_nodes[adjacency_list_index];
				uint32_t &command_degree = command_degrees[command_list_node.command_index];
				DEV_ASSERT(command_degree > 0);
				command_degree--;
				if (command_degree == 0) {
					command_stack.push_back(command_list_node.command_index);
				}

				adjacency_list_index = command_list_node.next_list_index;
			}
		}

		// Batch buffer, texture, draw list and compute operations together.
		const uint32_t PriorityTable[RecordedCommand::TYPE_MAX] = {
			0, // TYPE_NONE
			1, // TYPE_BUFFER_CLEAR
			1, // TYPE_BUFFER_COPY
			1, // TYPE_BUFFER_GET_DATA
			1, // TYPE_BUFFER_UPDATE
			4, // TYPE_COMPUTE_LIST
			3, // TYPE_DRAW_LIST
			2, // TYPE_TEXTURE_CLEAR
			2, // TYPE_TEXTURE_COPY
			2, // TYPE_TEXTURE_GET_DATA
			2, // TYPE_TEXTURE_RESOLVE
			2, // TYPE_TEXTURE_UPDATE
			2, // TYPE_INSERT_BREADCRUMB
		};

		commands_sorted.clear();
		commands_sorted.resize(command_count);
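
		// Propagate execution levels along the adjacency lists: a command's level is one more than
		// the deepest of its dependencies, so commands that share a level have no dependencies
		// between each other.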
		for (uint32_t i = 0; i < command_count; i++) {
			const int32_t sorted_command_index = sorted_command_indices[i];
			const uint32_t command_data_offset = command_data_offsets[sorted_command_index];
			const RecordedCommand recorded_command = *reinterpret_cast<const RecordedCommand *>(&command_data[command_data_offset]);
			const uint32_t next_command_level = commands_sorted[sorted_command_index].level + 1;
			adjacency_list_index = recorded_command.adjacent_command_list_index;
			while (adjacency_list_index >= 0) {
				const RecordedCommandListNode &command_list_node = command_list_nodes[adjacency_list_index];
				uint32_t &adjacent_command_level = commands_sorted[command_list_node.command_index].level;
				if (adjacent_command_level < next_command_level) {
					adjacent_command_level = next_command_level;
				}

				adjacency_list_index = command_list_node.next_list_index;
			}

			commands_sorted[sorted_command_index].index = sorted_command_index;
			commands_sorted[sorted_command_index].priority = PriorityTable[recorded_command.type];
		}
	} else {
		commands_sorted.clear();
		commands_sorted.resize(command_count);
		for (uint32_t i = 0; i < command_count; i++) {
			commands_sorted[i].index = i;
		}
	}

	_wait_for_secondary_command_buffer_tasks();

	if (command_count > 0) {
		int32_t current_label_index = -1;
		int32_t current_label_level = -1;
		_run_label_command_change(r_command_buffer, -1, -1, true, true, nullptr, 0, current_label_index, current_label_level);

		if (device.workarounds.avoid_compute_after_draw) {
			// Reset the state of the workaround.
			workarounds_state.draw_list_found = false;
		}

		if (p_reorder_commands) {
#if PRINT_RENDER_GRAPH
			print_line("BEFORE SORT");
			_print_render_commands(commands_sorted.ptr(), command_count);
#endif

			commands_sorted.sort();

#if PRINT_RENDER_GRAPH
			print_line("AFTER SORT");
			_print_render_commands(commands_sorted.ptr(), command_count);
#endif

#if PRINT_COMMAND_RECORDING
			print_line(vformat("Recording %d commands", command_count));
#endif
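
			// Execute the sorted commands one dependency level at a time: barriers for all the
			// commands in a level are grouped into a single transition before the level runs.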
			uint32_t boosted_priority = 0;
			uint32_t current_level = commands_sorted[0].level;
			uint32_t current_level_start = 0;
			for (uint32_t i = 0; i < command_count; i++) {
				if (current_level != commands_sorted[i].level) {
					RecordedCommandSort *level_command_ptr = &commands_sorted[current_level_start];
					uint32_t level_command_count = i - current_level_start;
					_boost_priority_for_render_commands(level_command_ptr, level_command_count, boosted_priority);
					_group_barriers_for_render_commands(r_command_buffer, level_command_ptr, level_command_count, p_full_barriers);
					_run_render_commands(current_level, level_command_ptr, level_command_count, r_command_buffer, r_command_buffer_pool, current_label_index, current_label_level);
					current_level = commands_sorted[i].level;
					current_level_start = i;
				}
			}

			RecordedCommandSort *level_command_ptr = &commands_sorted[current_level_start];
			uint32_t level_command_count = command_count - current_level_start;
			_boost_priority_for_render_commands(level_command_ptr, level_command_count, boosted_priority);
			_group_barriers_for_render_commands(r_command_buffer, level_command_ptr, level_command_count, p_full_barriers);
			_run_render_commands(current_level, level_command_ptr, level_command_count, r_command_buffer, r_command_buffer_pool, current_label_index, current_label_level);

#if PRINT_RENDER_GRAPH
			print_line("COMMANDS", command_count, "LEVELS", current_level + 1);
#endif
		} else {
			for (uint32_t i = 0; i < command_count; i++) {
				_group_barriers_for_render_commands(r_command_buffer, &commands_sorted[i], 1, p_full_barriers);
				_run_render_commands(i, &commands_sorted[i], 1, r_command_buffer, r_command_buffer_pool, current_label_index, current_label_level);
			}
		}

		_run_label_command_change(r_command_buffer, -1, -1, false, false, nullptr, 0, current_label_index, current_label_level);

#if PRINT_COMMAND_RECORDING
		print_line(vformat("Recorded %d commands", command_count));
#endif
	}

	// Advance the frame counter. It's not necessary to do this if no commands are recorded because that means no secondary command buffers were used.
	frame = (frame + 1) % frames.size();
}

#if PRINT_RESOURCE_TRACKER_TOTAL
static uint32_t resource_tracker_total = 0;
#endif

RenderingDeviceGraph::ResourceTracker *RenderingDeviceGraph::resource_tracker_create() {
#if PRINT_RESOURCE_TRACKER_TOTAL
	print_line("Resource trackers:", ++resource_tracker_total);
#endif
	return memnew(ResourceTracker);
}

void RenderingDeviceGraph::resource_tracker_free(ResourceTracker *p_tracker) {
	if (p_tracker == nullptr) {
		return;
	}

	if (p_tracker->in_parent_dirty_list) {
		// Delete the tracker from the parent's dirty linked list.
		if (p_tracker->parent->dirty_shared_list == p_tracker) {
			p_tracker->parent->dirty_shared_list = p_tracker->next_shared;
		} else {
			ResourceTracker *node = p_tracker->parent->dirty_shared_list;
			while (node != nullptr) {
				if (node->next_shared == p_tracker) {
					node->next_shared = p_tracker->next_shared;
					node = nullptr;
				} else {
					node = node->next_shared;
				}
			}
		}
	}

	memdelete(p_tracker);

#if PRINT_RESOURCE_TRACKER_TOTAL
	print_line("Resource trackers:", --resource_tracker_total);
#endif
}

RenderingDeviceGraph::FramebufferCache *RenderingDeviceGraph::framebuffer_cache_create() {
	return memnew(FramebufferCache);
}
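
// The cache owns both the framebuffers and the render passes it created, so both must be
// released through the driver for every entry before the cache itself is deleted.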
void RenderingDeviceGraph::framebuffer_cache_free(RDD *p_driver, FramebufferCache *p_cache) {
	DEV_ASSERT(p_driver != nullptr);

	if (p_cache == nullptr) {
		return;
	}

	for (KeyValue<uint64_t, FramebufferStorage> &E : p_cache->storage_map) {
		p_driver->framebuffer_free(E.value.framebuffer);
		p_driver->render_pass_free(E.value.render_pass);
	}

	memdelete(p_cache);
}