
/**************************************************************************/
/* metal_objects.mm */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/

/**************************************************************************/
/* */
/* Portions of this code were derived from MoltenVK. */
/* */
/* Copyright (c) 2015-2023 The Brenwill Workshop Ltd. */
/* (http://www.brenwill.com) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
/* implied. See the License for the specific language governing */
/* permissions and limitations under the License. */
/**************************************************************************/

#import "metal_objects.h"

#import "metal_utils.h"
#import "pixel_formats.h"
#import "rendering_device_driver_metal.h"

#import <os/signpost.h>

// We have to undefine these macros because they are defined in NSObjCRuntime.h.
#undef MIN
#undef MAX
void MDCommandBuffer::begin() {
	DEV_ASSERT(commandBuffer == nil);
	commandBuffer = queue.commandBuffer;
}

void MDCommandBuffer::end() {
	switch (type) {
		case MDCommandBufferStateType::None:
			return;
		case MDCommandBufferStateType::Render:
			return render_end_pass();
		case MDCommandBufferStateType::Compute:
			return _end_compute_dispatch();
		case MDCommandBufferStateType::Blit:
			return _end_blit();
	}
}

void MDCommandBuffer::commit() {
	end();
	[commandBuffer commit];
	commandBuffer = nil;
}
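
// NOTE: MDCommandBuffer behaves as a small state machine: `type` tracks which
// encoder kind (None / Render / Compute / Blit) is currently open, and end()
// closes whichever one is active before commit() submits the underlying
// MTLCommandBuffer. Illustrative call sequence (names as used in this file):
//
//   cb->begin();
//   cb->render_begin_pass(...); // type == Render
//   cb->render_draw(...);
//   cb->commit(); // end() closes the render pass, then commits.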
void MDCommandBuffer::bind_pipeline(RDD::PipelineID p_pipeline) {
	MDPipeline *p = (MDPipeline *)(p_pipeline.id);

	// End the current encoder if it is a compute or blit encoder, as unlike
	// render passes, they have no explicit end boundary in the RDD API.
	if (type == MDCommandBufferStateType::Compute) {
		_end_compute_dispatch();
	} else if (type == MDCommandBufferStateType::Blit) {
		_end_blit();
	}

	if (p->type == MDPipelineType::Render) {
		DEV_ASSERT(type == MDCommandBufferStateType::Render);
		MDRenderPipeline *rp = (MDRenderPipeline *)p;

		if (render.encoder == nil) {
			// This error would happen if the render pass failed.
			ERR_FAIL_NULL_MSG(render.desc, "Render pass descriptor is null.");

			// This condition occurs when there are no attachments when calling render_next_subpass()
			// and is due to the SUPPORTS_FRAGMENT_SHADER_WITH_ONLY_SIDE_EFFECTS flag.
			render.desc.defaultRasterSampleCount = static_cast<NSUInteger>(rp->sample_count);

// NOTE(sgc): This is to test rdar://FB13605547 and will be deleted once the fix is confirmed.
#if 0
			if (render.pipeline->sample_count == 4) {
				static id<MTLTexture> tex = nil;
				static id<MTLTexture> res_tex = nil;
				static dispatch_once_t onceToken;
				dispatch_once(&onceToken, ^{
					Size2i sz = render.frameBuffer->size;
					MTLTextureDescriptor *td = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatRGBA8Unorm width:sz.width height:sz.height mipmapped:NO];
					td.textureType = MTLTextureType2DMultisample;
					td.storageMode = MTLStorageModeMemoryless;
					td.usage = MTLTextureUsageRenderTarget;
					td.sampleCount = render.pipeline->sample_count;
					tex = [device_driver->get_device() newTextureWithDescriptor:td];

					td.textureType = MTLTextureType2D;
					td.storageMode = MTLStorageModePrivate;
					td.usage = MTLTextureUsageShaderWrite;
					td.sampleCount = 1;
					res_tex = [device_driver->get_device() newTextureWithDescriptor:td];
				});
				render.desc.colorAttachments[0].texture = tex;
				render.desc.colorAttachments[0].loadAction = MTLLoadActionClear;
				render.desc.colorAttachments[0].storeAction = MTLStoreActionMultisampleResolve;
				render.desc.colorAttachments[0].resolveTexture = res_tex;
			}
#endif

			render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:render.desc];
		}

		if (render.pipeline != rp) {
			render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_RASTER));
			// Mark all uniforms as dirty, as variants of a shader pipeline may have a different entry point ABI,
			// due to setting force_active_argument_buffer_resources = true for spirv_cross::CompilerMSL::Options.
			// As a result, uniform sets with the same layout will generate redundant binding warnings when
			// capturing a Metal frame in Xcode.
			//
			// If we don't mark them as dirty, some bindings will generate a validation error.
			render.mark_uniforms_dirty();
			if (render.pipeline != nullptr && render.pipeline->depth_stencil != rp->depth_stencil) {
				render.dirty.set_flag(RenderState::DIRTY_DEPTH);
			}
			if (rp->raster_state.blend.enabled) {
				render.dirty.set_flag(RenderState::DIRTY_BLEND);
			}
			render.pipeline = rp;
		}
	} else if (p->type == MDPipelineType::Compute) {
		DEV_ASSERT(type == MDCommandBufferStateType::None);
		type = MDCommandBufferStateType::Compute;

		compute.pipeline = (MDComputePipeline *)p;
		compute.encoder = commandBuffer.computeCommandEncoder;
		[compute.encoder setComputePipelineState:compute.pipeline->state];
	}
}
id<MTLBlitCommandEncoder> MDCommandBuffer::blit_command_encoder() {
	switch (type) {
		case MDCommandBufferStateType::None:
			break;
		case MDCommandBufferStateType::Render:
			render_end_pass();
			break;
		case MDCommandBufferStateType::Compute:
			_end_compute_dispatch();
			break;
		case MDCommandBufferStateType::Blit:
			return blit.encoder;
	}

	type = MDCommandBufferStateType::Blit;
	blit.encoder = commandBuffer.blitCommandEncoder;
	return blit.encoder;
}

void MDCommandBuffer::encodeRenderCommandEncoderWithDescriptor(MTLRenderPassDescriptor *p_desc, NSString *p_label) {
	switch (type) {
		case MDCommandBufferStateType::None:
			break;
		case MDCommandBufferStateType::Render:
			render_end_pass();
			break;
		case MDCommandBufferStateType::Compute:
			_end_compute_dispatch();
			break;
		case MDCommandBufferStateType::Blit:
			_end_blit();
			break;
	}

	id<MTLRenderCommandEncoder> enc = [commandBuffer renderCommandEncoderWithDescriptor:p_desc];
	if (p_label != nil) {
		[enc pushDebugGroup:p_label];
		[enc popDebugGroup];
	}
	[enc endEncoding];
}
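
// NOTE: encodeRenderCommandEncoderWithDescriptor encodes an *empty* render
// pass: creating the encoder and immediately ending it still executes the
// descriptor's loadAction / storeAction (clears, resolves, etc.), which is
// presumably why no draw calls are needed here.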
#pragma mark - Render Commands

void MDCommandBuffer::render_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
	if (render.uniform_sets.size() <= p_set_index) {
		uint32_t s = render.uniform_sets.size();
		render.uniform_sets.resize(p_set_index + 1);
		// Set intermediate values to null.
		std::fill(&render.uniform_sets[s], &render.uniform_sets[p_set_index] + 1, nullptr);
	}

	if (render.uniform_sets[p_set_index] != set) {
		render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
		render.uniform_set_mask |= 1ULL << p_set_index;
		render.uniform_sets[p_set_index] = set;
	}
}

void MDCommandBuffer::render_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	for (size_t i = 0; i < p_set_count; ++i) {
		MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
		uint32_t index = p_first_set_index + i;
		if (render.uniform_sets.size() <= index) {
			uint32_t s = render.uniform_sets.size();
			render.uniform_sets.resize(index + 1);
			// Set intermediate values to null.
			std::fill(&render.uniform_sets[s], &render.uniform_sets[index] + 1, nullptr);
		}

		if (render.uniform_sets[index] != set) {
			render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
			render.uniform_set_mask |= 1ULL << index;
			render.uniform_sets[index] = set;
		}
	}
}
void MDCommandBuffer::render_clear_attachments(VectorView<RDD::AttachmentClear> p_attachment_clears, VectorView<Rect2i> p_rects) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	const MDSubpass &subpass = render.get_subpass();

	uint32_t vertex_count = p_rects.size() * 6 * subpass.view_count;
	simd::float4 *vertices = ALLOCA_ARRAY(simd::float4, vertex_count);
	simd::float4 clear_colors[ClearAttKey::ATTACHMENT_COUNT];

	Size2i size = render.frameBuffer->size;
	Rect2i render_area = render.clip_to_render_area({ { 0, 0 }, size });
	size = Size2i(render_area.position.x + render_area.size.width, render_area.position.y + render_area.size.height);
	_populate_vertices(vertices, size, p_rects);

	ClearAttKey key;
	key.sample_count = render.pass->get_sample_count();
	if (subpass.view_count > 1) {
		key.enable_layered_rendering();
	}

	float depth_value = 0;
	uint32_t stencil_value = 0;

	for (uint32_t i = 0; i < p_attachment_clears.size(); i++) {
		RDD::AttachmentClear const &attClear = p_attachment_clears[i];
		uint32_t attachment_index;
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			attachment_index = attClear.color_attachment;
		} else {
			attachment_index = subpass.depth_stencil_reference.attachment;
		}

		MDAttachment const &mda = render.pass->attachments[attachment_index];
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			key.set_color_format(attachment_index, mda.format);
			clear_colors[attachment_index] = {
				attClear.value.color.r,
				attClear.value.color.g,
				attClear.value.color.b,
				attClear.value.color.a
			};
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT)) {
			key.set_depth_format(mda.format);
			depth_value = attClear.value.depth;
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT)) {
			key.set_stencil_format(mda.format);
			stencil_value = attClear.value.stencil;
		}
	}

	clear_colors[ClearAttKey::DEPTH_INDEX] = {
		depth_value,
		depth_value,
		depth_value,
		depth_value
	};

	id<MTLRenderCommandEncoder> enc = render.encoder;

	MDResourceCache &cache = device_driver->get_resource_cache();

	[enc pushDebugGroup:@"ClearAttachments"];
	[enc setRenderPipelineState:cache.get_clear_render_pipeline_state(key, nil)];
	[enc setDepthStencilState:cache.get_depth_stencil_state(
								  key.is_depth_enabled(),
								  key.is_stencil_enabled())];
	[enc setStencilReferenceValue:stencil_value];
	[enc setCullMode:MTLCullModeNone];
	[enc setTriangleFillMode:MTLTriangleFillModeFill];
	[enc setDepthBias:0 slopeScale:0 clamp:0];
	[enc setViewport:{ 0, 0, (double)size.width, (double)size.height, 0.0, 1.0 }];
	[enc setScissorRect:{ 0, 0, (NSUInteger)size.width, (NSUInteger)size.height }];

	[enc setVertexBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setFragmentBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setVertexBytes:vertices length:vertex_count * sizeof(vertices[0]) atIndex:device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX)];

	[enc drawPrimitives:MTLPrimitiveTypeTriangle vertexStart:0 vertexCount:vertex_count];
	[enc popDebugGroup];

	render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_DEPTH | RenderState::DIRTY_RASTER));
	render.mark_uniforms_dirty({ 0 }); // Mark index 0 dirty, if there is already a binding for index 0.
	render.mark_viewport_dirty();
	render.mark_scissors_dirty();
	render.mark_vertex_dirty();
	render.mark_blend_dirty();
}
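
// NOTE: Metal has no equivalent of Vulkan's vkCmdClearAttachments, so
// render_clear_attachments emulates a mid-pass clear by drawing two triangles
// per clear rect (times view_count for layered rendering) with a dedicated
// clear pipeline from MDResourceCache. Because this draw tramples the
// pipeline, depth, viewport, scissor, vertex, and blend state, all of those
// are marked dirty again at the end so the next draw re-applies user state.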
void MDCommandBuffer::_render_set_dirty_state() {
	_render_bind_uniform_sets();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		uint32_t view_range[2] = { 0, subpass.view_count };
		[render.encoder setVertexBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
		[render.encoder setFragmentBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_PIPELINE)) {
		[render.encoder setRenderPipelineState:render.pipeline->state];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VIEWPORT)) {
		[render.encoder setViewports:render.viewports.ptr() count:render.viewports.size()];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_DEPTH)) {
		[render.encoder setDepthStencilState:render.pipeline->depth_stencil];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_RASTER)) {
		render.pipeline->raster_state.apply(render.encoder);
	}

	if (render.dirty.has_flag(RenderState::DIRTY_SCISSOR) && !render.scissors.is_empty()) {
		size_t len = render.scissors.size();
		MTLScissorRect *rects = ALLOCA_ARRAY(MTLScissorRect, len);
		for (size_t i = 0; i < len; i++) {
			rects[i] = render.clip_to_render_area(render.scissors[i]);
		}
		[render.encoder setScissorRects:rects count:len];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_BLEND) && render.blend_constants.has_value()) {
		[render.encoder setBlendColorRed:render.blend_constants->r green:render.blend_constants->g blue:render.blend_constants->b alpha:render.blend_constants->a];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VERTEX)) {
		uint32_t p_binding_count = render.vertex_buffers.size();
		uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
		[render.encoder setVertexBuffers:render.vertex_buffers.ptr()
								 offsets:render.vertex_offsets.ptr()
							   withRange:NSMakeRange(first, p_binding_count)];
	}

	render.dirty.clear();
}

void MDCommandBuffer::render_set_viewport(VectorView<Rect2i> p_viewports) {
	render.viewports.resize(p_viewports.size());
	for (uint32_t i = 0; i < p_viewports.size(); i += 1) {
		Rect2i const &vp = p_viewports[i];
		render.viewports[i] = {
			.originX = static_cast<double>(vp.position.x),
			.originY = static_cast<double>(vp.position.y),
			.width = static_cast<double>(vp.size.width),
			.height = static_cast<double>(vp.size.height),
			.znear = 0.0,
			.zfar = 1.0,
		};
	}

	render.dirty.set_flag(RenderState::DIRTY_VIEWPORT);
}

void MDCommandBuffer::render_set_scissor(VectorView<Rect2i> p_scissors) {
	render.scissors.resize(p_scissors.size());
	for (uint32_t i = 0; i < p_scissors.size(); i += 1) {
		Rect2i const &vp = p_scissors[i];
		render.scissors[i] = {
			.x = static_cast<NSUInteger>(vp.position.x),
			.y = static_cast<NSUInteger>(vp.position.y),
			.width = static_cast<NSUInteger>(vp.size.width),
			.height = static_cast<NSUInteger>(vp.size.height),
		};
	}

	render.dirty.set_flag(RenderState::DIRTY_SCISSOR);
}

void MDCommandBuffer::render_set_blend_constants(const Color &p_constants) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	if (render.blend_constants != p_constants) {
		render.blend_constants = p_constants;
		render.dirty.set_flag(RenderState::DIRTY_BLEND);
	}
}
void BoundUniformSet::merge_into(ResourceUsageMap &p_dst) const {
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : usage_to_resources) {
		ResourceVector *resources = p_dst.getptr(keyval.key);
		if (resources == nullptr) {
			resources = &p_dst.insert(keyval.key, ResourceVector())->value;
		}
		// Reserve space for the new resources, assuming they are all added.
		resources->reserve(resources->size() + keyval.value.size());

		uint32_t i = 0, j = 0;
		__unsafe_unretained id<MTLResource> *resources_ptr = resources->ptr();
		const __unsafe_unretained id<MTLResource> *keyval_ptr = keyval.value.ptr();
		// 2-way merge.
		while (i < resources->size() && j < keyval.value.size()) {
			if (resources_ptr[i] < keyval_ptr[j]) {
				i++;
			} else if (resources_ptr[i] > keyval_ptr[j]) {
				resources->insert(i, keyval_ptr[j]);
				i++;
				j++;
			} else {
				i++;
				j++;
			}
		}
		// Append the remaining resources.
		for (; j < keyval.value.size(); j++) {
			resources->push_back(keyval_ptr[j]);
		}
	}
}
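
// NOTE: Both vectors are kept sorted by raw pointer value, so the loop above
// is a deduplicating merge of two sorted sets. For example, merging the new
// set {A, C} into an existing {A, B} (with A < B < C) keeps the single A,
// leaves B, and inserts C, yielding {A, B, C}. Sorted storage is what makes
// the merge linear rather than quadratic.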
void MDCommandBuffer::_render_bind_uniform_sets() {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	if (!render.dirty.has_flag(RenderState::DIRTY_UNIFORMS)) {
		return;
	}

	render.dirty.clear_flag(RenderState::DIRTY_UNIFORMS);
	uint64_t set_uniforms = render.uniform_set_mask;
	render.uniform_set_mask = 0;

	MDRenderShader *shader = render.pipeline->shader;

	while (set_uniforms != 0) {
		// Find the index of the next set bit.
		uint32_t index = (uint32_t)__builtin_ctzll(set_uniforms);
		// Clear the set bit.
		set_uniforms &= (set_uniforms - 1);
		MDUniformSet *set = render.uniform_sets[index];
		if (set == nullptr || index >= (uint32_t)shader->sets.size()) {
			continue;
		}
		set->bind_uniforms(shader, render, index);
	}
}
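
// NOTE: The loop above visits only the *set* bits of the 64-bit mask:
// __builtin_ctzll returns the index of the lowest set bit, and
// `x &= (x - 1)` clears it. E.g. for a mask of 0b10110, the loop binds set
// indices 1, 2, then 4 and terminates after three iterations, regardless of
// how many uniform set slots exist.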
void MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, Size2i p_fb_size, VectorView<Rect2i> p_rects) {
	uint32_t idx = 0;
	for (uint32_t i = 0; i < p_rects.size(); i++) {
		Rect2i const &rect = p_rects[i];
		idx = _populate_vertices(p_vertices, idx, rect, p_fb_size);
	}
}

uint32_t MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, uint32_t p_index, Rect2i const &p_rect, Size2i p_fb_size) {
	// Determine the positions of the four edges of the
	// clear rectangle as a fraction of the attachment size.
	float leftPos = (float)(p_rect.position.x) / (float)p_fb_size.width;
	float rightPos = (float)(p_rect.size.width) / (float)p_fb_size.width + leftPos;
	float bottomPos = (float)(p_rect.position.y) / (float)p_fb_size.height;
	float topPos = (float)(p_rect.size.height) / (float)p_fb_size.height + bottomPos;

	// Transform to clip-space coordinates, which are bounded by (-1.0 < p < 1.0).
	leftPos = (leftPos * 2.0f) - 1.0f;
	rightPos = (rightPos * 2.0f) - 1.0f;
	bottomPos = (bottomPos * 2.0f) - 1.0f;
	topPos = (topPos * 2.0f) - 1.0f;

	simd::float4 vtx;

	uint32_t idx = p_index;
	uint32_t endLayer = render.get_subpass().view_count;

	for (uint32_t layer = 0; layer < endLayer; layer++) {
		vtx.z = 0.0;
		vtx.w = (float)layer;

		// Top left vertex - First triangle.
		vtx.y = topPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;

		// Bottom left vertex.
		vtx.y = bottomPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;

		// Bottom right vertex.
		vtx.y = bottomPos;
		vtx.x = rightPos;
		p_vertices[idx++] = vtx;

		// Bottom right vertex - Second triangle.
		p_vertices[idx++] = vtx;

		// Top right vertex.
		vtx.y = topPos;
		vtx.x = rightPos;
		p_vertices[idx++] = vtx;

		// Top left vertex.
		vtx.y = topPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;
	}

	return idx;
}
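
// NOTE: Each clear rect expands to 6 vertices (two triangles) per layer,
// matching the `p_rects.size() * 6 * subpass.view_count` allocation in
// render_clear_attachments. The layer index travels in the vertex's .w
// component; the clear shader presumably uses it to select the render target
// array slice when layered rendering is enabled.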
void MDCommandBuffer::render_begin_pass(RDD::RenderPassID p_render_pass, RDD::FramebufferID p_frameBuffer, RDD::CommandBufferType p_cmd_buffer_type, const Rect2i &p_rect, VectorView<RDD::RenderPassClearValue> p_clear_values) {
	DEV_ASSERT(commandBuffer != nil);
	end();

	MDRenderPass *pass = (MDRenderPass *)(p_render_pass.id);
	MDFrameBuffer *fb = (MDFrameBuffer *)(p_frameBuffer.id);

	type = MDCommandBufferStateType::Render;
	render.pass = pass;
	render.current_subpass = UINT32_MAX;
	render.render_area = p_rect;
	render.clear_values.resize(p_clear_values.size());
	for (uint32_t i = 0; i < p_clear_values.size(); i++) {
		render.clear_values[i] = p_clear_values[i];
	}
	render.is_rendering_entire_area = (p_rect.position == Point2i(0, 0)) && p_rect.size == fb->size;
	render.frameBuffer = fb;
	render_next_subpass();
}

void MDCommandBuffer::_end_render_pass() {
	MDFrameBuffer const &fb_info = *render.frameBuffer;
	MDSubpass const &subpass = render.get_subpass();
	PixelFormats &pf = device_driver->get_pixel_formats();

	for (uint32_t i = 0; i < subpass.resolve_references.size(); i++) {
		uint32_t color_index = subpass.color_references[i].attachment;
		uint32_t resolve_index = subpass.resolve_references[i].attachment;
		DEV_ASSERT((color_index == RDD::AttachmentReference::UNUSED) == (resolve_index == RDD::AttachmentReference::UNUSED));
		if (color_index == RDD::AttachmentReference::UNUSED || !fb_info.has_texture(color_index)) {
			continue;
		}

		id<MTLTexture> resolve_tex = fb_info.get_texture(resolve_index);

		CRASH_COND_MSG(!flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve), "not implemented: unresolvable texture types");
		// see: https://github.com/KhronosGroup/MoltenVK/blob/d20d13fe2735adb845636a81522df1b9d89c0fba/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm#L407
	}

	render.end_encoding();
}

void MDCommandBuffer::_render_clear_render_area() {
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();

	// First determine attachments that should be cleared.
	LocalVector<RDD::AttachmentClear> clears;
	clears.reserve(subpass.color_references.size() + /* possible depth stencil clear */ 1);

	for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx != RDD::AttachmentReference::UNUSED && pass.attachments[idx].shouldClear(subpass, false)) {
			clears.push_back({ .aspect = RDD::TEXTURE_ASPECT_COLOR_BIT, .color_attachment = idx, .value = render.clear_values[idx] });
		}
	}
	uint32_t ds_index = subpass.depth_stencil_reference.attachment;
	bool shouldClearDepth = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, false));
	bool shouldClearStencil = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, true));

	if (shouldClearDepth || shouldClearStencil) {
		MDAttachment const &attachment = pass.attachments[ds_index];
		BitField<RDD::TextureAspectBits> bits = {};
		if (shouldClearDepth && attachment.type & MDAttachmentType::Depth) {
			bits.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		}
		if (shouldClearStencil && attachment.type & MDAttachmentType::Stencil) {
			bits.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}

		clears.push_back({ .aspect = bits, .color_attachment = ds_index, .value = render.clear_values[ds_index] });
	}

	if (clears.is_empty()) {
		return;
	}

	render_clear_attachments(clears, { render.render_area });
}
void MDCommandBuffer::render_next_subpass() {
	DEV_ASSERT(commandBuffer != nil);

	if (render.current_subpass == UINT32_MAX) {
		render.current_subpass = 0;
	} else {
		_end_render_pass();
		render.current_subpass++;
	}

	MDFrameBuffer const &fb = *render.frameBuffer;
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();

	MTLRenderPassDescriptor *desc = MTLRenderPassDescriptor.renderPassDescriptor;

	if (subpass.view_count > 1) {
		desc.renderTargetArrayLength = subpass.view_count;
	}

	PixelFormats &pf = device_driver->get_pixel_formats();

	uint32_t attachmentCount = 0;
	for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx == RDD::AttachmentReference::UNUSED) {
			continue;
		}

		attachmentCount += 1;
		MTLRenderPassColorAttachmentDescriptor *ca = desc.colorAttachments[i];

		uint32_t resolveIdx = subpass.resolve_references.is_empty() ? RDD::AttachmentReference::UNUSED : subpass.resolve_references[i].attachment;
		bool has_resolve = resolveIdx != RDD::AttachmentReference::UNUSED;
		bool can_resolve = true;
		if (resolveIdx != RDD::AttachmentReference::UNUSED) {
			id<MTLTexture> resolve_tex = fb.get_texture(resolveIdx);
			can_resolve = flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve);
			if (can_resolve) {
				ca.resolveTexture = resolve_tex;
			} else {
				CRASH_NOW_MSG("unimplemented: using a texture format that is not supported for resolve");
			}
		}

		MDAttachment const &attachment = pass.attachments[idx];

		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer color texture is null.");

		if ((attachment.type & MDAttachmentType::Color)) {
			if (attachment.configureDescriptor(ca, pf, subpass, tex, render.is_rendering_entire_area, has_resolve, can_resolve, false)) {
				Color clearColor = render.clear_values[idx].color;
				ca.clearColor = MTLClearColorMake(clearColor.r, clearColor.g, clearColor.b, clearColor.a);
			}
		}
	}

	if (subpass.depth_stencil_reference.attachment != RDD::AttachmentReference::UNUSED) {
		attachmentCount += 1;

		uint32_t idx = subpass.depth_stencil_reference.attachment;
		MDAttachment const &attachment = pass.attachments[idx];

		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer depth / stencil texture is null.");
		if (attachment.type & MDAttachmentType::Depth) {
			MTLRenderPassDepthAttachmentDescriptor *da = desc.depthAttachment;
			if (attachment.configureDescriptor(da, pf, subpass, tex, render.is_rendering_entire_area, false, false, false)) {
				da.clearDepth = render.clear_values[idx].depth;
			}
		}

		if (attachment.type & MDAttachmentType::Stencil) {
			MTLRenderPassStencilAttachmentDescriptor *sa = desc.stencilAttachment;
			if (attachment.configureDescriptor(sa, pf, subpass, tex, render.is_rendering_entire_area, false, false, true)) {
				sa.clearStencil = render.clear_values[idx].stencil;
			}
		}
	}

	desc.renderTargetWidth = MAX((NSUInteger)MIN(render.render_area.position.x + render.render_area.size.width, fb.size.width), 1u);
	desc.renderTargetHeight = MAX((NSUInteger)MIN(render.render_area.position.y + render.render_area.size.height, fb.size.height), 1u);

	if (attachmentCount == 0) {
		// If there are no attachments, delay the creation of the encoder,
		// so we can use a matching sample count for the pipeline, by setting
		// the defaultRasterSampleCount from the pipeline's sample count.
		render.desc = desc;
	} else {
		render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:desc];

		if (!render.is_rendering_entire_area) {
			_render_clear_render_area();
		}
		// With a new encoder, all state is dirty.
		render.dirty.set_flag(RenderState::DIRTY_ALL);
	}
}
void MDCommandBuffer::render_draw(uint32_t p_vertex_count,
		uint32_t p_instance_count,
		uint32_t p_base_vertex,
		uint32_t p_first_instance) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		p_instance_count *= subpass.view_count;
	}

	DEV_ASSERT(render.dirty == 0);

	id<MTLRenderCommandEncoder> enc = render.encoder;

	[enc drawPrimitives:render.pipeline->raster_state.render_primitive
			vertexStart:p_base_vertex
			vertexCount:p_vertex_count
		  instanceCount:p_instance_count
		   baseInstance:p_first_instance];
}

void MDCommandBuffer::render_bind_vertex_buffers(uint32_t p_binding_count, const RDD::BufferID *p_buffers, const uint64_t *p_offsets) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.vertex_buffers.resize(p_binding_count);
	render.vertex_offsets.resize(p_binding_count);

	// Reverse the buffers, as their bindings are assigned in descending order.
	for (uint32_t i = 0; i < p_binding_count; i += 1) {
		render.vertex_buffers[i] = rid::get(p_buffers[p_binding_count - i - 1]);
		render.vertex_offsets[i] = p_offsets[p_binding_count - i - 1];
	}

	if (render.encoder) {
		uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
		[render.encoder setVertexBuffers:render.vertex_buffers.ptr()
								 offsets:render.vertex_offsets.ptr()
							   withRange:NSMakeRange(first, p_binding_count)];
		render.dirty.clear_flag(RenderState::DIRTY_VERTEX);
	} else {
		render.dirty.set_flag(RenderState::DIRTY_VERTEX);
	}
}
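
// NOTE: Vertex buffers share Metal's buffer argument table with other
// bindings, so vertex buffer indices are handed out from the top of the table
// downward. Illustrative mapping (assuming
// get_metal_buffer_index_for_vertex_attribute_binding(b) returns
// MAX_INDEX - b): with 3 bindings, binding 2 maps to MAX_INDEX - 2, so
// `first` is the lowest Metal index, and the reversed array lines bindings
// 2, 1, 0 up with the contiguous range [first, first + 3).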
void MDCommandBuffer::render_bind_index_buffer(RDD::BufferID p_buffer, RDD::IndexBufferFormat p_format, uint64_t p_offset) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.index_buffer = rid::get(p_buffer);
	render.index_type = p_format == RDD::IndexBufferFormat::INDEX_BUFFER_FORMAT_UINT16 ? MTLIndexTypeUInt16 : MTLIndexTypeUInt32;
	render.index_offset = p_offset;
}

void MDCommandBuffer::render_draw_indexed(uint32_t p_index_count,
		uint32_t p_instance_count,
		uint32_t p_first_index,
		int32_t p_vertex_offset,
		uint32_t p_first_instance) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		p_instance_count *= subpass.view_count;
	}

	id<MTLRenderCommandEncoder> enc = render.encoder;

	uint32_t index_offset = render.index_offset;
	index_offset += p_first_index * (render.index_type == MTLIndexTypeUInt16 ? sizeof(uint16_t) : sizeof(uint32_t));

	[enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
					indexCount:p_index_count
					 indexType:render.index_type
				   indexBuffer:render.index_buffer
			 indexBufferOffset:index_offset
				 instanceCount:p_instance_count
					baseVertex:p_vertex_offset
				  baseInstance:p_first_instance];
}

void MDCommandBuffer::render_draw_indexed_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	id<MTLRenderCommandEncoder> enc = render.encoder;

	id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
	NSUInteger indirect_offset = p_offset;

	for (uint32_t i = 0; i < p_draw_count; i++) {
		[enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
						 indexType:render.index_type
					   indexBuffer:render.index_buffer
				 indexBufferOffset:0
					indirectBuffer:indirect_buffer
			  indirectBufferOffset:indirect_offset];
		indirect_offset += p_stride;
	}
}
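
// NOTE: Metal has no single multi-draw-indirect command, so the indirect draw
// entry points in this file emulate Vulkan-style multi-draw by encoding
// p_draw_count individual indirect draws, advancing the buffer offset by
// p_stride each iteration. The first-index and base-vertex values come from
// the indirect arguments stored in the buffer itself, which is presumably why
// indexBufferOffset is 0 in the indexed variant above.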
void MDCommandBuffer::render_draw_indexed_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}

void MDCommandBuffer::render_draw_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	id<MTLRenderCommandEncoder> enc = render.encoder;

	id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
	NSUInteger indirect_offset = p_offset;

	for (uint32_t i = 0; i < p_draw_count; i++) {
		[enc drawPrimitives:render.pipeline->raster_state.render_primitive
				 indirectBuffer:indirect_buffer
		   indirectBufferOffset:indirect_offset];
		indirect_offset += p_stride;
	}
}

void MDCommandBuffer::render_draw_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}

void MDCommandBuffer::render_end_pass() {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.end_encoding();
	render.reset();
	type = MDCommandBufferStateType::None;
}
#pragma mark - RenderState

void MDCommandBuffer::RenderState::reset() {
	pass = nil;
	frameBuffer = nil;
	pipeline = nil;
	current_subpass = UINT32_MAX;
	render_area = {};
	is_rendering_entire_area = false;
	desc = nil;
	encoder = nil;
	index_buffer = nil;
	index_type = MTLIndexTypeUInt16;
	dirty = DIRTY_NONE;
	uniform_sets.clear();
	uniform_set_mask = 0;
	clear_values.clear();
	viewports.clear();
	scissors.clear();
	blend_constants.reset();
	vertex_buffers.clear();
	vertex_offsets.clear();
	// Keep the keys, as they are likely to be used again.
	for (KeyValue<StageResourceUsage, LocalVector<__unsafe_unretained id<MTLResource>>> &kv : resource_usage) {
		kv.value.clear();
	}
}

void MDCommandBuffer::RenderState::end_encoding() {
	if (encoder == nil) {
		return;
	}

	// Bind all resources.
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
		if (keyval.value.is_empty()) {
			continue;
		}

		MTLResourceUsage vert_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_VERTEX);
		MTLResourceUsage frag_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_FRAGMENT);
		if (vert_usage == frag_usage) {
			[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex | MTLRenderStageFragment];
		} else {
			if (vert_usage != 0) {
				[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex];
			}
			if (frag_usage != 0) {
				[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:frag_usage stages:MTLRenderStageFragment];
			}
		}
	}
	[encoder endEncoding];
	encoder = nil;
}
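
// NOTE: The useResources: calls matter when resources are referenced
// indirectly through argument buffers: Metal only guarantees residency for
// resources the encoder sees directly, so anything reached via an argument
// buffer must be declared with its usage (read / write) and stages before the
// encoding ends, or the GPU may fault when the shader dereferences it.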
#pragma mark - ComputeState

void MDCommandBuffer::ComputeState::end_encoding() {
	if (encoder == nil) {
		return;
	}

	// Bind all resources.
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
		if (keyval.value.is_empty()) {
			continue;
		}

		MTLResourceUsage usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_COMPUTE);
		if (usage != 0) {
			[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:usage];
		}
	}
	[encoder endEncoding];
	encoder = nil;
}

#pragma mark - Compute

void MDCommandBuffer::compute_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MDShader *shader = (MDShader *)(p_shader.id);
	MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
	set->bind_uniforms(shader, compute, p_set_index);
}

void MDCommandBuffer::compute_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MDShader *shader = (MDShader *)(p_shader.id);

	// TODO(sgc): Bind multiple buffers using [encoder setBuffers:offsets:withRange:]
	for (size_t i = 0u; i < p_set_count; ++i) {
		MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
		set->bind_uniforms(shader, compute, p_first_set_index + i);
	}
}

void MDCommandBuffer::compute_dispatch(uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MTLRegion region = MTLRegionMake3D(0, 0, 0, p_x_groups, p_y_groups, p_z_groups);

	id<MTLComputeCommandEncoder> enc = compute.encoder;
	[enc dispatchThreadgroups:region.size threadsPerThreadgroup:compute.pipeline->compute_state.local];
}
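
// NOTE: MTLRegionMake3D is used here only as a convenient way to build the
// MTLSize holding the threadgroup counts; region.size is
// (p_x_groups, p_y_groups, p_z_groups), while threadsPerThreadgroup comes
// from the pipeline's local workgroup size (compute_state.local), presumably
// reflected from the compute shader.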
void MDCommandBuffer::compute_dispatch_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	id<MTLBuffer> indirectBuffer = rid::get(p_indirect_buffer);

	id<MTLComputeCommandEncoder> enc = compute.encoder;
	[enc dispatchThreadgroupsWithIndirectBuffer:indirectBuffer indirectBufferOffset:p_offset threadsPerThreadgroup:compute.pipeline->compute_state.local];
}

void MDCommandBuffer::_end_compute_dispatch() {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	compute.end_encoding();
	compute.reset();
	type = MDCommandBufferStateType::None;
}

void MDCommandBuffer::_end_blit() {
	DEV_ASSERT(type == MDCommandBufferStateType::Blit);

	[blit.encoder endEncoding];
	blit.reset();
	type = MDCommandBufferStateType::None;
}

MDComputeShader::MDComputeShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_uses_argument_buffers,
		MDLibrary *p_kernel) :
		MDShader(p_name, p_sets, p_uses_argument_buffers), kernel(p_kernel) {
}

void MDComputeShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
	DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Compute);
	if (push_constants.binding == (uint32_t)-1) {
		return;
	}

	id<MTLComputeCommandEncoder> enc = p_cb->compute.encoder;

	void const *ptr = p_data.ptr();
	size_t length = p_data.size() * sizeof(uint32_t);

	[enc setBytes:ptr length:length atIndex:push_constants.binding];
}
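
// NOTE: Push constants have no native Metal equivalent; they are delivered as
// an inline buffer via setBytes:, which suits small, frequently changing
// data. A binding of (uint32_t)-1 appears to be the sentinel for "this shader
// declares no push constants", in which case the encode is skipped entirely.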
MDRenderShader::MDRenderShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_needs_view_mask_buffer,
		bool p_uses_argument_buffers,
		MDLibrary *_Nonnull p_vert, MDLibrary *_Nonnull p_frag) :
		MDShader(p_name, p_sets, p_uses_argument_buffers),
		needs_view_mask_buffer(p_needs_view_mask_buffer),
		vert(p_vert),
		frag(p_frag) {
}

void MDRenderShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
	DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Render);
	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_cb->render.encoder;

	void const *ptr = p_data.ptr();
	size_t length = p_data.size() * sizeof(uint32_t);

	if (push_constants.vert.binding > -1) {
		[enc setVertexBytes:ptr length:length atIndex:push_constants.vert.binding];
	}

	if (push_constants.frag.binding > -1) {
		[enc setFragmentBytes:ptr length:length atIndex:push_constants.frag.binding];
	}
}

void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::RenderState &p_state, uint32_t p_set_index) {
	DEV_ASSERT(p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	UniformSet const &set_info = p_shader->sets[p_set_index];

	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;
	id<MTLDevice> __unsafe_unretained device = enc.device;

	BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage, p_set_index);

	// Set the buffer for the vertex stage.
	{
		uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_VERTEX);
		if (offset) {
			[enc setVertexBuffer:bus.buffer offset:*offset atIndex:p_set_index];
		}
	}

	// Set the buffer for the fragment stage.
	{
		uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_FRAGMENT);
		if (offset) {
			[enc setFragmentBuffer:bus.buffer offset:*offset atIndex:p_set_index];
		}
	}
}
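
// NOTE: In the argument-buffer path, a whole uniform set collapses to a
// single MTLBuffer (bus.buffer) bound at the set's index. The per-stage
// offsets into that buffer come from the shader's reflection data
// (set_info.offsets), so the vertex and fragment stages can share one
// allocation while each seeing only its own argument layout.
// bound_uniform_set() presumably also records the set's resources in
// p_state.resource_usage for the useResources: calls at end_encoding().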
  898. void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::RenderState &p_state, uint32_t p_set_index) {
  899. DEV_ASSERT(!p_shader->uses_argument_buffers);
  900. DEV_ASSERT(p_state.encoder != nil);
  901. id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;
  902. UniformSet const &set = p_shader->sets[p_set_index];
  903. for (uint32_t i = 0; i < MIN(uniforms.size(), set.uniforms.size()); i++) {
  904. RDD::BoundUniform const &uniform = uniforms[i];
  905. const UniformInfo &ui = set.uniforms[i];
  906. static const RDC::ShaderStage stage_usages[2] = { RDC::ShaderStage::SHADER_STAGE_VERTEX, RDC::ShaderStage::SHADER_STAGE_FRAGMENT };
  907. for (const RDC::ShaderStage stage : stage_usages) {
  908. ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);
  909. const BindingInfo *bi = ui.bindings.getptr(stage);
  910. if (bi == nullptr) {
  911. // No binding for this stage.
  912. continue;
  913. }
  914. if ((ui.active_stages & stage_usage) == 0) {
  915. // Not active for this state, so don't bind anything.
  916. continue;
  917. }
  918. switch (uniform.type) {
  919. case RDD::UNIFORM_TYPE_SAMPLER: {
  920. size_t count = uniform.ids.size();
  921. id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
  922. for (size_t j = 0; j < count; j += 1) {
  923. objects[j] = rid::get(uniform.ids[j].id);
  924. }
  925. if (stage == RDD::SHADER_STAGE_VERTEX) {
  926. [enc setVertexSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
  927. } else {
  928. [enc setFragmentSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
  929. }
  930. } break;
  931. case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
  932. size_t count = uniform.ids.size() / 2;
  933. id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
  934. id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
					for (uint32_t j = 0; j < count; j += 1) {
						id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
						id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
						samplers[j] = sampler;
						textures[j] = texture;
					}
					const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
					if (sbi) {
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						} else {
							[enc setFragmentSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
					}
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexTextures:textures withRange:NSMakeRange(bi->index, count)];
					} else {
						[enc setFragmentTextures:textures withRange:NSMakeRange(bi->index, count)];
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_IMAGE: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
						const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
							id<MTLBuffer> buf = tex.buffer;
							if (buf) {
								if (stage == RDD::SHADER_STAGE_VERTEX) {
									[enc setVertexBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								} else {
									[enc setFragmentBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
					CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				default: {
					DEV_ASSERT(false);
				}
			}
		}
	}
}
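
// Dispatch to the argument-buffer or direct binding path; the choice is fixed
// per shader when it is compiled, so the two paths never mix for one shader.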
void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::RenderState &p_state, uint32_t p_set_index) {
	if (p_shader->uses_argument_buffers) {
		bind_uniforms_argument_buffers(p_shader, p_state, p_set_index);
	} else {
		bind_uniforms_direct(p_shader, p_state, p_set_index);
	}
}
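
// Argument-buffer path for compute: the uniform set was encoded once into a
// shared MTLBuffer (see bound_uniform_set below), so binding reduces to a
// single setBuffer at the set index, using the compute stage's offset into
// that buffer.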
void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state, uint32_t p_set_index) {
	DEV_ASSERT(p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	UniformSet const &set_info = p_shader->sets[p_set_index];

	id<MTLComputeCommandEncoder> enc = p_state.encoder;
	id<MTLDevice> device = enc.device;

	BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage, p_set_index);

	uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_COMPUTE);
	if (offset) {
		[enc setBuffer:bus.buffer offset:*offset atIndex:p_set_index];
	}
}
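
// Direct path for compute: each sampler, texture, and buffer is set on the
// encoder individually. Only the compute stage's bindings are consulted.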
void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state, uint32_t p_set_index) {
	DEV_ASSERT(!p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	id<MTLComputeCommandEncoder> __unsafe_unretained enc = p_state.encoder;

	UniformSet const &set = p_shader->sets[p_set_index];

	for (uint32_t i = 0; i < uniforms.size(); i++) {
		RDD::BoundUniform const &uniform = uniforms[i];
		const UniformInfo &ui = set.uniforms[i];

		const RDC::ShaderStage stage = RDC::ShaderStage::SHADER_STAGE_COMPUTE;
		const ShaderStageUsage stage_usage = ShaderStageUsage(1 << stage);

		const BindingInfo *bi = ui.bindings.getptr(stage);
		if (bi == nullptr) {
			// No binding for this stage.
			continue;
		}

		if ((ui.active_stages & stage_usage) == 0) {
			// Not active for this stage, so don't bind anything.
			continue;
		}

		switch (uniform.type) {
			case RDD::UNIFORM_TYPE_SAMPLER: {
				size_t count = uniform.ids.size();
				id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (size_t j = 0; j < count; j += 1) {
					objects[j] = rid::get(uniform.ids[j].id);
				}
				[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
				size_t count = uniform.ids.size() / 2;
				id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
				id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (uint32_t j = 0; j < count; j += 1) {
					id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
					id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
					samplers[j] = sampler;
					textures[j] = texture;
				}
				const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
				if (sbi) {
					[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
				}
				[enc setTextures:textures withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_IMAGE: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
					const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
					if (sbi) {
						id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
						id<MTLBuffer> buf = tex.buffer;
						if (buf) {
							[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
						}
					}
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
				CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			default: {
				DEV_ASSERT(false);
			}
		}
	}
}
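
// Compute counterpart of the render-state entry point above.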
void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state, uint32_t p_set_index) {
	if (p_shader->uses_argument_buffers) {
		bind_uniforms_argument_buffers(p_shader, p_state, p_set_index);
	} else {
		bind_uniforms_direct(p_shader, p_state, p_set_index);
	}
}
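
// Returns the cached argument buffer for this uniform set and shader,
// encoding it on first use. While encoding, each bound resource is recorded
// with its stage/usage flags; the resulting usage map is merged into
// p_resource_usage so the caller can make the resources resident (Metal does
// not automatically track resources referenced only through an argument
// buffer).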
BoundUniformSet &MDUniformSet::bound_uniform_set(MDShader *p_shader, id<MTLDevice> p_device, ResourceUsageMap &p_resource_usage, uint32_t p_set_index) {
	BoundUniformSet *sus = bound_uniforms.getptr(p_shader);
	if (sus != nullptr) {
		sus->merge_into(p_resource_usage);
		return *sus;
	}

	UniformSet const &set = p_shader->sets[p_set_index];

	HashMap<id<MTLResource>, StageResourceUsage> bound_resources;
	auto add_usage = [&bound_resources](id<MTLResource> __unsafe_unretained res, RDD::ShaderStage stage, MTLResourceUsage usage) {
		StageResourceUsage *sru = bound_resources.getptr(res);
		if (sru == nullptr) {
			bound_resources.insert(res, stage_resource_usage(stage, usage));
		} else {
			*sru |= stage_resource_usage(stage, usage);
		}
	};

	id<MTLBuffer> enc_buffer = nil;
	if (set.buffer_size > 0) {
		MTLResourceOptions options = MTLResourceStorageModeShared | MTLResourceHazardTrackingModeTracked;
		enc_buffer = [p_device newBufferWithLength:set.buffer_size options:options];
		for (KeyValue<RDC::ShaderStage, id<MTLArgumentEncoder>> const &kv : set.encoders) {
			RDD::ShaderStage const stage = kv.key;
			ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);
			id<MTLArgumentEncoder> const enc = kv.value;

			[enc setArgumentBuffer:enc_buffer offset:set.offsets[stage]];

			for (uint32_t i = 0; i < uniforms.size(); i++) {
				RDD::BoundUniform const &uniform = uniforms[i];
				UniformInfo ui = set.uniforms[i];

				BindingInfo *bi = ui.bindings.getptr(stage);
				if (bi == nullptr) {
					// No binding for this stage.
					continue;
				}

				if ((ui.active_stages & stage_usage) == 0) {
					// Not active for this stage, so don't bind anything.
					continue;
				}

				switch (uniform.type) {
					case RDD::UNIFORM_TYPE_SAMPLER: {
						size_t count = uniform.ids.size();
						id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							objects[j] = rid::get(uniform.ids[j].id);
						}
						[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
						size_t count = uniform.ids.size() / 2;
						id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (uint32_t j = 0; j < count; j += 1) {
							id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
							id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
							samplers[j] = sampler;
							textures[j] = texture;
							add_usage(texture, stage, bi->usage);
						}
						BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
						[enc setTextures:textures withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_IMAGE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
							BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
							if (sbi) {
								id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
								id<MTLBuffer> buf = tex.buffer;
								if (buf) {
									[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
						CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					default: {
						DEV_ASSERT(false);
					}
				}
			}
		}
	}

	ResourceUsageMap usage_to_resources;
	for (KeyValue<id<MTLResource>, StageResourceUsage> const &keyval : bound_resources) {
		ResourceVector *resources = usage_to_resources.getptr(keyval.value);
		if (resources == nullptr) {
			resources = &usage_to_resources.insert(keyval.value, ResourceVector())->value;
		}
		int64_t pos = resources->span().bisect(keyval.key, true);
		if (pos == resources->size() || (*resources)[pos] != keyval.key) {
			resources->insert(pos, keyval.key);
		}
	}

	BoundUniformSet bs = { .buffer = enc_buffer, .usage_to_resources = usage_to_resources };
	bound_uniforms.insert(p_shader, bs);

	bs.merge_into(p_resource_usage);

	return bound_uniforms.get(p_shader);
}
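
// Accumulates the format capabilities attachment p_index must support for
// the ways this subpass uses it: read (input attachment), color attachment,
// resolve destination, and/or depth-stencil attachment.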
MTLFmtCaps MDSubpass::getRequiredFmtCapsForAttachmentAt(uint32_t p_index) const {
	MTLFmtCaps caps = kMTLFmtCapsNone;

	for (RDD::AttachmentReference const &ar : input_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsRead);
			break;
		}
	}

	for (RDD::AttachmentReference const &ar : color_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsColorAtt);
			break;
		}
	}

	for (RDD::AttachmentReference const &ar : resolve_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsResolve);
			break;
		}
	}

	if (depth_stencil_reference.attachment == p_index) {
		flags::set(caps, kMTLFmtCapsDSAtt);
	}

	return caps;
}
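
// Records the first and last subpasses that touch this attachment; these
// bounds drive the load/store action selection below.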
void MDAttachment::linkToSubpass(const MDRenderPass &p_pass) {
	firstUseSubpassIndex = UINT32_MAX;
	lastUseSubpassIndex = 0;

	for (MDSubpass const &subpass : p_pass.subpasses) {
		MTLFmtCaps reqCaps = subpass.getRequiredFmtCapsForAttachmentAt(index);
		if (reqCaps) {
			firstUseSubpassIndex = MIN(subpass.subpass_index, firstUseSubpassIndex);
			lastUseSubpassIndex = MAX(subpass.subpass_index, lastUseSubpassIndex);
		}
	}
}
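
// If rendering does not cover the entire area, or a later subpass still
// needs the contents, the attachment must be stored regardless of the
// requested store action; otherwise the (stencil) store action is honored,
// upgraded to a multisample resolve when one is requested and supported.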
MTLStoreAction MDAttachment::getMTLStoreAction(MDSubpass const &p_subpass,
		bool p_is_rendering_entire_area,
		bool p_has_resolve,
		bool p_can_resolve,
		bool p_is_stencil) const {
	if (!p_is_rendering_entire_area || !isLastUseOf(p_subpass)) {
		return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
	}

	switch (p_is_stencil ? stencilStoreAction : storeAction) {
		case MTLStoreActionStore:
			return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
		case MTLStoreActionDontCare:
			return p_has_resolve ? (p_can_resolve ? MTLStoreActionMultisampleResolve : MTLStoreActionStore) : MTLStoreActionDontCare;
		default:
			return MTLStoreActionStore;
	}
}
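
// Fills in a render pass attachment descriptor for this subpass and returns
// whether the attachment will be cleared (i.e. the resolved load action is
// MTLLoadActionClear).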
bool MDAttachment::configureDescriptor(MTLRenderPassAttachmentDescriptor *p_desc,
		PixelFormats &p_pf,
		MDSubpass const &p_subpass,
		id<MTLTexture> p_attachment,
		bool p_is_rendering_entire_area,
		bool p_has_resolve,
		bool p_can_resolve,
		bool p_is_stencil) const {
	p_desc.texture = p_attachment;

	MTLLoadAction load;
	if (!p_is_rendering_entire_area || !isFirstUseOf(p_subpass)) {
		load = MTLLoadActionLoad;
	} else {
		load = p_is_stencil ? stencilLoadAction : loadAction;
	}
	p_desc.loadAction = load;

	MTLPixelFormat mtlFmt = p_attachment.pixelFormat;
	bool isDepthFormat = p_pf.isDepthFormat(mtlFmt);
	bool isStencilFormat = p_pf.isStencilFormat(mtlFmt);
	if (isStencilFormat && !p_is_stencil && !isDepthFormat) {
		p_desc.storeAction = MTLStoreActionDontCare;
	} else {
		p_desc.storeAction = getMTLStoreAction(p_subpass, p_is_rendering_entire_area, p_has_resolve, p_can_resolve, p_is_stencil);
	}

	return load == MTLLoadActionClear;
}

bool MDAttachment::shouldClear(const MDSubpass &p_subpass, bool p_is_stencil) const {
	// If the subpass is not the first subpass to use this attachment, don't clear this attachment.
	if (p_subpass.subpass_index != firstUseSubpassIndex) {
		return false;
	}
	return (p_is_stencil ? stencilLoadAction : loadAction) == MTLLoadActionClear;
}

MDRenderPass::MDRenderPass(Vector<MDAttachment> &p_attachments, Vector<MDSubpass> &p_subpasses) :
		attachments(p_attachments), subpasses(p_subpasses) {
	for (MDAttachment &att : attachments) {
		att.linkToSubpass(*this);
	}
}

#pragma mark - Resource Factory
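
// Compiles p_source as a one-off MSL library and returns the named entry
// point; a compile error is reported through p_error when provided.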
id<MTLFunction> MDResourceFactory::new_func(NSString *p_source, NSString *p_name, NSError **p_error) {
	@autoreleasepool {
		NSError *err = nil;
		MTLCompileOptions *options = [MTLCompileOptions new];
		id<MTLDevice> device = device_driver->get_device();
		id<MTLLibrary> mtlLib = [device newLibraryWithSource:p_source
													 options:options
													   error:&err];
		if (err) {
			if (p_error != nil) {
				*p_error = err;
			}
		}
		return [mtlLib newFunctionWithName:p_name];
	}
}
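
// Generates the clear vertex function at runtime. The clear depth is carried
// in ccIn.colors[DEPTH_INDEX].r and written to the position's z, and the
// target layer rides in a_position.w when layered rendering is enabled.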
id<MTLFunction> MDResourceFactory::new_clear_vert_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSString *msl = [NSString stringWithFormat:@R"(
#include <metal_stdlib>
using namespace metal;

typedef struct {
	float4 a_position [[attribute(0)]];
} AttributesPos;

typedef struct {
	float4 colors[9];
} ClearColorsIn;

typedef struct {
	float4 v_position [[position]];
	uint layer%s;
} VaryingsPos;

vertex VaryingsPos vertClear(AttributesPos attributes [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
	VaryingsPos varyings;
	varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[%d].r, 1.0);
	varyings.layer = uint(attributes.a_position.w);
	return varyings;
}
)", p_key.is_layered_rendering_enabled() ? " [[render_target_array_index]]" : "", ClearAttKey::DEPTH_INDEX];

		return new_func(msl, @"vertClear", nil);
	}
}
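
// Generates the clear fragment function: one output per enabled color
// attachment, casting the float4 clear color to each attachment's component
// type (see get_format_type_string below).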
id<MTLFunction> MDResourceFactory::new_clear_frag_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSMutableString *msl = [NSMutableString stringWithCapacity:2048];

		[msl appendFormat:@R"(
#include <metal_stdlib>
using namespace metal;

typedef struct {
	float4 v_position [[position]];
} VaryingsPos;

typedef struct {
	float4 colors[9];
} ClearColorsIn;

typedef struct {
)"];

		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" %@4 color%u [[color(%u)]];\n", typeStr, caIdx, caIdx];
			}
		}

		[msl appendFormat:@R"(} ClearColorsOut;

fragment ClearColorsOut fragClear(VaryingsPos varyings [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
	ClearColorsOut ccOut;
)"];

		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" ccOut.color%u = %@4(ccIn.colors[%u]);\n", caIdx, typeStr, caIdx];
			}
		}

		[msl appendString:@R"( return ccOut;
})"];

		return new_func(msl, @"fragClear", nil);
	}
}
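
// Maps a pixel format's component type to the MSL scalar type used to
// declare the clear shader's outputs.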
NSString *MDResourceFactory::get_format_type_string(MTLPixelFormat p_fmt) {
	switch (device_driver->get_pixel_formats().getFormatType(p_fmt)) {
		case MTLFormatType::ColorInt8:
		case MTLFormatType::ColorInt16:
			return @"short";
		case MTLFormatType::ColorUInt8:
		case MTLFormatType::ColorUInt16:
			return @"ushort";
		case MTLFormatType::ColorInt32:
			return @"int";
		case MTLFormatType::ColorUInt32:
			return @"uint";
		case MTLFormatType::ColorHalf:
			return @"half";
		case MTLFormatType::ColorFloat:
		case MTLFormatType::DepthStencil:
		case MTLFormatType::Compressed:
			return @"float";
		case MTLFormatType::None:
			return @"unexpected_MTLPixelFormatInvalid";
	}
}

id<MTLDepthStencilState> MDResourceFactory::new_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
	MTLDepthStencilDescriptor *dsDesc = [MTLDepthStencilDescriptor new];
	dsDesc.depthCompareFunction = MTLCompareFunctionAlways;
	dsDesc.depthWriteEnabled = p_use_depth;

	if (p_use_stencil) {
		MTLStencilDescriptor *sDesc = [MTLStencilDescriptor new];
		sDesc.stencilCompareFunction = MTLCompareFunctionAlways;
		sDesc.stencilFailureOperation = MTLStencilOperationReplace;
		sDesc.depthFailureOperation = MTLStencilOperationReplace;
		sDesc.depthStencilPassOperation = MTLStencilOperationReplace;

		dsDesc.frontFaceStencil = sDesc;
		dsDesc.backFaceStencil = sDesc;
	} else {
		dsDesc.frontFaceStencil = nil;
		dsDesc.backFaceStencil = nil;
	}

	return [device_driver->get_device() newDepthStencilStateWithDescriptor:dsDesc];
}
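
// Builds the pipeline used to clear attachments with a full-screen quad.
// Disabled attachments keep their pixel format but mask out all writes, so a
// single pipeline layout covers any subset of enabled attachments.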
id<MTLRenderPipelineState> MDResourceFactory::new_clear_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
	PixelFormats &pixFmts = device_driver->get_pixel_formats();

	id<MTLFunction> vtxFunc = new_clear_vert_func(p_key);
	id<MTLFunction> fragFunc = new_clear_frag_func(p_key);
	MTLRenderPipelineDescriptor *plDesc = [MTLRenderPipelineDescriptor new];
	plDesc.label = @"ClearRenderAttachments";
	plDesc.vertexFunction = vtxFunc;
	plDesc.fragmentFunction = fragFunc;
	plDesc.rasterSampleCount = p_key.sample_count;
	plDesc.inputPrimitiveTopology = MTLPrimitiveTopologyClassTriangle;

	for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
		MTLRenderPipelineColorAttachmentDescriptor *colorDesc = plDesc.colorAttachments[caIdx];
		colorDesc.pixelFormat = (MTLPixelFormat)p_key.pixel_formats[caIdx];
		colorDesc.writeMask = p_key.is_enabled(caIdx) ? MTLColorWriteMaskAll : MTLColorWriteMaskNone;
	}

	MTLPixelFormat mtlDepthFormat = p_key.depth_format();
	if (pixFmts.isDepthFormat(mtlDepthFormat)) {
		plDesc.depthAttachmentPixelFormat = mtlDepthFormat;
	}

	MTLPixelFormat mtlStencilFormat = p_key.stencil_format();
	if (pixFmts.isStencilFormat(mtlStencilFormat)) {
		plDesc.stencilAttachmentPixelFormat = mtlStencilFormat;
	}

	MTLVertexDescriptor *vtxDesc = plDesc.vertexDescriptor;

	// Vertex attribute descriptors.
	MTLVertexAttributeDescriptorArray *vaDescArray = vtxDesc.attributes;
	MTLVertexAttributeDescriptor *vaDesc;
	NSUInteger vtxBuffIdx = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX);
	NSUInteger vtxStride = 0;

	// Vertex location.
	vaDesc = vaDescArray[0];
	vaDesc.format = MTLVertexFormatFloat4;
	vaDesc.bufferIndex = vtxBuffIdx;
	vaDesc.offset = vtxStride;
	vtxStride += sizeof(simd::float4);

	// Vertex attribute buffer.
	MTLVertexBufferLayoutDescriptorArray *vbDescArray = vtxDesc.layouts;
	MTLVertexBufferLayoutDescriptor *vbDesc = vbDescArray[vtxBuffIdx];
	vbDesc.stepFunction = MTLVertexStepFunctionPerVertex;
	vbDesc.stepRate = 1;
	vbDesc.stride = vtxStride;

	return [device_driver->get_device() newRenderPipelineStateWithDescriptor:plDesc error:p_error];
}
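
// Clear pipelines and depth-stencil states are cached: pipelines by their
// ClearAttKey, depth-stencil states by the four depth/stencil combinations.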
id<MTLRenderPipelineState> MDResourceCache::get_clear_render_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
	auto it = clear_states.find(p_key);
	if (it != clear_states.end()) {
		return it->value;
	}

	id<MTLRenderPipelineState> state = resource_factory->new_clear_pipeline_state(p_key, p_error);
	clear_states[p_key] = state;
	return state;
}

id<MTLDepthStencilState> MDResourceCache::get_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
	id<MTLDepthStencilState> __strong *val;
	if (p_use_depth && p_use_stencil) {
		val = &clear_depth_stencil_state.all;
	} else if (p_use_depth) {
		val = &clear_depth_stencil_state.depth_only;
	} else if (p_use_stencil) {
		val = &clear_depth_stencil_state.stencil_only;
	} else {
		val = &clear_depth_stencil_state.none;
	}
	DEV_ASSERT(val != nullptr);

	if (*val == nil) {
		*val = resource_factory->new_depth_stencil_state(p_use_depth, p_use_stencil);
	}
	return *val;
}

static const char *SHADER_STAGE_NAMES[] = {
	[RD::SHADER_STAGE_VERTEX] = "vert",
	[RD::SHADER_STAGE_FRAGMENT] = "frag",
	[RD::SHADER_STAGE_TESSELATION_CONTROL] = "tess_ctrl",
	[RD::SHADER_STAGE_TESSELATION_EVALUATION] = "tess_eval",
	[RD::SHADER_STAGE_COMPUTE] = "comp",
};

void ShaderCacheEntry::notify_free() const {
	owner.shader_cache_free_entry(key);
}
@interface MDLibrary ()
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry;
@end

/// Loads the MTLLibrary when the library is first accessed.
@interface MDLazyLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	std::shared_mutex _mu;
	bool _loaded;
	id<MTLDevice> _device;
	NSString *_source;
	MTLCompileOptions *_options;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
							source:(NSString *)source
						   options:(MTLCompileOptions *)options;
@end

/// Loads the MTLLibrary immediately on initialization, using an asynchronous API.
@interface MDImmediateLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	std::mutex _cv_mutex;
	std::condition_variable _cv;
	std::atomic<bool> _complete;
	bool _ready;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
							source:(NSString *)source
						   options:(MTLCompileOptions *)options;
@end
@implementation MDLibrary

+ (instancetype)newLibraryWithCacheEntry:(ShaderCacheEntry *)entry
								  device:(id<MTLDevice>)device
								  source:(NSString *)source
								 options:(MTLCompileOptions *)options
								strategy:(ShaderLoadStrategy)strategy {
	switch (strategy) {
		case ShaderLoadStrategy::DEFAULT:
			[[fallthrough]];
		default:
			return [[MDImmediateLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
		case ShaderLoadStrategy::LAZY:
			return [[MDLazyLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
	}
}

- (id<MTLLibrary>)library {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

- (NSError *)error {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

- (void)setLabel:(NSString *)label {
}

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry {
	self = [super init];
	_entry = entry;
	_entry->library = self;
	return self;
}

- (void)dealloc {
	_entry->notify_free();
}

@end
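
// MDImmediateLibrary kicks off compilation in its initializer via the
// asynchronous newLibraryWithSource API; the library and error accessors
// block on a condition variable until the completion handler has run.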
@implementation MDImmediateLibrary

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
							source:(NSString *)source
						   options:(MTLCompileOptions *)options {
	self = [super initWithCacheEntry:entry];
	_complete = false;
	_ready = false;

	__block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
	os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
			"shader_name=%{public}s stage=%{public}s hash=%X",
			entry->name.get_data(), SHADER_STAGE_NAMES[entry->stage], entry->key.short_sha());

	[device newLibraryWithSource:source
						 options:options
			   completionHandler:^(id<MTLLibrary> library, NSError *error) {
				   os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");
				   self->_library = library;
				   self->_error = error;
				   if (error) {
					   ERR_PRINT(vformat(U"Error compiling shader %s: %s", entry->name.get_data(), error.localizedDescription.UTF8String));
				   }

				   {
					   std::lock_guard<std::mutex> lock(self->_cv_mutex);
					   _ready = true;
				   }
				   _cv.notify_all();
				   _complete = true;
			   }];
	return self;
}

- (id<MTLLibrary>)library {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _library;
}

- (NSError *)error {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _error;
}

@end
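
// MDLazyLibrary defers compilation until the library is first requested,
// using a double-checked pattern over a shared_mutex: readers take the
// shared lock on the fast path, and the first accessor takes the exclusive
// lock, compiles synchronously, and drops the device/source references.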
@implementation MDLazyLibrary

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
							source:(NSString *)source
						   options:(MTLCompileOptions *)options {
	self = [super initWithCacheEntry:entry];
	_device = device;
	_source = source;
	_options = options;

	return self;
}

- (void)load {
	{
		std::shared_lock<std::shared_mutex> lock(_mu);
		if (_loaded) {
			return;
		}
	}

	std::unique_lock<std::shared_mutex> lock(_mu);
	if (_loaded) {
		return;
	}

	__block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
	os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
			"shader_name=%{public}s stage=%{public}s hash=%X",
			_entry->name.get_data(), SHADER_STAGE_NAMES[_entry->stage], _entry->key.short_sha());
	NSError *error = nil;
	_library = [_device newLibraryWithSource:_source options:_options error:&error];
	// Store the compile error so the error accessor can surface it; the
	// original left _error unset on this path.
	_error = error;
	os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");

	_device = nil;
	_source = nil;
	_options = nil;
	_loaded = true;
}

- (id<MTLLibrary>)library {
	[self load];
	return _library;
}

- (NSError *)error {
	[self load];
	return _error;
}

@end