metal_objects.mm 76 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209
  1. /**************************************************************************/
  2. /* metal_objects.mm */
  3. /**************************************************************************/
  4. /* This file is part of: */
  5. /* GODOT ENGINE */
  6. /* https://godotengine.org */
  7. /**************************************************************************/
  8. /* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
  9. /* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
  10. /* */
  11. /* Permission is hereby granted, free of charge, to any person obtaining */
  12. /* a copy of this software and associated documentation files (the */
  13. /* "Software"), to deal in the Software without restriction, including */
  14. /* without limitation the rights to use, copy, modify, merge, publish, */
  15. /* distribute, sublicense, and/or sell copies of the Software, and to */
  16. /* permit persons to whom the Software is furnished to do so, subject to */
  17. /* the following conditions: */
  18. /* */
  19. /* The above copyright notice and this permission notice shall be */
  20. /* included in all copies or substantial portions of the Software. */
  21. /* */
  22. /* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
  23. /* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
  24. /* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
  25. /* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
  26. /* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
  27. /* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
  28. /* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
  29. /**************************************************************************/
  30. /**************************************************************************/
  31. /* */
  32. /* Portions of this code were derived from MoltenVK. */
  33. /* */
  34. /* Copyright (c) 2015-2023 The Brenwill Workshop Ltd. */
  35. /* (http://www.brenwill.com) */
  36. /* */
  37. /* Licensed under the Apache License, Version 2.0 (the "License"); */
  38. /* you may not use this file except in compliance with the License. */
  39. /* You may obtain a copy of the License at */
  40. /* */
  41. /* http://www.apache.org/licenses/LICENSE-2.0 */
  42. /* */
  43. /* Unless required by applicable law or agreed to in writing, software */
  44. /* distributed under the License is distributed on an "AS IS" BASIS, */
  45. /* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
  46. /* implied. See the License for the specific language governing */
  47. /* permissions and limitations under the License. */
  48. /**************************************************************************/
  49. #import "metal_objects.h"
  50. #import "metal_utils.h"
  51. #import "pixel_formats.h"
  52. #import "rendering_device_driver_metal.h"
  53. #import "rendering_shader_container_metal.h"
  54. #import <os/signpost.h>
  55. // We have to undefine these macros because they are defined in NSObjCRuntime.h.
  56. #undef MIN
  57. #undef MAX
// Begins recording by acquiring a fresh MTLCommandBuffer from the queue.
// Must not be called while a previous command buffer is still outstanding.
void MDCommandBuffer::begin() {
	DEV_ASSERT(commandBuffer == nil);
	commandBuffer = queue.commandBuffer;
}
  62. void MDCommandBuffer::end() {
  63. switch (type) {
  64. case MDCommandBufferStateType::None:
  65. return;
  66. case MDCommandBufferStateType::Render:
  67. return render_end_pass();
  68. case MDCommandBufferStateType::Compute:
  69. return _end_compute_dispatch();
  70. case MDCommandBufferStateType::Blit:
  71. return _end_blit();
  72. }
  73. }
// Finishes any open encoder, submits the command buffer to the GPU, and
// releases our reference (a committed MTLCommandBuffer may not be reused).
void MDCommandBuffer::commit() {
	end();
	[commandBuffer commit];
	commandBuffer = nil;
}
// Binds a render or compute pipeline, creating the matching encoder on demand.
// For render pipelines this also computes which state needs re-binding
// (pipeline, raster, depth, blend, uniforms) before the next draw.
void MDCommandBuffer::bind_pipeline(RDD::PipelineID p_pipeline) {
	MDPipeline *p = (MDPipeline *)(p_pipeline.id);

	// End current encoder if it is a compute encoder or blit encoder,
	// as they do not have a defined end boundary in the RDD like render.
	if (type == MDCommandBufferStateType::Compute) {
		_end_compute_dispatch();
	} else if (type == MDCommandBufferStateType::Blit) {
		_end_blit();
	}

	if (p->type == MDPipelineType::Render) {
		// A render pipeline may only be bound inside an active render pass.
		DEV_ASSERT(type == MDCommandBufferStateType::Render);
		MDRenderPipeline *rp = (MDRenderPipeline *)p;

		if (render.encoder == nil) {
			// This error would happen if the render pass failed.
			ERR_FAIL_NULL_MSG(render.desc, "Render pass descriptor is null.");

			// This condition occurs when there are no attachments when calling render_next_subpass()
			// and is due to the SUPPORTS_FRAGMENT_SHADER_WITH_ONLY_SIDE_EFFECTS flag.
			render.desc.defaultRasterSampleCount = static_cast<NSUInteger>(rp->sample_count);

// NOTE(sgc): This is to test rdar://FB13605547 and will be deleted once fix is confirmed.
#if 0
			if (render.pipeline->sample_count == 4) {
				static id<MTLTexture> tex = nil;
				static id<MTLTexture> res_tex = nil;
				static dispatch_once_t onceToken;
				dispatch_once(&onceToken, ^{
					Size2i sz = render.frameBuffer->size;
					MTLTextureDescriptor *td = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatRGBA8Unorm width:sz.width height:sz.height mipmapped:NO];
					td.textureType = MTLTextureType2DMultisample;
					td.storageMode = MTLStorageModeMemoryless;
					td.usage = MTLTextureUsageRenderTarget;
					td.sampleCount = render.pipeline->sample_count;
					tex = [device_driver->get_device() newTextureWithDescriptor:td];

					td.textureType = MTLTextureType2D;
					td.storageMode = MTLStorageModePrivate;
					td.usage = MTLTextureUsageShaderWrite;
					td.sampleCount = 1;
					res_tex = [device_driver->get_device() newTextureWithDescriptor:td];
				});
				render.desc.colorAttachments[0].texture = tex;
				render.desc.colorAttachments[0].loadAction = MTLLoadActionClear;
				render.desc.colorAttachments[0].storeAction = MTLStoreActionMultisampleResolve;
				render.desc.colorAttachments[0].resolveTexture = res_tex;
			}
#endif

			// Lazily create the render encoder from the subpass descriptor.
			render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:render.desc];
		}

		if (render.pipeline != rp) {
			render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_RASTER));
			// Mark all uniforms as dirty, as variants of a shader pipeline may have a different entry point ABI,
			// due to setting force_active_argument_buffer_resources = true for spirv_cross::CompilerMSL::Options.
			// As a result, uniform sets with the same layout will generate redundant binding warnings when
			// capturing a Metal frame in Xcode.
			//
			// If we don't mark as dirty, then some bindings will generate a validation error.
			render.mark_uniforms_dirty();
			// Only re-apply depth/stencil state when it actually differs.
			if (render.pipeline != nullptr && render.pipeline->depth_stencil != rp->depth_stencil) {
				render.dirty.set_flag(RenderState::DIRTY_DEPTH);
			}
			if (rp->raster_state.blend.enabled) {
				render.dirty.set_flag(RenderState::DIRTY_BLEND);
			}
			render.pipeline = rp;
		}
	} else if (p->type == MDPipelineType::Compute) {
		// Compute pipelines start their own encoder immediately.
		DEV_ASSERT(type == MDCommandBufferStateType::None);
		type = MDCommandBufferStateType::Compute;

		compute.pipeline = (MDComputePipeline *)p;
		compute.encoder = commandBuffer.computeCommandEncoder;
		[compute.encoder setComputePipelineState:compute.pipeline->state];
	}
}
  150. id<MTLBlitCommandEncoder> MDCommandBuffer::blit_command_encoder() {
  151. switch (type) {
  152. case MDCommandBufferStateType::None:
  153. break;
  154. case MDCommandBufferStateType::Render:
  155. render_end_pass();
  156. break;
  157. case MDCommandBufferStateType::Compute:
  158. _end_compute_dispatch();
  159. break;
  160. case MDCommandBufferStateType::Blit:
  161. return blit.encoder;
  162. }
  163. type = MDCommandBufferStateType::Blit;
  164. blit.encoder = commandBuffer.blitCommandEncoder;
  165. return blit.encoder;
  166. }
  167. void MDCommandBuffer::encodeRenderCommandEncoderWithDescriptor(MTLRenderPassDescriptor *p_desc, NSString *p_label) {
  168. switch (type) {
  169. case MDCommandBufferStateType::None:
  170. break;
  171. case MDCommandBufferStateType::Render:
  172. render_end_pass();
  173. break;
  174. case MDCommandBufferStateType::Compute:
  175. _end_compute_dispatch();
  176. break;
  177. case MDCommandBufferStateType::Blit:
  178. _end_blit();
  179. break;
  180. }
  181. id<MTLRenderCommandEncoder> enc = [commandBuffer renderCommandEncoderWithDescriptor:p_desc];
  182. if (p_label != nil) {
  183. [enc pushDebugGroup:p_label];
  184. [enc popDebugGroup];
  185. }
  186. [enc endEncoding];
  187. }
  188. #pragma mark - Render Commands
  189. void MDCommandBuffer::render_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
  190. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  191. MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
  192. if (render.uniform_sets.size() <= p_set_index) {
  193. uint32_t s = render.uniform_sets.size();
  194. render.uniform_sets.resize(p_set_index + 1);
  195. // Set intermediate values to null.
  196. std::fill(&render.uniform_sets[s], &render.uniform_sets[p_set_index] + 1, nullptr);
  197. }
  198. if (render.uniform_sets[p_set_index] != set) {
  199. render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
  200. render.uniform_set_mask |= 1ULL << p_set_index;
  201. render.uniform_sets[p_set_index] = set;
  202. }
  203. }
  204. void MDCommandBuffer::render_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
  205. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  206. for (size_t i = 0; i < p_set_count; ++i) {
  207. MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
  208. uint32_t index = p_first_set_index + i;
  209. if (render.uniform_sets.size() <= index) {
  210. uint32_t s = render.uniform_sets.size();
  211. render.uniform_sets.resize(index + 1);
  212. // Set intermediate values to null.
  213. std::fill(&render.uniform_sets[s], &render.uniform_sets[index] + 1, nullptr);
  214. }
  215. if (render.uniform_sets[index] != set) {
  216. render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
  217. render.uniform_set_mask |= 1ULL << index;
  218. render.uniform_sets[index] = set;
  219. }
  220. }
  221. }
// Clears regions of the current subpass attachments mid-pass by drawing
// rect-covering triangles with a dedicated clear pipeline, so the render
// pass does not need to be interrupted.
void MDCommandBuffer::render_clear_attachments(VectorView<RDD::AttachmentClear> p_attachment_clears, VectorView<Rect2i> p_rects) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	const MDSubpass &subpass = render.get_subpass();

	// 6 vertices (two triangles) per rect, replicated per view for multi-view.
	uint32_t vertex_count = p_rects.size() * 6 * subpass.view_count;
	simd::float4 *vertices = ALLOCA_ARRAY(simd::float4, vertex_count);
	simd::float4 clear_colors[ClearAttKey::ATTACHMENT_COUNT];

	// Clamp the clear geometry to the declared render area.
	Size2i size = render.frameBuffer->size;
	Rect2i render_area = render.clip_to_render_area({ { 0, 0 }, size });
	size = Size2i(render_area.position.x + render_area.size.width, render_area.position.y + render_area.size.height);
	_populate_vertices(vertices, size, p_rects);

	// Key selecting the clear pipeline variant (formats, sample count, layering).
	ClearAttKey key;
	key.sample_count = render.pass->get_sample_count();
	if (subpass.view_count > 1) {
		key.enable_layered_rendering();
	}

	float depth_value = 0;
	uint32_t stencil_value = 0;
	for (uint32_t i = 0; i < p_attachment_clears.size(); i++) {
		RDD::AttachmentClear const &attClear = p_attachment_clears[i];
		uint32_t attachment_index;
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			attachment_index = attClear.color_attachment;
		} else {
			// Depth/stencil clears target the subpass depth-stencil attachment.
			attachment_index = subpass.depth_stencil_reference.attachment;
		}

		MDAttachment const &mda = render.pass->attachments[attachment_index];
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			key.set_color_format(attachment_index, mda.format);
			clear_colors[attachment_index] = {
				attClear.value.color.r,
				attClear.value.color.g,
				attClear.value.color.b,
				attClear.value.color.a
			};
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT)) {
			key.set_depth_format(mda.format);
			depth_value = attClear.value.depth;
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT)) {
			key.set_stencil_format(mda.format);
			stencil_value = attClear.value.stencil;
		}
	}

	// The depth clear value is passed through a reserved slot of the same
	// color array that is sent to the shaders below.
	clear_colors[ClearAttKey::DEPTH_INDEX] = {
		depth_value,
		depth_value,
		depth_value,
		depth_value
	};

	id<MTLRenderCommandEncoder> enc = render.encoder;

	MDResourceCache &cache = device_driver->get_resource_cache();

	[enc pushDebugGroup:@"ClearAttachments"];
	[enc setRenderPipelineState:cache.get_clear_render_pipeline_state(key, nil)];
	[enc setDepthStencilState:cache.get_depth_stencil_state(
								  key.is_depth_enabled(),
								  key.is_stencil_enabled())];
	[enc setStencilReferenceValue:stencil_value];
	// Neutral raster state so the clear quad is always rasterized as-is.
	[enc setCullMode:MTLCullModeNone];
	[enc setTriangleFillMode:MTLTriangleFillModeFill];
	[enc setDepthBias:0 slopeScale:0 clamp:0];
	[enc setViewport:{ 0, 0, (double)size.width, (double)size.height, 0.0, 1.0 }];
	[enc setScissorRect:{ 0, 0, (NSUInteger)size.width, (NSUInteger)size.height }];

	[enc setVertexBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setFragmentBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setVertexBytes:vertices length:vertex_count * sizeof(vertices[0]) atIndex:device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX)];

	[enc drawPrimitives:MTLPrimitiveTypeTriangle vertexStart:0 vertexCount:vertex_count];
	[enc popDebugGroup];

	// The clear draw overwrote most encoder state; flag everything it touched
	// so it is re-applied before the next user draw.
	render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_DEPTH | RenderState::DIRTY_RASTER));
	render.mark_uniforms_dirty({ 0 }); // Mark index 0 dirty, if there is already a binding for index 0.
	render.mark_viewport_dirty();
	render.mark_scissors_dirty();
	render.mark_vertex_dirty();
	render.mark_blend_dirty();
}
// Applies all state flagged dirty to the current render command encoder,
// then clears the dirty mask. Called before issuing draws so the encoder
// matches the tracked RenderState.
void MDCommandBuffer::_render_set_dirty_state() {
	_render_bind_uniform_sets();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		// Multi-view rendering: pass the view range to both shader stages.
		uint32_t view_range[2] = { 0, subpass.view_count };
		[render.encoder setVertexBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
		[render.encoder setFragmentBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_PIPELINE)) {
		[render.encoder setRenderPipelineState:render.pipeline->state];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VIEWPORT)) {
		[render.encoder setViewports:render.viewports.ptr() count:render.viewports.size()];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_DEPTH)) {
		[render.encoder setDepthStencilState:render.pipeline->depth_stencil];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_RASTER)) {
		render.pipeline->raster_state.apply(render.encoder);
	}

	if (render.dirty.has_flag(RenderState::DIRTY_SCISSOR) && !render.scissors.is_empty()) {
		// Scissor rects must not extend past the render area.
		size_t len = render.scissors.size();
		MTLScissorRect *rects = ALLOCA_ARRAY(MTLScissorRect, len);
		for (size_t i = 0; i < len; i++) {
			rects[i] = render.clip_to_render_area(render.scissors[i]);
		}
		[render.encoder setScissorRects:rects count:len];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_BLEND) && render.blend_constants.has_value()) {
		[render.encoder setBlendColorRed:render.blend_constants->r green:render.blend_constants->g blue:render.blend_constants->b alpha:render.blend_constants->a];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VERTEX)) {
		// Vertex buffers occupy the tail of the Metal buffer argument table;
		// the first Metal index is derived from the highest binding number.
		// NOTE(review): assumes at least one vertex buffer is bound whenever
		// DIRTY_VERTEX is set — p_binding_count == 0 would underflow; confirm.
		uint32_t p_binding_count = render.vertex_buffers.size();
		uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
		[render.encoder setVertexBuffers:render.vertex_buffers.ptr()
								 offsets:render.vertex_offsets.ptr()
							   withRange:NSMakeRange(first, p_binding_count)];
	}

	render.dirty.clear();
}
  337. void MDCommandBuffer::render_set_viewport(VectorView<Rect2i> p_viewports) {
  338. render.viewports.resize(p_viewports.size());
  339. for (uint32_t i = 0; i < p_viewports.size(); i += 1) {
  340. Rect2i const &vp = p_viewports[i];
  341. render.viewports[i] = {
  342. .originX = static_cast<double>(vp.position.x),
  343. .originY = static_cast<double>(vp.position.y),
  344. .width = static_cast<double>(vp.size.width),
  345. .height = static_cast<double>(vp.size.height),
  346. .znear = 0.0,
  347. .zfar = 1.0,
  348. };
  349. }
  350. render.dirty.set_flag(RenderState::DIRTY_VIEWPORT);
  351. }
  352. void MDCommandBuffer::render_set_scissor(VectorView<Rect2i> p_scissors) {
  353. render.scissors.resize(p_scissors.size());
  354. for (uint32_t i = 0; i < p_scissors.size(); i += 1) {
  355. Rect2i const &vp = p_scissors[i];
  356. render.scissors[i] = {
  357. .x = static_cast<NSUInteger>(vp.position.x),
  358. .y = static_cast<NSUInteger>(vp.position.y),
  359. .width = static_cast<NSUInteger>(vp.size.width),
  360. .height = static_cast<NSUInteger>(vp.size.height),
  361. };
  362. }
  363. render.dirty.set_flag(RenderState::DIRTY_SCISSOR);
  364. }
  365. void MDCommandBuffer::render_set_blend_constants(const Color &p_constants) {
  366. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  367. if (render.blend_constants != p_constants) {
  368. render.blend_constants = p_constants;
  369. render.dirty.set_flag(RenderState::DIRTY_BLEND);
  370. }
  371. }
// Merges this set's per-usage resource lists into p_dst, keeping each
// destination vector sorted (by object pointer value) and duplicate-free.
void BoundUniformSet::merge_into(ResourceUsageMap &p_dst) const {
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : usage_to_resources) {
		ResourceVector *resources = p_dst.getptr(keyval.key);
		if (resources == nullptr) {
			resources = &p_dst.insert(keyval.key, ResourceVector())->value;
		}
		// Reserve space for the new resources, assuming they are all added.
		// The reserve also keeps resources_ptr below valid across the
		// insert() calls in the merge loop (no reallocation can occur).
		resources->reserve(resources->size() + keyval.value.size());

		uint32_t i = 0, j = 0;
		__unsafe_unretained id<MTLResource> *resources_ptr = resources->ptr();
		const __unsafe_unretained id<MTLResource> *keyval_ptr = keyval.value.ptr();
		// 2-way merge: walk both sorted vectors in lockstep, inserting
		// elements of keyval that are missing from resources and skipping
		// elements already present in both.
		while (i < resources->size() && j < keyval.value.size()) {
			if (resources_ptr[i] < keyval_ptr[j]) {
				i++;
			} else if (resources_ptr[i] > keyval_ptr[j]) {
				resources->insert(i, keyval_ptr[j]);
				i++;
				j++;
			} else {
				// Same resource in both; keep the single existing entry.
				i++;
				j++;
			}
		}
		// Append the remaining resources.
		for (; j < keyval.value.size(); j++) {
			resources->push_back(keyval_ptr[j]);
		}
	}
}
  402. void MDCommandBuffer::_render_bind_uniform_sets() {
  403. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  404. if (!render.dirty.has_flag(RenderState::DIRTY_UNIFORMS)) {
  405. return;
  406. }
  407. render.dirty.clear_flag(RenderState::DIRTY_UNIFORMS);
  408. uint64_t set_uniforms = render.uniform_set_mask;
  409. render.uniform_set_mask = 0;
  410. MDRenderShader *shader = render.pipeline->shader;
  411. while (set_uniforms != 0) {
  412. // Find the index of the next set bit.
  413. uint32_t index = (uint32_t)__builtin_ctzll(set_uniforms);
  414. // Clear the set bit.
  415. set_uniforms &= (set_uniforms - 1);
  416. MDUniformSet *set = render.uniform_sets[index];
  417. if (set == nullptr || index >= (uint32_t)shader->sets.size()) {
  418. continue;
  419. }
  420. set->bind_uniforms(shader, render, index);
  421. }
  422. }
  423. void MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, Size2i p_fb_size, VectorView<Rect2i> p_rects) {
  424. uint32_t idx = 0;
  425. for (uint32_t i = 0; i < p_rects.size(); i++) {
  426. Rect2i const &rect = p_rects[i];
  427. idx = _populate_vertices(p_vertices, idx, rect, p_fb_size);
  428. }
  429. }
  430. uint32_t MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, uint32_t p_index, Rect2i const &p_rect, Size2i p_fb_size) {
  431. // Determine the positions of the four edges of the
  432. // clear rectangle as a fraction of the attachment size.
  433. float leftPos = (float)(p_rect.position.x) / (float)p_fb_size.width;
  434. float rightPos = (float)(p_rect.size.width) / (float)p_fb_size.width + leftPos;
  435. float bottomPos = (float)(p_rect.position.y) / (float)p_fb_size.height;
  436. float topPos = (float)(p_rect.size.height) / (float)p_fb_size.height + bottomPos;
  437. // Transform to clip-space coordinates, which are bounded by (-1.0 < p < 1.0) in clip-space.
  438. leftPos = (leftPos * 2.0f) - 1.0f;
  439. rightPos = (rightPos * 2.0f) - 1.0f;
  440. bottomPos = (bottomPos * 2.0f) - 1.0f;
  441. topPos = (topPos * 2.0f) - 1.0f;
  442. simd::float4 vtx;
  443. uint32_t idx = p_index;
  444. uint32_t endLayer = render.get_subpass().view_count;
  445. for (uint32_t layer = 0; layer < endLayer; layer++) {
  446. vtx.z = 0.0;
  447. vtx.w = (float)layer;
  448. // Top left vertex - First triangle.
  449. vtx.y = topPos;
  450. vtx.x = leftPos;
  451. p_vertices[idx++] = vtx;
  452. // Bottom left vertex.
  453. vtx.y = bottomPos;
  454. vtx.x = leftPos;
  455. p_vertices[idx++] = vtx;
  456. // Bottom right vertex.
  457. vtx.y = bottomPos;
  458. vtx.x = rightPos;
  459. p_vertices[idx++] = vtx;
  460. // Bottom right vertex - Second triangle.
  461. p_vertices[idx++] = vtx;
  462. // Top right vertex.
  463. vtx.y = topPos;
  464. vtx.x = rightPos;
  465. p_vertices[idx++] = vtx;
  466. // Top left vertex.
  467. vtx.y = topPos;
  468. vtx.x = leftPos;
  469. p_vertices[idx++] = vtx;
  470. }
  471. return idx;
  472. }
  473. void MDCommandBuffer::render_begin_pass(RDD::RenderPassID p_render_pass, RDD::FramebufferID p_frameBuffer, RDD::CommandBufferType p_cmd_buffer_type, const Rect2i &p_rect, VectorView<RDD::RenderPassClearValue> p_clear_values) {
  474. DEV_ASSERT(commandBuffer != nil);
  475. end();
  476. MDRenderPass *pass = (MDRenderPass *)(p_render_pass.id);
  477. MDFrameBuffer *fb = (MDFrameBuffer *)(p_frameBuffer.id);
  478. type = MDCommandBufferStateType::Render;
  479. render.pass = pass;
  480. render.current_subpass = UINT32_MAX;
  481. render.render_area = p_rect;
  482. render.clear_values.resize(p_clear_values.size());
  483. for (uint32_t i = 0; i < p_clear_values.size(); i++) {
  484. render.clear_values[i] = p_clear_values[i];
  485. }
  486. render.is_rendering_entire_area = (p_rect.position == Point2i(0, 0)) && p_rect.size == fb->size;
  487. render.frameBuffer = fb;
  488. render_next_subpass();
  489. }
  490. void MDCommandBuffer::_end_render_pass() {
  491. MDFrameBuffer const &fb_info = *render.frameBuffer;
  492. MDSubpass const &subpass = render.get_subpass();
  493. PixelFormats &pf = device_driver->get_pixel_formats();
  494. for (uint32_t i = 0; i < subpass.resolve_references.size(); i++) {
  495. uint32_t color_index = subpass.color_references[i].attachment;
  496. uint32_t resolve_index = subpass.resolve_references[i].attachment;
  497. DEV_ASSERT((color_index == RDD::AttachmentReference::UNUSED) == (resolve_index == RDD::AttachmentReference::UNUSED));
  498. if (color_index == RDD::AttachmentReference::UNUSED || !fb_info.has_texture(color_index)) {
  499. continue;
  500. }
  501. id<MTLTexture> resolve_tex = fb_info.get_texture(resolve_index);
  502. CRASH_COND_MSG(!flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve), "not implemented: unresolvable texture types");
  503. // see: https://github.com/KhronosGroup/MoltenVK/blob/d20d13fe2735adb845636a81522df1b9d89c0fba/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm#L407
  504. }
  505. render.end_encoding();
  506. }
// Emits explicit clear operations for attachments whose load op requested a
// clear, covering only the current render area. Used when the pass does not
// cover the entire framebuffer, since a load-action clear would affect the
// whole attachment.
void MDCommandBuffer::_render_clear_render_area() {
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();
	// Determine whether the depth / stencil attachment requests a clear.
	uint32_t ds_index = subpass.depth_stencil_reference.attachment;
	bool clear_depth = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, false));
	bool clear_stencil = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, true));
	uint32_t color_count = subpass.color_references.size();
	// Worst case: every color attachment plus one combined depth / stencil entry.
	uint32_t clears_size = color_count + (clear_depth || clear_stencil ? 1 : 0);
	if (clears_size == 0) {
		return;
	}
	RDD::AttachmentClear *clears = ALLOCA_ARRAY(RDD::AttachmentClear, clears_size);
	uint32_t clears_count = 0;
	// Collect the color attachments that should be cleared for this subpass.
	for (uint32_t i = 0; i < color_count; i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx != RDD::AttachmentReference::UNUSED && pass.attachments[idx].shouldClear(subpass, false)) {
			clears[clears_count++] = { .aspect = RDD::TEXTURE_ASPECT_COLOR_BIT, .color_attachment = idx, .value = render.clear_values[idx] };
		}
	}
	if (clear_depth || clear_stencil) {
		MDAttachment const &attachment = pass.attachments[ds_index];
		BitField<RDD::TextureAspectBits> bits = {};
		// Only request aspects the attachment actually has.
		if (clear_depth && attachment.type & MDAttachmentType::Depth) {
			bits.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		}
		if (clear_stencil && attachment.type & MDAttachmentType::Stencil) {
			bits.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}
		clears[clears_count++] = { .aspect = bits, .color_attachment = ds_index, .value = render.clear_values[ds_index] };
	}
	// Nothing matched after filtering; avoid issuing an empty clear batch.
	if (clears_count == 0) {
		return;
	}
	render_clear_attachments(VectorView(clears, clears_count), { render.render_area });
}
// Advances to the next subpass (or starts subpass 0 on the first call from
// render_begin_pass), building the MTLRenderPassDescriptor for it and, when
// attachments exist, creating the render command encoder.
void MDCommandBuffer::render_next_subpass() {
	DEV_ASSERT(commandBuffer != nil);
	if (render.current_subpass == UINT32_MAX) {
		// First subpass of this pass; there is no previous subpass to end.
		render.current_subpass = 0;
	} else {
		_end_render_pass();
		render.current_subpass++;
	}
	MDFrameBuffer const &fb = *render.frameBuffer;
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();
	MTLRenderPassDescriptor *desc = MTLRenderPassDescriptor.renderPassDescriptor;
	// Multiview renders into a texture array with one layer per view.
	if (subpass.view_count > 1) {
		desc.renderTargetArrayLength = subpass.view_count;
	}
	PixelFormats &pf = device_driver->get_pixel_formats();
	uint32_t attachmentCount = 0;
	// Configure each color attachment referenced by the subpass.
	for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx == RDD::AttachmentReference::UNUSED) {
			continue;
		}
		attachmentCount += 1;
		MTLRenderPassColorAttachmentDescriptor *ca = desc.colorAttachments[i];
		uint32_t resolveIdx = subpass.resolve_references.is_empty() ? RDD::AttachmentReference::UNUSED : subpass.resolve_references[i].attachment;
		bool has_resolve = resolveIdx != RDD::AttachmentReference::UNUSED;
		bool can_resolve = true;
		if (resolveIdx != RDD::AttachmentReference::UNUSED) {
			id<MTLTexture> resolve_tex = fb.get_texture(resolveIdx);
			// The resolve target's pixel format must support resolving.
			can_resolve = flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve);
			if (can_resolve) {
				ca.resolveTexture = resolve_tex;
			} else {
				CRASH_NOW_MSG("unimplemented: using a texture format that is not supported for resolve");
			}
		}
		MDAttachment const &attachment = pass.attachments[idx];
		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer color texture is null.");
		if ((attachment.type & MDAttachmentType::Color)) {
			// configureDescriptor returning true indicates a clear load action,
			// so the clear color must be supplied.
			if (attachment.configureDescriptor(ca, pf, subpass, tex, render.is_rendering_entire_area, has_resolve, can_resolve, false)) {
				Color clearColor = render.clear_values[idx].color;
				ca.clearColor = MTLClearColorMake(clearColor.r, clearColor.g, clearColor.b, clearColor.a);
			}
		}
	}
	// Configure the depth / stencil attachment, when present.
	if (subpass.depth_stencil_reference.attachment != RDD::AttachmentReference::UNUSED) {
		attachmentCount += 1;
		uint32_t idx = subpass.depth_stencil_reference.attachment;
		MDAttachment const &attachment = pass.attachments[idx];
		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer depth / stencil texture is null.");
		if (attachment.type & MDAttachmentType::Depth) {
			MTLRenderPassDepthAttachmentDescriptor *da = desc.depthAttachment;
			if (attachment.configureDescriptor(da, pf, subpass, tex, render.is_rendering_entire_area, false, false, false)) {
				da.clearDepth = render.clear_values[idx].depth;
			}
		}
		if (attachment.type & MDAttachmentType::Stencil) {
			MTLRenderPassStencilAttachmentDescriptor *sa = desc.stencilAttachment;
			if (attachment.configureDescriptor(sa, pf, subpass, tex, render.is_rendering_entire_area, false, false, true)) {
				sa.clearStencil = render.clear_values[idx].stencil;
			}
		}
	}
	// Clamp the render target extent to the framebuffer size, with a 1x1 minimum.
	desc.renderTargetWidth = MAX((NSUInteger)MIN(render.render_area.position.x + render.render_area.size.width, fb.size.width), 1u);
	desc.renderTargetHeight = MAX((NSUInteger)MIN(render.render_area.position.y + render.render_area.size.height, fb.size.height), 1u);
	if (attachmentCount == 0) {
		// If there are no attachments, delay the creation of the encoder,
		// so we can use a matching sample count for the pipeline, by setting
		// the defaultRasterSampleCount from the pipeline's sample count.
		render.desc = desc;
	} else {
		render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:desc];
		if (!render.is_rendering_entire_area) {
			_render_clear_render_area();
		}
		// With a new encoder, all state is dirty.
		render.dirty.set_flag(RenderState::DIRTY_ALL);
	}
}
  623. void MDCommandBuffer::render_draw(uint32_t p_vertex_count,
  624. uint32_t p_instance_count,
  625. uint32_t p_base_vertex,
  626. uint32_t p_first_instance) {
  627. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  628. ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");
  629. _render_set_dirty_state();
  630. MDSubpass const &subpass = render.get_subpass();
  631. if (subpass.view_count > 1) {
  632. p_instance_count *= subpass.view_count;
  633. }
  634. DEV_ASSERT(render.dirty == 0);
  635. id<MTLRenderCommandEncoder> enc = render.encoder;
  636. [enc drawPrimitives:render.pipeline->raster_state.render_primitive
  637. vertexStart:p_base_vertex
  638. vertexCount:p_vertex_count
  639. instanceCount:p_instance_count
  640. baseInstance:p_first_instance];
  641. }
  642. void MDCommandBuffer::render_bind_vertex_buffers(uint32_t p_binding_count, const RDD::BufferID *p_buffers, const uint64_t *p_offsets) {
  643. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  644. render.vertex_buffers.resize(p_binding_count);
  645. render.vertex_offsets.resize(p_binding_count);
  646. // Reverse the buffers, as their bindings are assigned in descending order.
  647. for (uint32_t i = 0; i < p_binding_count; i += 1) {
  648. render.vertex_buffers[i] = rid::get(p_buffers[p_binding_count - i - 1]);
  649. render.vertex_offsets[i] = p_offsets[p_binding_count - i - 1];
  650. }
  651. if (render.encoder) {
  652. uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
  653. [render.encoder setVertexBuffers:render.vertex_buffers.ptr()
  654. offsets:render.vertex_offsets.ptr()
  655. withRange:NSMakeRange(first, p_binding_count)];
  656. render.dirty.clear_flag(RenderState::DIRTY_VERTEX);
  657. } else {
  658. render.dirty.set_flag(RenderState::DIRTY_VERTEX);
  659. }
  660. }
  661. void MDCommandBuffer::render_bind_index_buffer(RDD::BufferID p_buffer, RDD::IndexBufferFormat p_format, uint64_t p_offset) {
  662. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  663. render.index_buffer = rid::get(p_buffer);
  664. render.index_type = p_format == RDD::IndexBufferFormat::INDEX_BUFFER_FORMAT_UINT16 ? MTLIndexTypeUInt16 : MTLIndexTypeUInt32;
  665. render.index_offset = p_offset;
  666. }
  667. void MDCommandBuffer::render_draw_indexed(uint32_t p_index_count,
  668. uint32_t p_instance_count,
  669. uint32_t p_first_index,
  670. int32_t p_vertex_offset,
  671. uint32_t p_first_instance) {
  672. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  673. ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");
  674. _render_set_dirty_state();
  675. MDSubpass const &subpass = render.get_subpass();
  676. if (subpass.view_count > 1) {
  677. p_instance_count *= subpass.view_count;
  678. }
  679. id<MTLRenderCommandEncoder> enc = render.encoder;
  680. uint32_t index_offset = render.index_offset;
  681. index_offset += p_first_index * (render.index_type == MTLIndexTypeUInt16 ? sizeof(uint16_t) : sizeof(uint32_t));
  682. [enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
  683. indexCount:p_index_count
  684. indexType:render.index_type
  685. indexBuffer:render.index_buffer
  686. indexBufferOffset:index_offset
  687. instanceCount:p_instance_count
  688. baseVertex:p_vertex_offset
  689. baseInstance:p_first_instance];
  690. }
  691. void MDCommandBuffer::render_draw_indexed_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
  692. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  693. ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");
  694. _render_set_dirty_state();
  695. id<MTLRenderCommandEncoder> enc = render.encoder;
  696. id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
  697. NSUInteger indirect_offset = p_offset;
  698. for (uint32_t i = 0; i < p_draw_count; i++) {
  699. [enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
  700. indexType:render.index_type
  701. indexBuffer:render.index_buffer
  702. indexBufferOffset:0
  703. indirectBuffer:indirect_buffer
  704. indirectBufferOffset:indirect_offset];
  705. indirect_offset += p_stride;
  706. }
  707. }
// Not implemented for the Metal driver: indexed indirect draws whose draw
// count is itself read from a GPU buffer. Fails with an error if called.
void MDCommandBuffer::render_draw_indexed_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}
  711. void MDCommandBuffer::render_draw_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
  712. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  713. ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");
  714. _render_set_dirty_state();
  715. id<MTLRenderCommandEncoder> enc = render.encoder;
  716. id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
  717. NSUInteger indirect_offset = p_offset;
  718. for (uint32_t i = 0; i < p_draw_count; i++) {
  719. [enc drawPrimitives:render.pipeline->raster_state.render_primitive
  720. indirectBuffer:indirect_buffer
  721. indirectBufferOffset:indirect_offset];
  722. indirect_offset += p_stride;
  723. }
  724. }
// Not implemented for the Metal driver: indirect draws whose draw count is
// itself read from a GPU buffer. Fails with an error if called.
void MDCommandBuffer::render_draw_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}
  728. void MDCommandBuffer::render_end_pass() {
  729. DEV_ASSERT(type == MDCommandBufferStateType::Render);
  730. render.end_encoding();
  731. render.reset();
  732. type = MDCommandBufferStateType::None;
  733. }
  734. #pragma mark - RenderState
// Returns all per-pass render state to its defaults, releasing retained Metal
// objects (pass, framebuffer, pipeline, descriptor, encoder, index buffer) so
// the command buffer can begin a fresh render pass.
void MDCommandBuffer::RenderState::reset() {
	pass = nil;
	frameBuffer = nil;
	pipeline = nil;
	current_subpass = UINT32_MAX;
	render_area = {};
	is_rendering_entire_area = false;
	desc = nil;
	encoder = nil;
	index_buffer = nil;
	index_type = MTLIndexTypeUInt16;
	dirty = DIRTY_NONE;
	uniform_sets.clear();
	uniform_set_mask = 0;
	clear_values.clear();
	viewports.clear();
	scissors.clear();
	blend_constants.reset();
	vertex_buffers.clear();
	vertex_offsets.clear();
	// Keep the keys, as they are likely to be used again.
	for (KeyValue<StageResourceUsage, LocalVector<__unsafe_unretained id<MTLResource>>> &kv : resource_usage) {
		kv.value.clear();
	}
}
  760. void MDCommandBuffer::RenderState::end_encoding() {
  761. if (encoder == nil) {
  762. return;
  763. }
  764. // Bind all resources.
  765. for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
  766. if (keyval.value.is_empty()) {
  767. continue;
  768. }
  769. MTLResourceUsage vert_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_VERTEX);
  770. MTLResourceUsage frag_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_FRAGMENT);
  771. if (vert_usage == frag_usage) {
  772. [encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex | MTLRenderStageFragment];
  773. } else {
  774. if (vert_usage != 0) {
  775. [encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex];
  776. }
  777. if (frag_usage != 0) {
  778. [encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:frag_usage stages:MTLRenderStageFragment];
  779. }
  780. }
  781. }
  782. [encoder endEncoding];
  783. encoder = nil;
  784. }
  785. #pragma mark - ComputeState
  786. void MDCommandBuffer::ComputeState::end_encoding() {
  787. if (encoder == nil) {
  788. return;
  789. }
  790. // Bind all resources.
  791. for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
  792. if (keyval.value.is_empty()) {
  793. continue;
  794. }
  795. MTLResourceUsage usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_COMPUTE);
  796. if (usage != 0) {
  797. [encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:usage];
  798. }
  799. }
  800. [encoder endEncoding];
  801. encoder = nil;
  802. }
  803. #pragma mark - Compute
  804. void MDCommandBuffer::compute_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
  805. DEV_ASSERT(type == MDCommandBufferStateType::Compute);
  806. MDShader *shader = (MDShader *)(p_shader.id);
  807. MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
  808. set->bind_uniforms(shader, compute, p_set_index);
  809. }
  810. void MDCommandBuffer::compute_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
  811. DEV_ASSERT(type == MDCommandBufferStateType::Compute);
  812. MDShader *shader = (MDShader *)(p_shader.id);
  813. // TODO(sgc): Bind multiple buffers using [encoder setBuffers:offsets:withRange:]
  814. for (size_t i = 0u; i < p_set_count; ++i) {
  815. MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
  816. set->bind_uniforms(shader, compute, p_first_set_index + i);
  817. }
  818. }
  819. void MDCommandBuffer::compute_dispatch(uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
  820. DEV_ASSERT(type == MDCommandBufferStateType::Compute);
  821. MTLRegion region = MTLRegionMake3D(0, 0, 0, p_x_groups, p_y_groups, p_z_groups);
  822. id<MTLComputeCommandEncoder> enc = compute.encoder;
  823. [enc dispatchThreadgroups:region.size threadsPerThreadgroup:compute.pipeline->compute_state.local];
  824. }
  825. void MDCommandBuffer::compute_dispatch_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset) {
  826. DEV_ASSERT(type == MDCommandBufferStateType::Compute);
  827. id<MTLBuffer> indirectBuffer = rid::get(p_indirect_buffer);
  828. id<MTLComputeCommandEncoder> enc = compute.encoder;
  829. [enc dispatchThreadgroupsWithIndirectBuffer:indirectBuffer indirectBufferOffset:p_offset threadsPerThreadgroup:compute.pipeline->compute_state.local];
  830. }
  831. void MDCommandBuffer::_end_compute_dispatch() {
  832. DEV_ASSERT(type == MDCommandBufferStateType::Compute);
  833. compute.end_encoding();
  834. compute.reset();
  835. type = MDCommandBufferStateType::None;
  836. }
  837. void MDCommandBuffer::_end_blit() {
  838. DEV_ASSERT(type == MDCommandBufferStateType::Blit);
  839. [blit.encoder endEncoding];
  840. blit.reset();
  841. type = MDCommandBufferStateType::None;
  842. }
// Constructs a compute shader wrapper: forwards the name, uniform-set
// reflection data and binding model to MDShader, and retains the compiled
// Metal library containing the kernel function.
MDComputeShader::MDComputeShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_uses_argument_buffers,
		MDLibrary *p_kernel) :
		MDShader(p_name, p_sets, p_uses_argument_buffers), kernel(p_kernel) {
}
  849. void MDComputeShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
  850. DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Compute);
  851. if (push_constants.binding == (uint32_t)-1) {
  852. return;
  853. }
  854. id<MTLComputeCommandEncoder> enc = p_cb->compute.encoder;
  855. void const *ptr = p_data.ptr();
  856. size_t length = p_data.size() * sizeof(uint32_t);
  857. [enc setBytes:ptr length:length atIndex:push_constants.binding];
  858. }
// Constructs a render shader wrapper: forwards the name, uniform-set
// reflection data and binding model to MDShader, records whether the shader
// needs the multiview view-mask buffer, and retains the compiled vertex and
// fragment libraries.
MDRenderShader::MDRenderShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_needs_view_mask_buffer,
		bool p_uses_argument_buffers,
		MDLibrary *_Nonnull p_vert, MDLibrary *_Nonnull p_frag) :
		MDShader(p_name, p_sets, p_uses_argument_buffers),
		needs_view_mask_buffer(p_needs_view_mask_buffer),
		vert(p_vert),
		frag(p_frag) {
}
  869. void MDRenderShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
  870. DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Render);
  871. id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_cb->render.encoder;
  872. void const *ptr = p_data.ptr();
  873. size_t length = p_data.size() * sizeof(uint32_t);
  874. if (push_constants.vert.binding > -1) {
  875. [enc setVertexBytes:ptr length:length atIndex:push_constants.vert.binding];
  876. }
  877. if (push_constants.frag.binding > -1) {
  878. [enc setFragmentBytes:ptr length:length atIndex:push_constants.frag.binding];
  879. }
  880. }
  881. void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::RenderState &p_state, uint32_t p_set_index) {
  882. DEV_ASSERT(p_shader->uses_argument_buffers);
  883. DEV_ASSERT(p_state.encoder != nil);
  884. UniformSet const &set_info = p_shader->sets[p_set_index];
  885. id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;
  886. id<MTLDevice> __unsafe_unretained device = enc.device;
  887. BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage, p_set_index);
  888. // Set the buffer for the vertex stage.
  889. {
  890. uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_VERTEX);
  891. if (offset) {
  892. [enc setVertexBuffer:bus.buffer offset:*offset atIndex:p_set_index];
  893. }
  894. }
  895. // Set the buffer for the fragment stage.
  896. {
  897. uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_FRAGMENT);
  898. if (offset) {
  899. [enc setFragmentBuffer:bus.buffer offset:*offset atIndex:p_set_index];
  900. }
  901. }
  902. }
// Binds this set's uniforms directly to the render encoder (one setVertex* /
// setFragment* call per resource) for shaders that do not use Metal argument
// buffers. Each uniform is bound to the vertex and/or fragment stage based on
// the shader's reflection data.
void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::RenderState &p_state, uint32_t p_set_index) {
	DEV_ASSERT(!p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);
	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;
	UniformSet const &set = p_shader->sets[p_set_index];
	// Iterate the uniforms that exist in both the bound set and the shader's reflection.
	for (uint32_t i = 0; i < MIN(uniforms.size(), set.uniforms.size()); i++) {
		RDD::BoundUniform const &uniform = uniforms[i];
		const UniformInfo &ui = set.uniforms[i];
		// A uniform may need binding on the vertex stage, the fragment stage, or both.
		static const RDC::ShaderStage stage_usages[2] = { RDC::ShaderStage::SHADER_STAGE_VERTEX, RDC::ShaderStage::SHADER_STAGE_FRAGMENT };
		for (const RDC::ShaderStage stage : stage_usages) {
			ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);
			const BindingInfo *bi = ui.bindings.getptr(stage);
			if (bi == nullptr) {
				// No binding for this stage.
				continue;
			}
			if ((ui.active_stages & stage_usage) == 0) {
				// Not active for this state, so don't bind anything.
				continue;
			}
			switch (uniform.type) {
				case RDD::UNIFORM_TYPE_SAMPLER: {
					// Array of sampler states.
					size_t count = uniform.ids.size();
					id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						objects[j] = rid::get(uniform.ids[j].id);
					}
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
					} else {
						[enc setFragmentSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
					}
				} break;
				case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
					// ids holds interleaved (sampler, texture) pairs; the sampler uses
					// the secondary binding when one is declared.
					size_t count = uniform.ids.size() / 2;
					id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
					for (uint32_t j = 0; j < count; j += 1) {
						id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
						id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
						samplers[j] = sampler;
						textures[j] = texture;
					}
					const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
					if (sbi) {
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						} else {
							[enc setFragmentSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
					}
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexTextures:textures withRange:NSMakeRange(bi->index, count)];
					} else {
						[enc setFragmentTextures:textures withRange:NSMakeRange(bi->index, count)];
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE: {
					// Single texture or a texture array.
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_IMAGE: {
					// Storage image. For a single image with a secondary binding, also
					// bind the backing MTLBuffer (when the texture is buffer-backed).
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
						const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							// Texture views report their buffer via the parent texture.
							id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
							id<MTLBuffer> buf = tex.buffer;
							if (buf) {
								if (stage == RDD::SHADER_STAGE_VERTEX) {
									[enc setVertexBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								} else {
									[enc setFragmentBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
					CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
					// Input attachments are bound as plain textures.
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				default: {
					DEV_ASSERT(false);
				}
			}
		}
	}
}
  1070. void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::RenderState &p_state, uint32_t p_set_index) {
  1071. if (p_shader->uses_argument_buffers) {
  1072. bind_uniforms_argument_buffers(p_shader, p_state, p_set_index);
  1073. } else {
  1074. bind_uniforms_direct(p_shader, p_state, p_set_index);
  1075. }
  1076. }
  1077. void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state, uint32_t p_set_index) {
  1078. DEV_ASSERT(p_shader->uses_argument_buffers);
  1079. DEV_ASSERT(p_state.encoder != nil);
  1080. UniformSet const &set_info = p_shader->sets[p_set_index];
  1081. id<MTLComputeCommandEncoder> enc = p_state.encoder;
  1082. id<MTLDevice> device = enc.device;
  1083. BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage, p_set_index);
  1084. uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_COMPUTE);
  1085. if (offset) {
  1086. [enc setBuffer:bus.buffer offset:*offset atIndex:p_set_index];
  1087. }
  1088. }
// Binds this set's uniforms directly to the compute encoder (one setTexture /
// setBuffer / setSamplerState call per resource) for shaders that do not use
// Metal argument buffers. Only the compute stage exists here.
void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state, uint32_t p_set_index) {
	DEV_ASSERT(!p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);
	id<MTLComputeCommandEncoder> __unsafe_unretained enc = p_state.encoder;
	UniformSet const &set = p_shader->sets[p_set_index];
	for (uint32_t i = 0; i < uniforms.size(); i++) {
		RDD::BoundUniform const &uniform = uniforms[i];
		const UniformInfo &ui = set.uniforms[i];
		const RDC::ShaderStage stage = RDC::ShaderStage::SHADER_STAGE_COMPUTE;
		const ShaderStageUsage stage_usage = ShaderStageUsage(1 << stage);
		const BindingInfo *bi = ui.bindings.getptr(stage);
		if (bi == nullptr) {
			// No binding for this stage.
			continue;
		}
		if ((ui.active_stages & stage_usage) == 0) {
			// Not active for this state, so don't bind anything.
			continue;
		}
		switch (uniform.type) {
			case RDD::UNIFORM_TYPE_SAMPLER: {
				// Array of sampler states.
				size_t count = uniform.ids.size();
				id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (size_t j = 0; j < count; j += 1) {
					objects[j] = rid::get(uniform.ids[j].id);
				}
				[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
				// ids holds interleaved (sampler, texture) pairs; the sampler uses
				// the secondary binding when one is declared.
				size_t count = uniform.ids.size() / 2;
				id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
				id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (uint32_t j = 0; j < count; j += 1) {
					id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
					id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
					samplers[j] = sampler;
					textures[j] = texture;
				}
				const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
				if (sbi) {
					[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
				}
				[enc setTextures:textures withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE: {
				// Single texture or a texture array.
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_IMAGE: {
				// Storage image. For a single image with a secondary binding, also
				// bind the backing MTLBuffer (when the texture is buffer-backed).
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
					const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
					if (sbi) {
						// Texture views report their buffer via the parent texture.
						id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
						id<MTLBuffer> buf = tex.buffer;
						if (buf) {
							[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
						}
					}
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
				CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
				// Input attachments are bound as plain textures.
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			default: {
				DEV_ASSERT(false);
			}
		}
	}
}
  1206. void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state, uint32_t p_set_index) {
  1207. if (p_shader->uses_argument_buffers) {
  1208. bind_uniforms_argument_buffers(p_shader, p_state, p_set_index);
  1209. } else {
  1210. bind_uniforms_direct(p_shader, p_state, p_set_index);
  1211. }
  1212. }
// Returns the cached BoundUniformSet for p_shader, building it on first use:
// encodes every uniform of this set into a shared argument buffer (one region
// per shader stage) and records which Metal resources each stage uses with
// which usage flags. The usage information is merged into p_resource_usage so
// the caller can make the resources resident (useResource:usage:stages:).
BoundUniformSet &MDUniformSet::bound_uniform_set(MDShader *p_shader, id<MTLDevice> p_device, ResourceUsageMap &p_resource_usage, uint32_t p_set_index) {
	BoundUniformSet *sus = bound_uniforms.getptr(p_shader);
	if (sus != nullptr) {
		// Already encoded for this shader; just propagate the usage data.
		sus->merge_into(p_resource_usage);
		return *sus;
	}

	UniformSet const &set = p_shader->sets[p_set_index];

	// Accumulates, per Metal resource, the union of stage/usage flags.
	HashMap<id<MTLResource>, StageResourceUsage> bound_resources;
	auto add_usage = [&bound_resources](id<MTLResource> __unsafe_unretained res, RDD::ShaderStage stage, MTLResourceUsage usage) {
		StageResourceUsage *sru = bound_resources.getptr(res);
		if (sru == nullptr) {
			bound_resources.insert(res, stage_resource_usage(stage, usage));
		} else {
			*sru |= stage_resource_usage(stage, usage);
		}
	};

	id<MTLBuffer> enc_buffer = nil;
	if (set.buffer_size > 0) {
		// A single shared buffer holds the encoded arguments for all stages;
		// each stage's encoder writes at its own precomputed offset.
		MTLResourceOptions options = MTLResourceStorageModeShared | MTLResourceHazardTrackingModeTracked;
		enc_buffer = [p_device newBufferWithLength:set.buffer_size options:options];
		for (KeyValue<RDC::ShaderStage, id<MTLArgumentEncoder>> const &kv : set.encoders) {
			RDD::ShaderStage const stage = kv.key;
			ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);
			id<MTLArgumentEncoder> const enc = kv.value;

			[enc setArgumentBuffer:enc_buffer offset:set.offsets[stage]];

			for (uint32_t i = 0; i < uniforms.size(); i++) {
				RDD::BoundUniform const &uniform = uniforms[i];
				const UniformInfo &ui = set.uniforms[i];

				const BindingInfo *bi = ui.bindings.getptr(stage);
				if (bi == nullptr) {
					// No binding for this stage.
					continue;
				}

				if ((ui.active_stages & stage_usage) == 0) {
					// Not active for this state, so don't bind anything.
					continue;
				}

				switch (uniform.type) {
					case RDD::UNIFORM_TYPE_SAMPLER: {
						size_t count = uniform.ids.size();
						id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							objects[j] = rid::get(uniform.ids[j].id);
						}
						// Samplers are not MTLResources; no usage tracking needed.
						[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
						// ids are interleaved (sampler, texture) pairs.
						size_t count = uniform.ids.size() / 2;
						id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (uint32_t j = 0; j < count; j += 1) {
							id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
							id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
							samplers[j] = sampler;
							textures[j] = texture;
							add_usage(texture, stage, bi->usage);
						}
						// Samplers are encoded at the secondary binding, if present.
						const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
						[enc setTextures:textures
								withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							// Single-texture fast path avoids the stack array.
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_IMAGE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
							// NOTE(review): the secondary binding, when present, receives
							// the texture's backing buffer (walking up to the parent for
							// texture views) — presumably so the shader can access the
							// raw buffer; only possible for buffer-backed textures.
							const BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
							if (sbi) {
								id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
								id<MTLBuffer> buf = tex.buffer;
								if (buf) {
									[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
						CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					default: {
						DEV_ASSERT(false);
					}
				}
			}
		}
	}

	// Invert the map: group resources by identical usage flags, keeping each
	// vector sorted and de-duplicated so later merges stay cheap.
	ResourceUsageMap usage_to_resources;
	for (KeyValue<id<MTLResource>, StageResourceUsage> const &keyval : bound_resources) {
		ResourceVector *resources = usage_to_resources.getptr(keyval.value);
		if (resources == nullptr) {
			resources = &usage_to_resources.insert(keyval.value, ResourceVector())->value;
		}
		// Binary-search insertion point; insert only if not already present.
		int64_t pos = resources->span().bisect(keyval.key, true);
		if (pos == resources->size() || (*resources)[pos] != keyval.key) {
			resources->insert(pos, keyval.key);
		}
	}

	BoundUniformSet bs = { .buffer = enc_buffer, .usage_to_resources = usage_to_resources };
	bound_uniforms.insert(p_shader, bs);

	bs.merge_into(p_resource_usage);

	// Return the cached copy, not the local.
	return bound_uniforms.get(p_shader);
}
  1375. MTLFmtCaps MDSubpass::getRequiredFmtCapsForAttachmentAt(uint32_t p_index) const {
  1376. MTLFmtCaps caps = kMTLFmtCapsNone;
  1377. for (RDD::AttachmentReference const &ar : input_references) {
  1378. if (ar.attachment == p_index) {
  1379. flags::set(caps, kMTLFmtCapsRead);
  1380. break;
  1381. }
  1382. }
  1383. for (RDD::AttachmentReference const &ar : color_references) {
  1384. if (ar.attachment == p_index) {
  1385. flags::set(caps, kMTLFmtCapsColorAtt);
  1386. break;
  1387. }
  1388. }
  1389. for (RDD::AttachmentReference const &ar : resolve_references) {
  1390. if (ar.attachment == p_index) {
  1391. flags::set(caps, kMTLFmtCapsResolve);
  1392. break;
  1393. }
  1394. }
  1395. if (depth_stencil_reference.attachment == p_index) {
  1396. flags::set(caps, kMTLFmtCapsDSAtt);
  1397. }
  1398. return caps;
  1399. }
  1400. void MDAttachment::linkToSubpass(const MDRenderPass &p_pass) {
  1401. firstUseSubpassIndex = UINT32_MAX;
  1402. lastUseSubpassIndex = 0;
  1403. for (MDSubpass const &subpass : p_pass.subpasses) {
  1404. MTLFmtCaps reqCaps = subpass.getRequiredFmtCapsForAttachmentAt(index);
  1405. if (reqCaps) {
  1406. firstUseSubpassIndex = MIN(subpass.subpass_index, firstUseSubpassIndex);
  1407. lastUseSubpassIndex = MAX(subpass.subpass_index, lastUseSubpassIndex);
  1408. }
  1409. }
  1410. }
  1411. MTLStoreAction MDAttachment::getMTLStoreAction(MDSubpass const &p_subpass,
  1412. bool p_is_rendering_entire_area,
  1413. bool p_has_resolve,
  1414. bool p_can_resolve,
  1415. bool p_is_stencil) const {
  1416. if (!p_is_rendering_entire_area || !isLastUseOf(p_subpass)) {
  1417. return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
  1418. }
  1419. switch (p_is_stencil ? stencilStoreAction : storeAction) {
  1420. case MTLStoreActionStore:
  1421. return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
  1422. case MTLStoreActionDontCare:
  1423. return p_has_resolve ? (p_can_resolve ? MTLStoreActionMultisampleResolve : MTLStoreActionStore) : MTLStoreActionDontCare;
  1424. default:
  1425. return MTLStoreActionStore;
  1426. }
  1427. }
  1428. bool MDAttachment::configureDescriptor(MTLRenderPassAttachmentDescriptor *p_desc,
  1429. PixelFormats &p_pf,
  1430. MDSubpass const &p_subpass,
  1431. id<MTLTexture> p_attachment,
  1432. bool p_is_rendering_entire_area,
  1433. bool p_has_resolve,
  1434. bool p_can_resolve,
  1435. bool p_is_stencil) const {
  1436. p_desc.texture = p_attachment;
  1437. MTLLoadAction load;
  1438. if (!p_is_rendering_entire_area || !isFirstUseOf(p_subpass)) {
  1439. load = MTLLoadActionLoad;
  1440. } else {
  1441. load = p_is_stencil ? stencilLoadAction : loadAction;
  1442. }
  1443. p_desc.loadAction = load;
  1444. MTLPixelFormat mtlFmt = p_attachment.pixelFormat;
  1445. bool isDepthFormat = p_pf.isDepthFormat(mtlFmt);
  1446. bool isStencilFormat = p_pf.isStencilFormat(mtlFmt);
  1447. if (isStencilFormat && !p_is_stencil && !isDepthFormat) {
  1448. p_desc.storeAction = MTLStoreActionDontCare;
  1449. } else {
  1450. p_desc.storeAction = getMTLStoreAction(p_subpass, p_is_rendering_entire_area, p_has_resolve, p_can_resolve, p_is_stencil);
  1451. }
  1452. return load == MTLLoadActionClear;
  1453. }
  1454. bool MDAttachment::shouldClear(const MDSubpass &p_subpass, bool p_is_stencil) const {
  1455. // If the subpass is not the first subpass to use this attachment, don't clear this attachment.
  1456. if (p_subpass.subpass_index != firstUseSubpassIndex) {
  1457. return false;
  1458. }
  1459. return (p_is_stencil ? stencilLoadAction : loadAction) == MTLLoadActionClear;
  1460. }
  1461. MDRenderPass::MDRenderPass(Vector<MDAttachment> &p_attachments, Vector<MDSubpass> &p_subpasses) :
  1462. attachments(p_attachments), subpasses(p_subpasses) {
  1463. for (MDAttachment &att : attachments) {
  1464. att.linkToSubpass(*this);
  1465. }
  1466. }
  1467. #pragma mark - Resource Factory
  1468. id<MTLFunction> MDResourceFactory::new_func(NSString *p_source, NSString *p_name, NSError **p_error) {
  1469. @autoreleasepool {
  1470. NSError *err = nil;
  1471. MTLCompileOptions *options = [MTLCompileOptions new];
  1472. id<MTLDevice> device = device_driver->get_device();
  1473. id<MTLLibrary> mtlLib = [device newLibraryWithSource:p_source
  1474. options:options
  1475. error:&err];
  1476. if (err) {
  1477. if (p_error != nil) {
  1478. *p_error = err;
  1479. }
  1480. }
  1481. return [mtlLib newFunctionWithName:p_name];
  1482. }
  1483. }
// Generates (as MSL source) and compiles the vertex function for the
// clear-attachments pipeline. The vertex carries position in .xy, the clear
// depth is read from ccIn.colors[DEPTH_INDEX].r, and the target layer is
// packed in a_position.w (the [[render_target_array_index]] attribute is only
// emitted when layered rendering is enabled for this key).
id<MTLFunction> MDResourceFactory::new_clear_vert_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSString *msl = [NSString stringWithFormat:@R"(
#include <metal_stdlib>
using namespace metal;
typedef struct {
float4 a_position [[attribute(0)]];
} AttributesPos;
typedef struct {
float4 colors[9];
} ClearColorsIn;
typedef struct {
float4 v_position [[position]];
uint layer%s;
} VaryingsPos;
vertex VaryingsPos vertClear(AttributesPos attributes [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
VaryingsPos varyings;
varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[%d].r, 1.0);
varyings.layer = uint(attributes.a_position.w);
return varyings;
}
)", p_key.is_layered_rendering_enabled() ? " [[render_target_array_index]]" : "", ClearAttKey::DEPTH_INDEX];
		return new_func(msl, @"vertClear", nil);
	}
}
// Generates (as MSL source) and compiles the fragment function for the
// clear-attachments pipeline: it forwards the clear colors from the
// ClearColorsIn buffer to each enabled color attachment, cast to the
// attachment's component type.
id<MTLFunction> MDResourceFactory::new_clear_frag_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSMutableString *msl = [NSMutableString stringWithCapacity:2048];
		[msl appendFormat:@R"(
#include <metal_stdlib>
using namespace metal;
typedef struct {
float4 v_position [[position]];
} VaryingsPos;
typedef struct {
float4 colors[9];
} ClearColorsIn;
typedef struct {
)"];
		// One output member per enabled color attachment, typed to match the
		// attachment's pixel format (float/half/int/uint/...).
		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" %@4 color%u [[color(%u)]];\n", typeStr, caIdx, caIdx];
			}
		}
		[msl appendFormat:@R"(} ClearColorsOut;
fragment ClearColorsOut fragClear(VaryingsPos varyings [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
ClearColorsOut ccOut;
)"];
		// Copy each enabled clear color into its output member.
		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" ccOut.color%u = %@4(ccIn.colors[%u]);\n", caIdx, typeStr, caIdx];
			}
		}
		[msl appendString:@R"( return ccOut;
})"];
		return new_func(msl, @"fragClear", nil);
	}
}
// Maps a pixel format's component class to the matching MSL scalar type name
// used when emitting clear-shader source (callers append "4" to form the
// vector type, e.g. "half" -> "half4").
NSString *MDResourceFactory::get_format_type_string(MTLPixelFormat p_fmt) {
	switch (device_driver->get_pixel_formats().getFormatType(p_fmt)) {
		case MTLFormatType::ColorInt8:
		case MTLFormatType::ColorInt16:
			return @"short";
		case MTLFormatType::ColorUInt8:
		case MTLFormatType::ColorUInt16:
			return @"ushort";
		case MTLFormatType::ColorInt32:
			return @"int";
		case MTLFormatType::ColorUInt32:
			return @"uint";
		case MTLFormatType::ColorHalf:
			return @"half";
		case MTLFormatType::ColorFloat:
		case MTLFormatType::DepthStencil:
		case MTLFormatType::Compressed:
			return @"float";
		case MTLFormatType::None:
			// Invalid format: the generated MSL will fail to compile, which
			// surfaces the problem at shader-compile time.
			return @"unexpected_MTLPixelFormatInvalid";
	}
}
  1566. id<MTLDepthStencilState> MDResourceFactory::new_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
  1567. MTLDepthStencilDescriptor *dsDesc = [MTLDepthStencilDescriptor new];
  1568. dsDesc.depthCompareFunction = MTLCompareFunctionAlways;
  1569. dsDesc.depthWriteEnabled = p_use_depth;
  1570. if (p_use_stencil) {
  1571. MTLStencilDescriptor *sDesc = [MTLStencilDescriptor new];
  1572. sDesc.stencilCompareFunction = MTLCompareFunctionAlways;
  1573. sDesc.stencilFailureOperation = MTLStencilOperationReplace;
  1574. sDesc.depthFailureOperation = MTLStencilOperationReplace;
  1575. sDesc.depthStencilPassOperation = MTLStencilOperationReplace;
  1576. dsDesc.frontFaceStencil = sDesc;
  1577. dsDesc.backFaceStencil = sDesc;
  1578. } else {
  1579. dsDesc.frontFaceStencil = nil;
  1580. dsDesc.backFaceStencil = nil;
  1581. }
  1582. return [device_driver->get_device() newDepthStencilStateWithDescriptor:dsDesc];
  1583. }
  1584. id<MTLRenderPipelineState> MDResourceFactory::new_clear_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
  1585. PixelFormats &pixFmts = device_driver->get_pixel_formats();
  1586. id<MTLFunction> vtxFunc = new_clear_vert_func(p_key);
  1587. id<MTLFunction> fragFunc = new_clear_frag_func(p_key);
  1588. MTLRenderPipelineDescriptor *plDesc = [MTLRenderPipelineDescriptor new];
  1589. plDesc.label = @"ClearRenderAttachments";
  1590. plDesc.vertexFunction = vtxFunc;
  1591. plDesc.fragmentFunction = fragFunc;
  1592. plDesc.rasterSampleCount = p_key.sample_count;
  1593. plDesc.inputPrimitiveTopology = MTLPrimitiveTopologyClassTriangle;
  1594. for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
  1595. MTLRenderPipelineColorAttachmentDescriptor *colorDesc = plDesc.colorAttachments[caIdx];
  1596. colorDesc.pixelFormat = (MTLPixelFormat)p_key.pixel_formats[caIdx];
  1597. colorDesc.writeMask = p_key.is_enabled(caIdx) ? MTLColorWriteMaskAll : MTLColorWriteMaskNone;
  1598. }
  1599. MTLPixelFormat mtlDepthFormat = p_key.depth_format();
  1600. if (pixFmts.isDepthFormat(mtlDepthFormat)) {
  1601. plDesc.depthAttachmentPixelFormat = mtlDepthFormat;
  1602. }
  1603. MTLPixelFormat mtlStencilFormat = p_key.stencil_format();
  1604. if (pixFmts.isStencilFormat(mtlStencilFormat)) {
  1605. plDesc.stencilAttachmentPixelFormat = mtlStencilFormat;
  1606. }
  1607. MTLVertexDescriptor *vtxDesc = plDesc.vertexDescriptor;
  1608. // Vertex attribute descriptors.
  1609. MTLVertexAttributeDescriptorArray *vaDescArray = vtxDesc.attributes;
  1610. MTLVertexAttributeDescriptor *vaDesc;
  1611. NSUInteger vtxBuffIdx = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX);
  1612. NSUInteger vtxStride = 0;
  1613. // Vertex location.
  1614. vaDesc = vaDescArray[0];
  1615. vaDesc.format = MTLVertexFormatFloat4;
  1616. vaDesc.bufferIndex = vtxBuffIdx;
  1617. vaDesc.offset = vtxStride;
  1618. vtxStride += sizeof(simd::float4);
  1619. // Vertex attribute buffer.
  1620. MTLVertexBufferLayoutDescriptorArray *vbDescArray = vtxDesc.layouts;
  1621. MTLVertexBufferLayoutDescriptor *vbDesc = vbDescArray[vtxBuffIdx];
  1622. vbDesc.stepFunction = MTLVertexStepFunctionPerVertex;
  1623. vbDesc.stepRate = 1;
  1624. vbDesc.stride = vtxStride;
  1625. return [device_driver->get_device() newRenderPipelineStateWithDescriptor:plDesc error:p_error];
  1626. }
  1627. id<MTLRenderPipelineState> MDResourceCache::get_clear_render_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
  1628. HashMap::ConstIterator it = clear_states.find(p_key);
  1629. if (it != clear_states.end()) {
  1630. return it->value;
  1631. }
  1632. id<MTLRenderPipelineState> state = resource_factory->new_clear_pipeline_state(p_key, p_error);
  1633. clear_states[p_key] = state;
  1634. return state;
  1635. }
  1636. id<MTLDepthStencilState> MDResourceCache::get_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
  1637. id<MTLDepthStencilState> __strong *val;
  1638. if (p_use_depth && p_use_stencil) {
  1639. val = &clear_depth_stencil_state.all;
  1640. } else if (p_use_depth) {
  1641. val = &clear_depth_stencil_state.depth_only;
  1642. } else if (p_use_stencil) {
  1643. val = &clear_depth_stencil_state.stencil_only;
  1644. } else {
  1645. val = &clear_depth_stencil_state.none;
  1646. }
  1647. DEV_ASSERT(val != nullptr);
  1648. if (*val == nil) {
  1649. *val = resource_factory->new_depth_stencil_state(p_use_depth, p_use_stencil);
  1650. }
  1651. return *val;
  1652. }
// Short human-readable names for each shader stage, indexed by
// RD::ShaderStage; used in signpost/profiling messages below.
static const char *SHADER_STAGE_NAMES[] = {
	[RD::SHADER_STAGE_VERTEX] = "vert",
	[RD::SHADER_STAGE_FRAGMENT] = "frag",
	[RD::SHADER_STAGE_TESSELATION_CONTROL] = "tess_ctrl",
	[RD::SHADER_STAGE_TESSELATION_EVALUATION] = "tess_eval",
	[RD::SHADER_STAGE_COMPUTE] = "comp",
};
// Called when the associated MDLibrary is deallocated, so the owning driver
// can remove this entry from its shader cache.
void ShaderCacheEntry::notify_free() const {
	owner.shader_cache_free_entry(key);
}
// Private designated initializer shared by the concrete MDLibrary subclasses;
// the source parameter is only carried in DEV builds for debugging.
@interface MDLibrary ()
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
#ifdef DEV_ENABLED
							source:(NSString *)source;
#endif
;
@end
/// Loads the MTLLibrary when the library is first accessed.
@interface MDLazyLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	std::shared_mutex _mu; // Guards the one-time lazy load (_loaded and the fields below).
	bool _loaded;
	// Compilation inputs, retained until the deferred compile runs and then released.
	id<MTLDevice> _device;
	NSString *_source;
	MTLCompileOptions *_options;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
							source:(NSString *)source
						   options:(MTLCompileOptions *)options;
@end
/// Loads the MTLLibrary immediately on initialization, using an asynchronous API.
@interface MDImmediateLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	// _cv/_cv_mutex let accessors block until the async compile completes;
	// _ready is the condition flag (set under _cv_mutex), _complete is a
	// lock-free fast path checked before taking the mutex.
	std::mutex _cv_mutex;
	std::condition_variable _cv;
	std::atomic<bool> _complete;
	bool _ready;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
							source:(NSString *)source
						   options:(MTLCompileOptions *)options;
@end
/// Wraps a precompiled MTLLibrary loaded synchronously from serialized
/// (metallib) data.
@interface MDBinaryLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
#ifdef DEV_ENABLED
							source:(NSString *)source
#endif
							  data:(dispatch_data_t)data;
@end
@implementation MDLibrary

/// Factory for source-compiled libraries; selects the concrete subclass from
/// the requested load strategy.
+ (instancetype)newLibraryWithCacheEntry:(ShaderCacheEntry *)entry
								  device:(id<MTLDevice>)device
								  source:(NSString *)source
								 options:(MTLCompileOptions *)options
								strategy:(ShaderLoadStrategy)strategy {
	switch (strategy) {
		case ShaderLoadStrategy::IMMEDIATE:
			[[fallthrough]];
		default:
			// Unknown strategies fall back to immediate compilation.
			return [[MDImmediateLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
		case ShaderLoadStrategy::LAZY:
			return [[MDLazyLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
	}
}

/// Factory for libraries backed by precompiled (binary) data.
+ (instancetype)newLibraryWithCacheEntry:(ShaderCacheEntry *)entry
								  device:(id<MTLDevice>)device
#ifdef DEV_ENABLED
								  source:(NSString *)source
#endif
									data:(dispatch_data_t)data {
	return [[MDBinaryLibrary alloc] initWithCacheEntry:entry
												device:device
#ifdef DEV_ENABLED
												source:source
#endif
												  data:data];
}

#ifdef DEV_ENABLED
// Original shader source, retained for debugging in DEV builds only.
- (NSString *)originalSource {
	return _original_source;
}
#endif

// Abstract: concrete subclasses return the compiled library.
- (id<MTLLibrary>)library {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

// Abstract: concrete subclasses return the compile/load error, if any.
- (NSError *)error {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

// Labels are ignored by default; subclasses may override.
- (void)setLabel:(NSString *)label {
}

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
#ifdef DEV_ENABLED
							source:(NSString *)source
#endif
{
	self = [super init];
	_entry = entry;
	// Back-link so the cache entry can reach this library instance.
	_entry->library = self;
#ifdef DEV_ENABLED
	_original_source = source;
#endif
	return self;
}

- (void)dealloc {
	// Let the owner drop the now-dangling cache entry.
	_entry->notify_free();
}
@end
@implementation MDImmediateLibrary

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
							device:(id<MTLDevice>)device
							source:(NSString *)source
						   options:(MTLCompileOptions *)options {
	self = [super initWithCacheEntry:entry
#ifdef DEV_ENABLED
							  source:source
#endif
	];
	_complete = false;
	_ready = false;

	// Signpost interval to profile shader compilation time.
	__block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
	os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
			"shader_name=%{public}s stage=%{public}s hash=%X",
			entry->name.get_data(), SHADER_STAGE_NAMES[entry->stage], entry->key.short_sha());

	// Start compiling immediately; the accessors below block until the
	// completion handler has run. NOTE(review): the handler captures self
	// strongly (via self-> and implicit ivar access), keeping this object
	// alive until compilation finishes — presumably intentional.
	[device newLibraryWithSource:source
						 options:options
			   completionHandler:^(id<MTLLibrary> library, NSError *error) {
				   os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");
				   self->_library = library;
				   self->_error = error;
				   if (error) {
					   ERR_PRINT(vformat(U"Error compiling shader %s: %s", entry->name.get_data(), error.localizedDescription.UTF8String));
				   }
				   {
					   // _ready is written under the mutex so that waiters on
					   // the condition variable observe it safely.
					   std::lock_guard<std::mutex> lock(self->_cv_mutex);
					   _ready = true;
				   }
				   _cv.notify_all();
				   _complete = true;
			   }];
	return self;
}

// Blocks until compilation completes, then returns the library (nil on failure).
- (id<MTLLibrary>)library {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _library;
}

// Blocks until compilation completes, then returns the compile error, if any.
- (NSError *)error {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _error;
}
@end
  1819. @implementation MDLazyLibrary
  1820. - (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
  1821. device:(id<MTLDevice>)device
  1822. source:(NSString *)source
  1823. options:(MTLCompileOptions *)options {
  1824. self = [super initWithCacheEntry:entry
  1825. #ifdef DEV_ENABLED
  1826. source:source
  1827. #endif
  1828. ];
  1829. _device = device;
  1830. _source = source;
  1831. _options = options;
  1832. return self;
  1833. }
  1834. - (void)load {
  1835. {
  1836. std::shared_lock<std::shared_mutex> lock(_mu);
  1837. if (_loaded) {
  1838. return;
  1839. }
  1840. }
  1841. std::unique_lock<std::shared_mutex> lock(_mu);
  1842. if (_loaded) {
  1843. return;
  1844. }
  1845. __block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
  1846. os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
  1847. "shader_name=%{public}s stage=%{public}s hash=%X",
  1848. _entry->name.get_data(), SHADER_STAGE_NAMES[_entry->stage], _entry->key.short_sha());
  1849. NSError *error;
  1850. _library = [_device newLibraryWithSource:_source options:_options error:&error];
  1851. os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");
  1852. _device = nil;
  1853. _source = nil;
  1854. _options = nil;
  1855. _loaded = true;
  1856. }
  1857. - (id<MTLLibrary>)library {
  1858. [self load];
  1859. return _library;
  1860. }
  1861. - (NSError *)error {
  1862. [self load];
  1863. return _error;
  1864. }
  1865. @end
  1866. @implementation MDBinaryLibrary
  1867. - (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
  1868. device:(id<MTLDevice>)device
  1869. #ifdef DEV_ENABLED
  1870. source:(NSString *)source
  1871. #endif
  1872. data:(dispatch_data_t)data {
  1873. self = [super initWithCacheEntry:entry
  1874. #ifdef DEV_ENABLED
  1875. source:source
  1876. #endif
  1877. ];
  1878. NSError *error = nil;
  1879. _library = [device newLibraryWithData:data error:&error];
  1880. if (error != nil) {
  1881. _error = error;
  1882. NSString *desc = [error description];
  1883. ERR_PRINT(vformat("Unable to load shader library: %s", desc.UTF8String));
  1884. }
  1885. return self;
  1886. }
  1887. - (id<MTLLibrary>)library {
  1888. return _library;
  1889. }
  1890. - (NSError *)error {
  1891. return _error;
  1892. }
  1893. @end