obvh_traversal.h 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This is a template BVH traversal function, where various features can be
 * enabled/disabled. This way we can compile optimized versions for each case
 * without new features slowing things down.
 *
 * BVH_INSTANCING: object instancing
 * BVH_HAIR: hair curve rendering
 * BVH_MOTION: motion blur rendering
 */
#if BVH_FEATURE(BVH_HAIR)
#  define NODE_INTERSECT obvh_node_intersect
#else
#  define NODE_INTERSECT obvh_aligned_node_intersect
#endif

/* Trace `ray` through the scene's 8-wide (oct) BVH and record the closest
 * intersection in `isect`.
 *
 * Inner nodes are tested 8 children at a time with AVX box tests
 * (NODE_INTERSECT); leaves dispatch to the per-primitive intersection
 * routines. Hit children are either continued into directly (1 hit),
 * ordered near/far (2 hits), or pushed onto the stack and sorted there
 * (3-8 hits).
 *
 * kg:         kernel globals, giving access to BVH node / primitive data.
 * ray:        ray to trace; ray->t is the maximum hit distance.
 * isect:      receives the closest hit (t, u, v, prim, object).
 * visibility: ray visibility mask, tested against node/leaf flags; a value
 *             of PATH_RAY_SHADOW_OPAQUE enables any-hit early termination.
 *
 * Returns true if any primitive was hit (isect->prim != PRIM_NONE).
 */
ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg,
                                             const Ray *ray,
                                             Intersection *isect,
                                             const uint visibility)
{
  /* Traversal stack in CUDA thread-local memory. */
  OBVHStackItem traversal_stack[BVH_OSTACK_SIZE];
  traversal_stack[0].addr = ENTRYPOINT_SENTINEL;
  traversal_stack[0].dist = -FLT_MAX;

  /* Traversal variables in registers. */
  int stack_ptr = 0;
  int node_addr = kernel_data.bvh.root;
  float node_dist = -FLT_MAX;

  /* Ray parameters in registers. */
  float3 P = ray->P;
  float3 dir = bvh_clamp_direction(ray->D);
  float3 idir = bvh_inverse_direction(dir);
  int object = OBJECT_NONE;

#if BVH_FEATURE(BVH_MOTION)
  /* Inverse object transform, saved on instance push so the pop can restore
   * the world-space ray for motion-blurred instances. */
  Transform ob_itfm;
#endif

  isect->t = ray->t;
  isect->u = 0.0f;
  isect->v = 0.0f;
  isect->prim = PRIM_NONE;
  isect->object = OBJECT_NONE;

  BVH_DEBUG_INIT();

  /* Ray interval replicated into all 8 SIMD lanes; tfar shrinks as closer
   * hits are found so box tests cull more aggressively. */
  avxf tnear(0.0f), tfar(ray->t);
#if BVH_FEATURE(BVH_HAIR)
  avx3f dir4(avxf(dir.x), avxf(dir.y), avxf(dir.z));
#endif
  avx3f idir4(avxf(idir.x), avxf(idir.y), avxf(idir.z));

#ifdef __KERNEL_AVX2__
  /* Precomputed P * idir lets the AVX2 box test use fused multiply-subtract. */
  float3 P_idir = P * idir;
  avx3f P_idir4 = avx3f(P_idir.x, P_idir.y, P_idir.z);
#endif
#if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
  avx3f org4 = avx3f(avxf(P.x), avxf(P.y), avxf(P.z));
#endif

  /* Offsets to select the side that becomes the lower or upper bound,
   * derived from the per-axis sign of the ray direction. */
  int near_x, near_y, near_z;
  int far_x, far_y, far_z;
  obvh_near_far_idx_calc(idir, &near_x, &near_y, &near_z, &far_x, &far_y, &far_z);

  /* Traversal loop. */
  do {
    do {
      /* Traverse internal nodes. */
      while (node_addr >= 0 && node_addr != ENTRYPOINT_SENTINEL) {
        float4 inodes = kernel_tex_fetch(__bvh_nodes, node_addr + 0);
        /* Silence unused-variable warning when neither the motion-time nor
         * the visibility check below is compiled in. */
        (void)inodes;

        /* Cull the node if it is already beyond the closest hit, outside the
         * ray's time range, or invisible to this ray type. */
        if (UNLIKELY(node_dist > isect->t)
#if BVH_FEATURE(BVH_MOTION)
            || UNLIKELY(ray->time < inodes.y) || UNLIKELY(ray->time > inodes.z)
#endif
#ifdef __VISIBILITY_FLAG__
            || (__float_as_uint(inodes.x) & visibility) == 0
#endif
        ) {
          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }

        /* Bit i of child_mask is set if child i's bounds were hit; dist holds
         * the per-child entry distances. */
        int child_mask;
        avxf dist;

        BVH_DEBUG_NEXT_NODE();

        {
          child_mask = NODE_INTERSECT(kg,
                                      tnear,
                                      tfar,
#ifdef __KERNEL_AVX2__
                                      P_idir4,
#endif
#if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
                                      org4,
#endif
#if BVH_FEATURE(BVH_HAIR)
                                      dir4,
#endif
                                      idir4,
                                      near_x,
                                      near_y,
                                      near_z,
                                      far_x,
                                      far_y,
                                      far_z,
                                      node_addr,
                                      &dist);
        }

        if (child_mask != 0) {
          avxf cnodes;
          /* TODO(sergey): Investigate whether moving cnodes upwards
           * gives a speedup (will be different cache pattern but will
           * avoid extra check here).
           */
#if BVH_FEATURE(BVH_HAIR)
          if (__float_as_uint(inodes.x) & PATH_RAY_NODE_UNALIGNED) {
            cnodes = kernel_tex_fetch_avxf(__bvh_nodes, node_addr + 26);
          }
          else
#endif
          {
            cnodes = kernel_tex_fetch_avxf(__bvh_nodes, node_addr + 14);
          }

          /* One child is hit, continue with that child.
           * NOTE: each __bscf() consumes the lowest set bit of child_mask, so
           * the mask becoming 0 means all hit children have been extracted. */
          int r = __bscf(child_mask);
          float d0 = ((float *)&dist)[r];
          if (child_mask == 0) {
            node_addr = __float_as_int(cnodes[r]);
            node_dist = d0;
            continue;
          }

          /* Two children are hit, push far child, and continue with
           * closer child.
           */
          int c0 = __float_as_int(cnodes[r]);
          r = __bscf(child_mask);
          int c1 = __float_as_int(cnodes[r]);
          float d1 = ((float *)&dist)[r];
          if (child_mask == 0) {
            if (d1 < d0) {
              node_addr = c1;
              node_dist = d1;
              ++stack_ptr;
              kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
              traversal_stack[stack_ptr].addr = c0;
              traversal_stack[stack_ptr].dist = d0;
              continue;
            }
            else {
              node_addr = c0;
              node_dist = d0;
              ++stack_ptr;
              kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
              traversal_stack[stack_ptr].addr = c1;
              traversal_stack[stack_ptr].dist = d1;
              continue;
            }
          }

          /* Here starts the slow path for 3 or 4 hit children. We push
           * all nodes onto the stack to sort them there.
           */
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c1;
          traversal_stack[stack_ptr].dist = d1;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c0;
          traversal_stack[stack_ptr].dist = d0;

          /* Three children are hit, push all onto stack and sort 3
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c2 = __float_as_int(cnodes[r]);
          float d2 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c2;
            traversal_stack[stack_ptr].dist = d2;
            /* Sort so the nearest child ends on top of the stack, then pop it. */
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          /* Four children are hit, push all onto stack and sort 4
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c3 = __float_as_int(cnodes[r]);
          float d3 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c3;
            traversal_stack[stack_ptr].dist = d3;
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c2;
            traversal_stack[stack_ptr].dist = d2;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c3;
          traversal_stack[stack_ptr].dist = d3;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c2;
          traversal_stack[stack_ptr].dist = d2;

          /* Five children are hit, push all onto stack and sort 5
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c4 = __float_as_int(cnodes[r]);
          float d4 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c4;
            traversal_stack[stack_ptr].dist = d4;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3],
                            &traversal_stack[stack_ptr - 4]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          /* Six children are hit, push all onto stack and sort 6
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c5 = __float_as_int(cnodes[r]);
          float d5 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c5;
            traversal_stack[stack_ptr].dist = d5;
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c4;
            traversal_stack[stack_ptr].dist = d4;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3],
                            &traversal_stack[stack_ptr - 4],
                            &traversal_stack[stack_ptr - 5]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c5;
          traversal_stack[stack_ptr].dist = d5;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c4;
          traversal_stack[stack_ptr].dist = d4;

          /* Seven children are hit, push all onto stack and sort 7
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c6 = __float_as_int(cnodes[r]);
          float d6 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c6;
            traversal_stack[stack_ptr].dist = d6;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3],
                            &traversal_stack[stack_ptr - 4],
                            &traversal_stack[stack_ptr - 5],
                            &traversal_stack[stack_ptr - 6]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          /* Eight children are hit, push all onto stack and sort 8
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c7 = __float_as_int(cnodes[r]);
          float d7 = ((float *)&dist)[r];
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c7;
          traversal_stack[stack_ptr].dist = d7;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c6;
          traversal_stack[stack_ptr].dist = d6;
          obvh_stack_sort(&traversal_stack[stack_ptr],
                          &traversal_stack[stack_ptr - 1],
                          &traversal_stack[stack_ptr - 2],
                          &traversal_stack[stack_ptr - 3],
                          &traversal_stack[stack_ptr - 4],
                          &traversal_stack[stack_ptr - 5],
                          &traversal_stack[stack_ptr - 6],
                          &traversal_stack[stack_ptr - 7]);
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }

        /* No child was hit: pop the next node. */
        node_addr = traversal_stack[stack_ptr].addr;
        node_dist = traversal_stack[stack_ptr].dist;
        --stack_ptr;
      }

      /* If node is leaf, fetch triangle list. */
      if (node_addr < 0) {
        /* Leaf addresses are encoded as negative; -node_addr - 1 is the index
         * into the leaf-node array. */
        float4 leaf = kernel_tex_fetch(__bvh_leaf_nodes, (-node_addr - 1));

#ifdef __VISIBILITY_FLAG__
        if (UNLIKELY((node_dist > isect->t) || ((__float_as_uint(leaf.z) & visibility) == 0)))
#else
        if (UNLIKELY((node_dist > isect->t)))
#endif
        {
          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }

        int prim_addr = __float_as_int(leaf.x);

#if BVH_FEATURE(BVH_INSTANCING)
        /* Negative prim_addr marks an instance leaf, handled in the else
         * branch below. */
        if (prim_addr >= 0) {
#endif
          int prim_addr2 = __float_as_int(leaf.y);
          const uint type = __float_as_int(leaf.w);

          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;

          /* Primitive intersection. */
          switch (type & PRIMITIVE_ALL) {
            case PRIMITIVE_TRIANGLE: {
              int prim_count = prim_addr2 - prim_addr;
              if (prim_count < 3) {
                /* Few triangles: intersect them one at a time. */
                for (; prim_addr < prim_addr2; prim_addr++) {
                  BVH_DEBUG_NEXT_INTERSECTION();
                  kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                  if (triangle_intersect(kg, isect, P, dir, visibility, object, prim_addr)) {
                    /* Shrink far bound so box tests cull behind the hit. */
                    tfar = avxf(isect->t);
                    /* Shadow ray early termination. */
                    if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                      return true;
                    }
                  }
                }  // for
              }
              else {
                /* Larger leaves use the 8-wide SIMD triangle intersection. */
                kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                if (triangle_intersect8(kg,
                                        &isect,
                                        P,
                                        dir,
                                        visibility,
                                        object,
                                        prim_addr,
                                        prim_count,
                                        0,
                                        0,
                                        NULL,
                                        0.0f)) {
                  tfar = avxf(isect->t);
                  if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }  // prim count
              break;
            }
#if BVH_FEATURE(BVH_MOTION)
            case PRIMITIVE_MOTION_TRIANGLE: {
              for (; prim_addr < prim_addr2; prim_addr++) {
                BVH_DEBUG_NEXT_INTERSECTION();
                kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                if (motion_triangle_intersect(
                        kg, isect, P, dir, ray->time, visibility, object, prim_addr)) {
                  tfar = avxf(isect->t);
                  /* Shadow ray early termination. */
                  if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }
              break;
            }
#endif /* BVH_FEATURE(BVH_MOTION) */
#if BVH_FEATURE(BVH_HAIR)
            case PRIMITIVE_CURVE:
            case PRIMITIVE_MOTION_CURVE: {
              for (; prim_addr < prim_addr2; prim_addr++) {
                BVH_DEBUG_NEXT_INTERSECTION();
                const uint curve_type = kernel_tex_fetch(__prim_type, prim_addr);
                kernel_assert((curve_type & PRIMITIVE_ALL) == (type & PRIMITIVE_ALL));
                bool hit;
                /* Curve flags select between the cardinal-spline and the
                 * segment curve intersection routine. */
                if (kernel_data.curve.curveflags & CURVE_KN_INTERPOLATE) {
                  hit = cardinal_curve_intersect(
                      kg, isect, P, dir, visibility, object, prim_addr, ray->time, curve_type);
                }
                else {
                  hit = curve_intersect(
                      kg, isect, P, dir, visibility, object, prim_addr, ray->time, curve_type);
                }
                if (hit) {
                  tfar = avxf(isect->t);
                  /* Shadow ray early termination. */
                  if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }
              break;
            }
#endif /* BVH_FEATURE(BVH_HAIR) */
          }
        }
#if BVH_FEATURE(BVH_INSTANCING)
        else {
          /* Instance push: transform the ray into object space, recompute all
           * SIMD-replicated ray state, and restart traversal at the instanced
           * object's BVH root. NOTE(review): the push helpers are shared with
           * the QBVH variant, hence the qbvh_ prefix. */
          object = kernel_tex_fetch(__prim_object, -prim_addr - 1);
#  if BVH_FEATURE(BVH_MOTION)
          qbvh_instance_motion_push(
              kg, object, ray, &P, &dir, &idir, &isect->t, &node_dist, &ob_itfm);
#  else
          qbvh_instance_push(kg, object, ray, &P, &dir, &idir, &isect->t, &node_dist);
#  endif
          obvh_near_far_idx_calc(idir, &near_x, &near_y, &near_z, &far_x, &far_y, &far_z);
          tfar = avxf(isect->t);
#  if BVH_FEATURE(BVH_HAIR)
          dir4 = avx3f(avxf(dir.x), avxf(dir.y), avxf(dir.z));
#  endif
          idir4 = avx3f(avxf(idir.x), avxf(idir.y), avxf(idir.z));
#  ifdef __KERNEL_AVX2__
          P_idir = P * idir;
          P_idir4 = avx3f(P_idir.x, P_idir.y, P_idir.z);
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
          org4 = avx3f(avxf(P.x), avxf(P.y), avxf(P.z));
#  endif

          /* Sentinel marks where to return to top-level traversal on pop. */
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = ENTRYPOINT_SENTINEL;
          traversal_stack[stack_ptr].dist = -FLT_MAX;

          node_addr = kernel_tex_fetch(__object_node, object);

          BVH_DEBUG_NEXT_INSTANCE();
        }
      }
#endif /* FEATURE(BVH_INSTANCING) */
    } while (node_addr != ENTRYPOINT_SENTINEL);

#if BVH_FEATURE(BVH_INSTANCING)
    if (stack_ptr >= 0) {
      kernel_assert(object != OBJECT_NONE);

      /* Instance pop: restore the world-space ray state and continue with the
       * node saved below the instance sentinel. */
#  if BVH_FEATURE(BVH_MOTION)
      isect->t = bvh_instance_motion_pop(kg, object, ray, &P, &dir, &idir, isect->t, &ob_itfm);
#  else
      isect->t = bvh_instance_pop(kg, object, ray, &P, &dir, &idir, isect->t);
#  endif
      obvh_near_far_idx_calc(idir, &near_x, &near_y, &near_z, &far_x, &far_y, &far_z);
      tfar = avxf(isect->t);
#  if BVH_FEATURE(BVH_HAIR)
      dir4 = avx3f(avxf(dir.x), avxf(dir.y), avxf(dir.z));
#  endif
      idir4 = avx3f(avxf(idir.x), avxf(idir.y), avxf(idir.z));
#  ifdef __KERNEL_AVX2__
      P_idir = P * idir;
      P_idir4 = avx3f(P_idir.x, P_idir.y, P_idir.z);
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
      org4 = avx3f(avxf(P.x), avxf(P.y), avxf(P.z));
#  endif

      object = OBJECT_NONE;
      node_addr = traversal_stack[stack_ptr].addr;
      node_dist = traversal_stack[stack_ptr].dist;
      --stack_ptr;
    }
#endif /* FEATURE(BVH_INSTANCING) */
  } while (node_addr != ENTRYPOINT_SENTINEL);

  return (isect->prim != PRIM_NONE);
}

#undef NODE_INTERSECT