/*
 * Copyright 2011-2015 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

/* This kernel takes care of rays that hit the background (scene_intersect
 * kernel), and for rays in state RAY_UPDATE_BUFFER it updates the ray's
 * accumulated radiance in the output buffer. This kernel also takes care of
 * rays that have been marked to be regenerated.
 *
 * We will empty the QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue in this kernel.
 *
 * Typically all rays that are in state RAY_HIT_BACKGROUND or RAY_UPDATE_BUFFER
 * will eventually be set to the RAY_TO_REGENERATE state in this kernel.
 * Finally, all rays of ray_state RAY_TO_REGENERATE will be regenerated and put
 * in the queue QUEUE_ACTIVE_AND_REGENERATED_RAYS.
 *
 * State of queues when this kernel is called:
 * At entry,
 * - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE rays.
 * - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *   RAY_UPDATE_BUFFER, RAY_HIT_BACKGROUND and RAY_TO_REGENERATE rays.
 * At exit,
 * - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE and
 *   RAY_REGENERATED rays.
 * - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be empty.
 */
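/* Illustrative sketch, not part of the kernel itself: in the split-kernel
 * pipeline this kernel typically runs near the end of each path iteration,
 * roughly
 *
 *   scene_intersect -> ... -> next_iteration_setup -> queue_enqueue
 *     -> buffer_update (this kernel)
 *
 * The authoritative dispatch order lives in the host-side split-kernel
 * driver; treat the sequence above as an orientation aid, not a definitive
 * call order. */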
ccl_device void kernel_buffer_update(KernelGlobals *kg,
                                     ccl_local_param unsigned int *local_queue_atomics)
{
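  /* One thread per work group resets the shared queue counter; the barrier
   * below makes the reset visible to every thread before
   * enqueue_ray_index_local() increments it at the end of this kernel. */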
  if (ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
    *local_queue_atomics = 0;
  }
  ccl_barrier(CCL_LOCAL_MEM_FENCE);

  int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
  if (ray_index == 0) {
    /* We will empty this queue in this kernel. */
    kernel_split_params.queue_index[QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS] = 0;
  }
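  /* enqueue_flag records whether this thread regenerated a ray that must be
   * appended to QUEUE_ACTIVE_AND_REGENERATED_RAYS at the end of the kernel. */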
  char enqueue_flag = 0;
  ray_index = get_ray_index(kg,
                            ray_index,
                            QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
                            kernel_split_state.queue_data,
                            kernel_split_params.queue_size,
                            1);
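  /* The trailing 1 asks get_ray_index() to clear the queue slot as it is
   * read, which is how this queue ends up empty when the kernel finishes. */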
#ifdef __COMPUTE_DEVICE_GPU__
  /* If we are executing on a GPU device, we exit all threads that are not
   * required.
   *
   * If we are executing on a CPU device, then we need to keep all threads
   * active since we have barrier() calls later in the kernel. CPU devices
   * expect all threads to execute the barrier statement.
   */
  if (ray_index == QUEUE_EMPTY_SLOT) {
    return;
  }
#endif
#ifndef __COMPUTE_DEVICE_GPU__
  if (ray_index != QUEUE_EMPTY_SLOT) {
#endif
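    /* Per-ray state lives in structure-of-arrays storage; grab pointers to
     * this ray's slots once up front. */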
    ccl_global char *ray_state = kernel_split_state.ray_state;
    ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
    PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
    ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
    ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];

    bool ray_was_updated = false;

    if (IS_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER)) {
      ray_was_updated = true;
      uint sample = state->sample;
      uint buffer_offset = kernel_split_state.buffer_offset[ray_index];
      ccl_global float *buffer = kernel_split_params.tile.buffer + buffer_offset;

      /* Accumulate result in output buffer. */
      kernel_write_result(kg, buffer, sample, L);

      ASSIGN_RAY_STATE(ray_state, ray_index, RAY_TO_REGENERATE);
    }
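    /* Cryptomatte passes store (id, weight) pairs; once the final AA sample
     * for a pixel has been accumulated, kernel_sort_id_slots() orders the
     * pairs by weight so the most dominant ids come first. */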
    if (kernel_data.film.cryptomatte_passes) {
      /* Make sure no thread is writing to the buffers. */
      ccl_barrier(CCL_LOCAL_MEM_FENCE);
      if (ray_was_updated && state->sample - 1 == kernel_data.integrator.aa_samples) {
        uint buffer_offset = kernel_split_state.buffer_offset[ray_index];
        ccl_global float *buffer = kernel_split_params.tile.buffer + buffer_offset;
        ccl_global float *cryptomatte_buffer = buffer + kernel_data.film.pass_cryptomatte;
        kernel_sort_id_slots(cryptomatte_buffer, 2 * kernel_data.film.cryptomatte_depth);
      }
    }
    if (IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
      /* We have completed the current work; get the next work item. */
      ccl_global uint *work_pools = kernel_split_params.work_pools;
      uint total_work_size = kernel_split_params.total_work_size;
      uint work_index;

      if (!get_next_work(kg, work_pools, total_work_size, ray_index, &work_index)) {
        /* If work is invalid, no more work is available and the thread may exit. */
        ASSIGN_RAY_STATE(ray_state, ray_index, RAY_INACTIVE);
      }
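      /* If the ray was not marked inactive above, get_next_work() succeeded
       * and work_index now refers to a valid (pixel, sample) work item. */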
      if (IS_STATE(ray_state, ray_index, RAY_TO_REGENERATE)) {
        ccl_global WorkTile *tile = &kernel_split_params.tile;
        uint x, y, sample;
        get_work_pixel(tile, work_index, &x, &y, &sample);

        /* Store buffer offset for writing to passes. */
        uint buffer_offset = (tile->offset + x + y * tile->stride) * kernel_data.film.pass_stride;
        kernel_split_state.buffer_offset[ray_index] = buffer_offset;

        /* Initialize random numbers and ray. */
        uint rng_hash;
        kernel_path_trace_setup(kg, sample, x, y, &rng_hash, ray);
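        /* A zero-length camera ray (ray->t == 0.0f) carries no contribution,
         * e.g. when the sample falls outside the camera's view; such rays
         * stay in RAY_TO_REGENERATE so a new work item is fetched on a later
         * round. */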
        if (ray->t != 0.0f) {
          /* Initialize throughput, path radiance, Ray and PathState;
           * these rays proceed with path iteration. */
          *throughput = make_float3(1.0f, 1.0f, 1.0f);
          path_radiance_init(L, kernel_data.film.use_light_pass);
          path_state_init(kg,
                          AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]),
                          state,
                          rng_hash,
                          sample,
                          ray);
#ifdef __SUBSURFACE__
          kernel_path_subsurface_init_indirect(&kernel_split_state.ss_rays[ray_index]);
#endif
          ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
          enqueue_flag = 1;
        }
        else {
          ASSIGN_RAY_STATE(ray_state, ray_index, RAY_TO_REGENERATE);
        }
      }
    }

#ifndef __COMPUTE_DEVICE_GPU__
  }
#endif
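  /* Note, stated as an assumption about the queue helpers rather than from
   * this file: enqueue_ray_index_local() first counts the flagged threads
   * within the work group via local_queue_atomics, then has one thread
   * reserve a contiguous range in the global queue with a single global
   * atomic, keeping global atomic contention low. */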
  /* Enqueue RAY_REGENERATED rays into QUEUE_ACTIVE_AND_REGENERATED_RAYS;
   * these rays will be made active during the next scene_intersect kernel. */
  enqueue_ray_index_local(ray_index,
                          QUEUE_ACTIVE_AND_REGENERATED_RAYS,
                          enqueue_flag,
                          kernel_split_params.queue_size,
                          local_queue_atomics,
                          kernel_split_state.queue_data,
                          kernel_split_params.queue_index);
}

CCL_NAMESPACE_END