/* kernel.cu */
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  16. /* CUDA kernel entry points */
  17. #ifdef __CUDA_ARCH__
  18. #include "kernel/kernel_compat_cuda.h"
  19. #include "kernel_config.h"
  20. #include "util/util_atomic.h"
  21. #include "kernel/kernel_math.h"
  22. #include "kernel/kernel_types.h"
  23. #include "kernel/kernel_globals.h"
  24. #include "kernel/kernel_color.h"
  25. #include "kernel/kernels/cuda/kernel_cuda_image.h"
  26. #include "kernel/kernel_film.h"
  27. #include "kernel/kernel_path.h"
  28. #include "kernel/kernel_path_branched.h"
  29. #include "kernel/kernel_bake.h"
  30. #include "kernel/kernel_work_stealing.h"
  31. /* kernels */
  32. extern "C" __global__ void
  33. CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
  34. kernel_cuda_path_trace(WorkTile *tile, uint total_work_size)
  35. {
  36. int work_index = ccl_global_id(0);
  37. bool thread_is_active = work_index < total_work_size;
  38. uint x, y, sample;
  39. KernelGlobals kg;
  40. if(thread_is_active) {
  41. get_work_pixel(tile, work_index, &x, &y, &sample);
  42. kernel_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
  43. }
  44. if(kernel_data.film.cryptomatte_passes) {
  45. __syncthreads();
  46. if(thread_is_active) {
  47. kernel_cryptomatte_post(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
  48. }
  49. }
  50. }
  51. #ifdef __BRANCHED_PATH__
  52. extern "C" __global__ void
  53. CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_BRANCHED_MAX_REGISTERS)
  54. kernel_cuda_branched_path_trace(WorkTile *tile, uint total_work_size)
  55. {
  56. int work_index = ccl_global_id(0);
  57. bool thread_is_active = work_index < total_work_size;
  58. uint x, y, sample;
  59. KernelGlobals kg;
  60. if(thread_is_active) {
  61. get_work_pixel(tile, work_index, &x, &y, &sample);
  62. kernel_branched_path_trace(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
  63. }
  64. if(kernel_data.film.cryptomatte_passes) {
  65. __syncthreads();
  66. if(thread_is_active) {
  67. kernel_cryptomatte_post(&kg, tile->buffer, sample, x, y, tile->offset, tile->stride);
  68. }
  69. }
  70. }
  71. #endif
  72. extern "C" __global__ void
  73. CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
  74. kernel_cuda_convert_to_byte(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
  75. {
  76. int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
  77. int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
  78. if(x < sx + sw && y < sy + sh) {
  79. kernel_film_convert_to_byte(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
  80. }
  81. }
  82. extern "C" __global__ void
  83. CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
  84. kernel_cuda_convert_to_half_float(uchar4 *rgba, float *buffer, float sample_scale, int sx, int sy, int sw, int sh, int offset, int stride)
  85. {
  86. int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
  87. int y = sy + blockDim.y*blockIdx.y + threadIdx.y;
  88. if(x < sx + sw && y < sy + sh) {
  89. kernel_film_convert_to_half_float(NULL, rgba, buffer, sample_scale, x, y, offset, stride);
  90. }
  91. }
  92. extern "C" __global__ void
  93. CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
  94. kernel_cuda_displace(uint4 *input,
  95. float4 *output,
  96. int type,
  97. int sx,
  98. int sw,
  99. int offset,
  100. int sample)
  101. {
  102. int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
  103. if(x < sx + sw) {
  104. KernelGlobals kg;
  105. kernel_displace_evaluate(&kg, input, output, x);
  106. }
  107. }
  108. extern "C" __global__ void
  109. CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
  110. kernel_cuda_background(uint4 *input,
  111. float4 *output,
  112. int type,
  113. int sx,
  114. int sw,
  115. int offset,
  116. int sample)
  117. {
  118. int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
  119. if(x < sx + sw) {
  120. KernelGlobals kg;
  121. kernel_background_evaluate(&kg, input, output, x);
  122. }
  123. }
  124. #ifdef __BAKING__
  125. extern "C" __global__ void
  126. CUDA_LAUNCH_BOUNDS(CUDA_THREADS_BLOCK_WIDTH, CUDA_KERNEL_MAX_REGISTERS)
  127. kernel_cuda_bake(uint4 *input, float4 *output, int type, int filter, int sx, int sw, int offset, int sample)
  128. {
  129. int x = sx + blockDim.x*blockIdx.x + threadIdx.x;
  130. if(x < sx + sw) {
  131. KernelGlobals kg;
  132. kernel_bake_evaluate(&kg, input, output, (ShaderEvalType)type, filter, x, offset, sample);
  133. }
  134. }
  135. #endif
  136. #endif