/* kernel_camera.h */
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
CCL_NAMESPACE_BEGIN

/* Perspective Camera */
  18. ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
  19. {
  20. float blades = cam->blades;
  21. float2 bokeh;
  22. if (blades == 0.0f) {
  23. /* sample disk */
  24. bokeh = concentric_sample_disk(u, v);
  25. }
  26. else {
  27. /* sample polygon */
  28. float rotation = cam->bladesrotation;
  29. bokeh = regular_polygon_sample(blades, rotation, u, v);
  30. }
  31. /* anamorphic lens bokeh */
  32. bokeh.x *= cam->inv_aperture_ratio;
  33. return bokeh;
  34. }
ccl_device void camera_sample_perspective(KernelGlobals *kg,
                                          float raster_x,
                                          float raster_y,
                                          float lens_u,
                                          float lens_v,
                                          ccl_addr_space Ray *ray)
{
  /* Generate a world-space ray for a perspective camera.
   *
   * raster_x, raster_y: sub-pixel position on the raster grid.
   * lens_u, lens_v: 2D sample used to pick a point on the aperture for
   *   depth of field (only used when aperturesize > 0).
   * ray: output; P, D, t and (when enabled) differentials are written.
   *   NOTE(review): ray->time is read below, so the caller must set it
   *   before calling — confirm against call sites. */

  /* create ray from raster position */
  ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
  float3 raster = make_float3(raster_x, raster_y, 0.0f);
  float3 Pcamera = transform_perspective(&rastertocamera, raster);

#ifdef __CAMERA_MOTION__
  if (kernel_data.cam.have_perspective_motion) {
    /* TODO(sergey): Currently we interpolate projected coordinate which
     * gives nice looking result and which is simple, but is in fact a bit
     * different comparing to constructing projective matrix from an
     * interpolated field of view.
     */
    if (ray->time < 0.5f) {
      /* First half of the shutter: blend from the pre-motion projection. */
      ProjectionTransform rastertocamera_pre = kernel_data.cam.perspective_pre;
      float3 Pcamera_pre = transform_perspective(&rastertocamera_pre, raster);
      Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
    }
    else {
      /* Second half: blend towards the post-motion projection. */
      ProjectionTransform rastertocamera_post = kernel_data.cam.perspective_post;
      float3 Pcamera_post = transform_perspective(&rastertocamera_post, raster);
      Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
    }
  }
#endif

  /* Camera-space ray: origin at the camera, direction through the raster
   * position on the image plane. */
  float3 P = make_float3(0.0f, 0.0f, 0.0f);
  float3 D = Pcamera;

  /* modify ray for depth of field */
  float aperturesize = kernel_data.cam.aperturesize;
  if (aperturesize > 0.0f) {
    /* sample point on aperture */
    float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;

    /* compute point on plane of focus */
    float ft = kernel_data.cam.focaldistance / D.z;
    float3 Pfocus = D * ft;

    /* update ray for effect of lens: move the origin onto the aperture and
     * re-aim it so the focus point stays sharp. */
    P = make_float3(lensuv.x, lensuv.y, 0.0f);
    D = normalize(Pfocus - P);
  }

  /* transform ray from camera to world */
  Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
  if (kernel_data.cam.num_motion_steps) {
    /* Motion blur: interpolate the camera matrix at ray->time. */
    transform_motion_array_interpolate(&cameratoworld,
                                       kernel_tex_array(__camera_motion),
                                       kernel_data.cam.num_motion_steps,
                                       ray->time);
  }
#endif

  P = transform_point(&cameratoworld, P);
  D = normalize(transform_direction(&cameratoworld, D));

  bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
  if (!use_stereo) {
    /* No stereo */
    ray->P = P;
    ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
    /* Differentials from the center direction, deliberately ignoring the
     * depth-of-field lens offset. */
    float3 Dcenter = transform_direction(&cameratoworld, Pcamera);

    ray->dP = differential3_zero();
    ray->dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - normalize(Dcenter);
    ray->dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - normalize(Dcenter);
#endif
  }
  else {
    /* Spherical stereo */
    spherical_stereo_transform(&kernel_data.cam, &P, &D);
    ray->P = P;
    ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
    /* Ray differentials, computed from scratch using the raster coordinates
     * because we don't want to be affected by depth of field. We compute
     * ray origin and direction for the center and two neighboring pixels
     * and simply take their differences. */
    float3 Pnostereo = transform_point(&cameratoworld, make_float3(0.0f, 0.0f, 0.0f));

    float3 Pcenter = Pnostereo;
    float3 Dcenter = Pcamera;
    Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
    spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);

    /* Neighboring pixel in x. */
    float3 Px = Pnostereo;
    float3 Dx = transform_perspective(&rastertocamera,
                                      make_float3(raster_x + 1.0f, raster_y, 0.0f));
    Dx = normalize(transform_direction(&cameratoworld, Dx));
    spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);

    ray->dP.dx = Px - Pcenter;
    ray->dD.dx = Dx - Dcenter;

    /* Neighboring pixel in y. */
    float3 Py = Pnostereo;
    float3 Dy = transform_perspective(&rastertocamera,
                                      make_float3(raster_x, raster_y + 1.0f, 0.0f));
    Dy = normalize(transform_direction(&cameratoworld, Dy));
    spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);

    ray->dP.dy = Py - Pcenter;
    ray->dD.dy = Dy - Dcenter;
#endif
  }

#ifdef __CAMERA_CLIPPING__
  /* clipping: advance the origin (and origin differentials) to the near
   * clip plane and bound the ray length by the clip length, both scaled
   * by 1/z of the normalized camera-space direction. */
  float z_inv = 1.0f / normalize(Pcamera).z;
  float nearclip = kernel_data.cam.nearclip * z_inv;
  ray->P += nearclip * ray->D;
  ray->dP.dx += nearclip * ray->dD.dx;
  ray->dP.dy += nearclip * ray->dD.dy;
  ray->t = kernel_data.cam.cliplength * z_inv;
#else
  ray->t = FLT_MAX;
#endif
}
/* Orthographic Camera */
  147. ccl_device void camera_sample_orthographic(KernelGlobals *kg,
  148. float raster_x,
  149. float raster_y,
  150. float lens_u,
  151. float lens_v,
  152. ccl_addr_space Ray *ray)
  153. {
  154. /* create ray form raster position */
  155. ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
  156. float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
  157. float3 P;
  158. float3 D = make_float3(0.0f, 0.0f, 1.0f);
  159. /* modify ray for depth of field */
  160. float aperturesize = kernel_data.cam.aperturesize;
  161. if (aperturesize > 0.0f) {
  162. /* sample point on aperture */
  163. float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v) * aperturesize;
  164. /* compute point on plane of focus */
  165. float3 Pfocus = D * kernel_data.cam.focaldistance;
  166. /* update ray for effect of lens */
  167. float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
  168. P = Pcamera + lensuvw;
  169. D = normalize(Pfocus - lensuvw);
  170. }
  171. else {
  172. P = Pcamera;
  173. }
  174. /* transform ray from camera to world */
  175. Transform cameratoworld = kernel_data.cam.cameratoworld;
  176. #ifdef __CAMERA_MOTION__
  177. if (kernel_data.cam.num_motion_steps) {
  178. transform_motion_array_interpolate(&cameratoworld,
  179. kernel_tex_array(__camera_motion),
  180. kernel_data.cam.num_motion_steps,
  181. ray->time);
  182. }
  183. #endif
  184. ray->P = transform_point(&cameratoworld, P);
  185. ray->D = normalize(transform_direction(&cameratoworld, D));
  186. #ifdef __RAY_DIFFERENTIALS__
  187. /* ray differential */
  188. ray->dP.dx = float4_to_float3(kernel_data.cam.dx);
  189. ray->dP.dy = float4_to_float3(kernel_data.cam.dy);
  190. ray->dD = differential3_zero();
  191. #endif
  192. #ifdef __CAMERA_CLIPPING__
  193. /* clipping */
  194. ray->t = kernel_data.cam.cliplength;
  195. #else
  196. ray->t = FLT_MAX;
  197. #endif
  198. }
/* Panorama Camera */
ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
                                              const ccl_global DecomposedTransform *cam_motion,
                                              float raster_x,
                                              float raster_y,
                                              float lens_u,
                                              float lens_v,
                                              ccl_addr_space Ray *ray)
{
  /* Generate a world-space ray for a panorama camera.
   *
   * cam: camera parameters.
   * cam_motion: motion steps used when the camera animates (only read
   *   under __CAMERA_MOTION__ when num_motion_steps is set).
   * raster_x, raster_y: sub-pixel raster position.
   * lens_u, lens_v: aperture sample for depth of field.
   * ray: output; on a direction outside the lens only ray->t is set (to
   *   0.0f) and the function returns early. */

  /* create ray from raster position */
  ProjectionTransform rastertocamera = cam->rastertocamera;
  float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));

  float3 P = make_float3(0.0f, 0.0f, 0.0f);
  float3 D = panorama_to_direction(cam, Pcamera.x, Pcamera.y);

  /* indicates ray should not receive any light, outside of the lens */
  if (is_zero(D)) {
    ray->t = 0.0f;
    return;
  }

  /* modify ray for depth of field */
  float aperturesize = cam->aperturesize;

  if (aperturesize > 0.0f) {
    /* sample point on aperture */
    float2 lensuv = camera_sample_aperture(cam, lens_u, lens_v) * aperturesize;

    /* compute point on plane of focus */
    float3 Dfocus = normalize(D);
    float3 Pfocus = Dfocus * cam->focaldistance;

    /* calculate orthonormal coordinates perpendicular to Dfocus */
    float3 U, V;
    U = normalize(make_float3(1.0f, 0.0f, 0.0f) - Dfocus.x * Dfocus);
    V = normalize(cross(Dfocus, U));

    /* update ray for effect of lens: shift origin within the aperture
     * plane and re-aim at the focus point. */
    P = U * lensuv.x + V * lensuv.y;
    D = normalize(Pfocus - P);
  }

  /* transform ray from camera to world */
  Transform cameratoworld = cam->cameratoworld;

#ifdef __CAMERA_MOTION__
  if (cam->num_motion_steps) {
    /* Motion blur: interpolate the camera matrix at ray->time. */
    transform_motion_array_interpolate(
        &cameratoworld, cam_motion, cam->num_motion_steps, ray->time);
  }
#endif

  P = transform_point(&cameratoworld, P);
  D = normalize(transform_direction(&cameratoworld, D));

  /* Stereo transform */
  bool use_stereo = cam->interocular_offset != 0.0f;
  if (use_stereo) {
    spherical_stereo_transform(cam, &P, &D);
  }

  ray->P = P;
  ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
  /* Ray differentials, computed from scratch using the raster coordinates
   * because we don't want to be affected by depth of field. We compute
   * ray origin and direction for the center and two neighboring pixels
   * and simply take their differences. */
  float3 Pcenter = Pcamera;
  float3 Dcenter = panorama_to_direction(cam, Pcenter.x, Pcenter.y);
  Pcenter = transform_point(&cameratoworld, Pcenter);
  Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
  if (use_stereo) {
    spherical_stereo_transform(cam, &Pcenter, &Dcenter);
  }

  /* Neighboring pixel in x. */
  float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
  float3 Dx = panorama_to_direction(cam, Px.x, Px.y);
  Px = transform_point(&cameratoworld, Px);
  Dx = normalize(transform_direction(&cameratoworld, Dx));
  if (use_stereo) {
    spherical_stereo_transform(cam, &Px, &Dx);
  }

  ray->dP.dx = Px - Pcenter;
  ray->dD.dx = Dx - Dcenter;

  /* Neighboring pixel in y. */
  float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
  float3 Dy = panorama_to_direction(cam, Py.x, Py.y);
  Py = transform_point(&cameratoworld, Py);
  Dy = normalize(transform_direction(&cameratoworld, Dy));
  if (use_stereo) {
    spherical_stereo_transform(cam, &Py, &Dy);
  }

  ray->dP.dy = Py - Pcenter;
  ray->dD.dy = Dy - Dcenter;
#endif

#ifdef __CAMERA_CLIPPING__
  /* clipping: advance origin and origin differentials to the near clip
   * distance along the ray, and bound the ray length. */
  float nearclip = cam->nearclip;
  ray->P += nearclip * ray->D;
  ray->dP.dx += nearclip * ray->dD.dx;
  ray->dP.dy += nearclip * ray->dD.dy;
  ray->t = cam->cliplength;
#else
  ray->t = FLT_MAX;
#endif
}
/* Common */
  294. ccl_device_inline void camera_sample(KernelGlobals *kg,
  295. int x,
  296. int y,
  297. float filter_u,
  298. float filter_v,
  299. float lens_u,
  300. float lens_v,
  301. float time,
  302. ccl_addr_space Ray *ray)
  303. {
  304. /* pixel filter */
  305. int filter_table_offset = kernel_data.film.filter_table_offset;
  306. float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
  307. float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);
  308. #ifdef __CAMERA_MOTION__
  309. /* motion blur */
  310. if (kernel_data.cam.shuttertime == -1.0f) {
  311. ray->time = 0.5f;
  312. }
  313. else {
  314. /* TODO(sergey): Such lookup is unneeded when there's rolling shutter
  315. * effect in use but rolling shutter duration is set to 0.0.
  316. */
  317. const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
  318. ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
  319. /* TODO(sergey): Currently single rolling shutter effect type only
  320. * where scan-lines are acquired from top to bottom and whole scanline
  321. * is acquired at once (no delay in acquisition happens between pixels
  322. * of single scan-line).
  323. *
  324. * Might want to support more models in the future.
  325. */
  326. if (kernel_data.cam.rolling_shutter_type) {
  327. /* Time corresponding to a fully rolling shutter only effect:
  328. * top of the frame is time 0.0, bottom of the frame is time 1.0.
  329. */
  330. const float time = 1.0f - (float)y / kernel_data.cam.height;
  331. const float duration = kernel_data.cam.rolling_shutter_duration;
  332. if (duration != 0.0f) {
  333. /* This isn't fully physical correct, but lets us to have simple
  334. * controls in the interface. The idea here is basically sort of
  335. * linear interpolation between how much rolling shutter effect
  336. * exist on the frame and how much of it is a motion blur effect.
  337. */
  338. ray->time = (ray->time - 0.5f) * duration;
  339. ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
  340. }
  341. else {
  342. ray->time = time;
  343. }
  344. }
  345. }
  346. #endif
  347. /* sample */
  348. if (kernel_data.cam.type == CAMERA_PERSPECTIVE) {
  349. camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
  350. }
  351. else if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
  352. camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
  353. }
  354. else {
  355. const ccl_global DecomposedTransform *cam_motion = kernel_tex_array(__camera_motion);
  356. camera_sample_panorama(&kernel_data.cam, cam_motion, raster_x, raster_y, lens_u, lens_v, ray);
  357. }
  358. }
/* Utilities */
  360. ccl_device_inline float3 camera_position(KernelGlobals *kg)
  361. {
  362. Transform cameratoworld = kernel_data.cam.cameratoworld;
  363. return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
  364. }
  365. ccl_device_inline float camera_distance(KernelGlobals *kg, float3 P)
  366. {
  367. Transform cameratoworld = kernel_data.cam.cameratoworld;
  368. float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
  369. if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
  370. float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
  371. return fabsf(dot((P - camP), camD));
  372. }
  373. else
  374. return len(P - camP);
  375. }
  376. ccl_device_inline float3 camera_direction_from_point(KernelGlobals *kg, float3 P)
  377. {
  378. Transform cameratoworld = kernel_data.cam.cameratoworld;
  379. if (kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
  380. float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
  381. return -camD;
  382. }
  383. else {
  384. float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
  385. return normalize(camP - P);
  386. }
  387. }
ccl_device_inline float3 camera_world_to_ndc(KernelGlobals *kg, ShaderData *sd, float3 P)
{
  /* Map a world-space position (or, for background shading points, a
   * direction) to normalized device coordinates. */
  if (kernel_data.cam.type != CAMERA_PANORAMA) {
    /* perspective / ortho */
    /* NOTE(review): the first check uses PRIM_NONE while the panorama
     * branch uses OBJECT_NONE for the same "background" test — presumably
     * both sentinels compare equal here; confirm against their
     * definitions before unifying. */
    if (sd->object == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE)
      P += camera_position(kg);

    ProjectionTransform tfm = kernel_data.cam.worldtondc;
    return transform_perspective(&tfm, P);
  }
  else {
    /* panorama: project through the camera transform, then map the
     * resulting direction to panorama UV coordinates. */
    Transform tfm = kernel_data.cam.worldtocamera;

    if (sd->object != OBJECT_NONE)
      P = normalize(transform_point(&tfm, P));
    else
      P = normalize(transform_direction(&tfm, P));

    float2 uv = direction_to_panorama(&kernel_data.cam, P);

    return make_float3(uv.x, uv.y, 0.0f);
  }
}
CCL_NAMESPACE_END