camera.cpp 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777
  1. /*
  2. * Copyright 2011-2013 Blender Foundation
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "render/camera.h"
  17. #include "render/mesh.h"
  18. #include "render/object.h"
  19. #include "render/scene.h"
  20. #include "render/tables.h"
  21. #include "device/device.h"
  22. #include "util/util_foreach.h"
  23. #include "util/util_function.h"
  24. #include "util/util_logging.h"
  25. #include "util/util_math_cdf.h"
  26. #include "util/util_vector.h"
  27. /* needed for calculating differentials */
  28. #include "kernel/kernel_compat_cpu.h"
  29. #include "kernel/split/kernel_split_data.h"
  30. #include "kernel/kernel_globals.h"
  31. #include "kernel/kernel_projection.h"
  32. #include "kernel/kernel_differential.h"
  33. #include "kernel/kernel_montecarlo.h"
  34. #include "kernel/kernel_camera.h"
  35. CCL_NAMESPACE_BEGIN
  36. static float shutter_curve_eval(float x, array<float> &shutter_curve)
  37. {
  38. if (shutter_curve.size() == 0) {
  39. return 1.0f;
  40. }
  41. x *= shutter_curve.size();
  42. int index = (int)x;
  43. float frac = x - index;
  44. if (index < shutter_curve.size() - 1) {
  45. return lerp(shutter_curve[index], shutter_curve[index + 1], frac);
  46. }
  47. else {
  48. return shutter_curve[shutter_curve.size() - 1];
  49. }
  50. }
  51. NODE_DEFINE(Camera)
  52. {
  53. NodeType *type = NodeType::add("camera", create);
  54. SOCKET_FLOAT(shuttertime, "Shutter Time", 1.0f);
  55. static NodeEnum motion_position_enum;
  56. motion_position_enum.insert("start", MOTION_POSITION_START);
  57. motion_position_enum.insert("center", MOTION_POSITION_CENTER);
  58. motion_position_enum.insert("end", MOTION_POSITION_END);
  59. SOCKET_ENUM(motion_position, "Motion Position", motion_position_enum, MOTION_POSITION_CENTER);
  60. static NodeEnum rolling_shutter_type_enum;
  61. rolling_shutter_type_enum.insert("none", ROLLING_SHUTTER_NONE);
  62. rolling_shutter_type_enum.insert("top", ROLLING_SHUTTER_TOP);
  63. SOCKET_ENUM(rolling_shutter_type,
  64. "Rolling Shutter Type",
  65. rolling_shutter_type_enum,
  66. ROLLING_SHUTTER_NONE);
  67. SOCKET_FLOAT(rolling_shutter_duration, "Rolling Shutter Duration", 0.1f);
  68. SOCKET_FLOAT_ARRAY(shutter_curve, "Shutter Curve", array<float>());
  69. SOCKET_FLOAT(aperturesize, "Aperture Size", 0.0f);
  70. SOCKET_FLOAT(focaldistance, "Focal Distance", 10.0f);
  71. SOCKET_UINT(blades, "Blades", 0);
  72. SOCKET_FLOAT(bladesrotation, "Blades Rotation", 0.0f);
  73. SOCKET_TRANSFORM(matrix, "Matrix", transform_identity());
  74. SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());
  75. SOCKET_FLOAT(aperture_ratio, "Aperture Ratio", 1.0f);
  76. static NodeEnum type_enum;
  77. type_enum.insert("perspective", CAMERA_PERSPECTIVE);
  78. type_enum.insert("orthograph", CAMERA_ORTHOGRAPHIC);
  79. type_enum.insert("panorama", CAMERA_PANORAMA);
  80. SOCKET_ENUM(type, "Type", type_enum, CAMERA_PERSPECTIVE);
  81. static NodeEnum panorama_type_enum;
  82. panorama_type_enum.insert("equirectangular", PANORAMA_EQUIRECTANGULAR);
  83. panorama_type_enum.insert("mirrorball", PANORAMA_MIRRORBALL);
  84. panorama_type_enum.insert("fisheye_equidistant", PANORAMA_FISHEYE_EQUIDISTANT);
  85. panorama_type_enum.insert("fisheye_equisolid", PANORAMA_FISHEYE_EQUISOLID);
  86. SOCKET_ENUM(panorama_type, "Panorama Type", panorama_type_enum, PANORAMA_EQUIRECTANGULAR);
  87. SOCKET_FLOAT(fisheye_fov, "Fisheye FOV", M_PI_F);
  88. SOCKET_FLOAT(fisheye_lens, "Fisheye Lens", 10.5f);
  89. SOCKET_FLOAT(latitude_min, "Latitude Min", -M_PI_2_F);
  90. SOCKET_FLOAT(latitude_max, "Latitude Max", M_PI_2_F);
  91. SOCKET_FLOAT(longitude_min, "Longitude Min", -M_PI_F);
  92. SOCKET_FLOAT(longitude_max, "Longitude Max", M_PI_F);
  93. SOCKET_FLOAT(fov, "FOV", M_PI_4_F);
  94. SOCKET_FLOAT(fov_pre, "FOV Pre", M_PI_4_F);
  95. SOCKET_FLOAT(fov_post, "FOV Post", M_PI_4_F);
  96. static NodeEnum stereo_eye_enum;
  97. stereo_eye_enum.insert("none", STEREO_NONE);
  98. stereo_eye_enum.insert("left", STEREO_LEFT);
  99. stereo_eye_enum.insert("right", STEREO_RIGHT);
  100. SOCKET_ENUM(stereo_eye, "Stereo Eye", stereo_eye_enum, STEREO_NONE);
  101. SOCKET_BOOLEAN(use_spherical_stereo, "Use Spherical Stereo", false);
  102. SOCKET_FLOAT(interocular_distance, "Interocular Distance", 0.065f);
  103. SOCKET_FLOAT(convergence_distance, "Convergence Distance", 30.0f * 0.065f);
  104. SOCKET_BOOLEAN(use_pole_merge, "Use Pole Merge", false);
  105. SOCKET_FLOAT(pole_merge_angle_from, "Pole Merge Angle From", 60.0f * M_PI_F / 180.0f);
  106. SOCKET_FLOAT(pole_merge_angle_to, "Pole Merge Angle To", 75.0f * M_PI_F / 180.0f);
  107. SOCKET_FLOAT(sensorwidth, "Sensor Width", 0.036f);
  108. SOCKET_FLOAT(sensorheight, "Sensor Height", 0.024f);
  109. SOCKET_FLOAT(nearclip, "Near Clip", 1e-5f);
  110. SOCKET_FLOAT(farclip, "Far Clip", 1e5f);
  111. SOCKET_FLOAT(viewplane.left, "Viewplane Left", 0);
  112. SOCKET_FLOAT(viewplane.right, "Viewplane Right", 0);
  113. SOCKET_FLOAT(viewplane.bottom, "Viewplane Bottom", 0);
  114. SOCKET_FLOAT(viewplane.top, "Viewplane Top", 0);
  115. SOCKET_FLOAT(border.left, "Border Left", 0);
  116. SOCKET_FLOAT(border.right, "Border Right", 0);
  117. SOCKET_FLOAT(border.bottom, "Border Bottom", 0);
  118. SOCKET_FLOAT(border.top, "Border Top", 0);
  119. SOCKET_FLOAT(offscreen_dicing_scale, "Offscreen Dicing Scale", 1.0f);
  120. return type;
  121. }
  122. Camera::Camera() : Node(node_type)
  123. {
  124. shutter_table_offset = TABLE_OFFSET_INVALID;
  125. width = 1024;
  126. height = 512;
  127. resolution = 1;
  128. use_perspective_motion = false;
  129. shutter_curve.resize(RAMP_TABLE_SIZE);
  130. for (int i = 0; i < shutter_curve.size(); ++i) {
  131. shutter_curve[i] = 1.0f;
  132. }
  133. compute_auto_viewplane();
  134. screentoworld = projection_identity();
  135. rastertoworld = projection_identity();
  136. ndctoworld = projection_identity();
  137. rastertocamera = projection_identity();
  138. cameratoworld = transform_identity();
  139. worldtoraster = projection_identity();
  140. full_rastertocamera = projection_identity();
  141. dx = make_float3(0.0f, 0.0f, 0.0f);
  142. dy = make_float3(0.0f, 0.0f, 0.0f);
  143. need_update = true;
  144. need_device_update = true;
  145. need_flags_update = true;
  146. previous_need_motion = -1;
  147. memset((void *)&kernel_camera, 0, sizeof(kernel_camera));
  148. }
  149. Camera::~Camera()
  150. {
  151. }
  152. void Camera::compute_auto_viewplane()
  153. {
  154. if (type == CAMERA_PANORAMA) {
  155. viewplane.left = 0.0f;
  156. viewplane.right = 1.0f;
  157. viewplane.bottom = 0.0f;
  158. viewplane.top = 1.0f;
  159. }
  160. else {
  161. float aspect = (float)width / (float)height;
  162. if (width >= height) {
  163. viewplane.left = -aspect;
  164. viewplane.right = aspect;
  165. viewplane.bottom = -1.0f;
  166. viewplane.top = 1.0f;
  167. }
  168. else {
  169. viewplane.left = -1.0f;
  170. viewplane.right = 1.0f;
  171. viewplane.bottom = -1.0f / aspect;
  172. viewplane.top = 1.0f / aspect;
  173. }
  174. }
  175. }
/* Recompute all derived transforms, differentials and frustum data from the
 * node sockets and fill in the host-side kernel camera (kernel_camera and
 * kernel_camera_motion). Does nothing unless need_update is set, but always
 * re-checks whether the scene's motion model changed since the last call. */
void Camera::update(Scene *scene)
{
  Scene::MotionType need_motion = scene->need_motion();

  if (previous_need_motion != need_motion) {
    /* scene's motion model could have been changed since previous device
     * camera update this could happen for example in case when one render
     * layer has got motion pass and another not */
    need_device_update = true;
  }

  if (!need_update)
    return;

  /* Full viewport to camera border in the viewport. */
  Transform fulltoborder = transform_from_viewplane(viewport_camera_border);
  Transform bordertofull = transform_inverse(fulltoborder);

  /* ndc to raster */
  Transform ndctoraster = transform_scale(width, height, 1.0f) * bordertofull;
  Transform full_ndctoraster = transform_scale(full_width, full_height, 1.0f) * bordertofull;

  /* raster to screen */
  Transform screentondc = fulltoborder * transform_from_viewplane(viewplane);

  Transform screentoraster = ndctoraster * screentondc;
  Transform rastertoscreen = transform_inverse(screentoraster);
  /* Same chain but for the full (border-less) render resolution. */
  Transform full_screentoraster = full_ndctoraster * screentondc;
  Transform full_rastertoscreen = transform_inverse(full_screentoraster);

  /* screen to camera */
  ProjectionTransform cameratoscreen;
  if (type == CAMERA_PERSPECTIVE)
    cameratoscreen = projection_perspective(fov, nearclip, farclip);
  else if (type == CAMERA_ORTHOGRAPHIC)
    cameratoscreen = projection_orthographic(nearclip, farclip);
  else
    /* Panorama: projection is handled in the kernel, not by a matrix. */
    cameratoscreen = projection_identity();

  ProjectionTransform screentocamera = projection_inverse(cameratoscreen);

  rastertocamera = screentocamera * rastertoscreen;
  full_rastertocamera = screentocamera * full_rastertoscreen;
  cameratoraster = screentoraster * cameratoscreen;

  cameratoworld = matrix;
  screentoworld = cameratoworld * screentocamera;
  rastertoworld = cameratoworld * rastertocamera;
  ndctoworld = rastertoworld * ndctoraster;

  /* note we recompose matrices instead of taking inverses of the above, this
   * is needed to avoid inverting near degenerate matrices that happen due to
   * precision issues with large scenes */
  worldtocamera = transform_inverse(matrix);
  worldtoscreen = cameratoscreen * worldtocamera;
  worldtondc = screentondc * worldtoscreen;
  worldtoraster = ndctoraster * worldtondc;

  /* differentials: camera-space offsets of one raster pixel in x and y. */
  if (type == CAMERA_ORTHOGRAPHIC) {
    dx = transform_perspective_direction(&rastertocamera, make_float3(1, 0, 0));
    dy = transform_perspective_direction(&rastertocamera, make_float3(0, 1, 0));
    full_dx = transform_perspective_direction(&full_rastertocamera, make_float3(1, 0, 0));
    full_dy = transform_perspective_direction(&full_rastertocamera, make_float3(0, 1, 0));
  }
  else if (type == CAMERA_PERSPECTIVE) {
    /* Difference of two projected points, since the projective transform
     * is not linear in the raster coordinates. */
    dx = transform_perspective(&rastertocamera, make_float3(1, 0, 0)) -
         transform_perspective(&rastertocamera, make_float3(0, 0, 0));
    dy = transform_perspective(&rastertocamera, make_float3(0, 1, 0)) -
         transform_perspective(&rastertocamera, make_float3(0, 0, 0));
    full_dx = transform_perspective(&full_rastertocamera, make_float3(1, 0, 0)) -
              transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
    full_dy = transform_perspective(&full_rastertocamera, make_float3(0, 1, 0)) -
              transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
  }
  else {
    /* Panorama has no meaningful raster-space differentials here. */
    dx = make_float3(0.0f, 0.0f, 0.0f);
    dy = make_float3(0.0f, 0.0f, 0.0f);
  }

  dx = transform_direction(&cameratoworld, dx);
  dy = transform_direction(&cameratoworld, dy);
  full_dx = transform_direction(&cameratoworld, full_dx);
  full_dy = transform_direction(&cameratoworld, full_dy);

  if (type == CAMERA_PERSPECTIVE) {
    /* Camera-space frustum side-plane normals, used for dicing distance. */
    float3 v = transform_perspective(&full_rastertocamera,
                                     make_float3(full_width, full_height, 1.0f));
    frustum_right_normal = normalize(make_float3(v.z, 0.0f, -v.x));
    frustum_top_normal = normalize(make_float3(0.0f, v.z, -v.y));
  }

  /* Compute kernel camera data. */
  KernelCamera *kcam = &kernel_camera;

  /* store matrices */
  kcam->screentoworld = screentoworld;
  kcam->rastertoworld = rastertoworld;
  kcam->rastertocamera = rastertocamera;
  kcam->cameratoworld = cameratoworld;
  kcam->worldtocamera = worldtocamera;
  kcam->worldtoscreen = worldtoscreen;
  kcam->worldtoraster = worldtoraster;
  kcam->worldtondc = worldtondc;
  kcam->ndctoworld = ndctoworld;

  /* camera motion */
  kcam->num_motion_steps = 0;
  kcam->have_perspective_motion = 0;
  kernel_camera_motion.clear();

  /* Test if any of the transforms are actually different. */
  bool have_motion = false;
  for (size_t i = 0; i < motion.size(); i++) {
    have_motion = have_motion || motion[i] != matrix;
  }

  if (need_motion == Scene::MOTION_PASS) {
    /* TODO(sergey): Support perspective (zoom, fov) motion. */
    if (type == CAMERA_PANORAMA) {
      if (have_motion) {
        kcam->motion_pass_pre = transform_inverse(motion[0]);
        kcam->motion_pass_post = transform_inverse(motion[motion.size() - 1]);
      }
      else {
        kcam->motion_pass_pre = kcam->worldtocamera;
        kcam->motion_pass_post = kcam->worldtocamera;
      }
    }
    else {
      if (have_motion) {
        kcam->perspective_pre = cameratoraster * transform_inverse(motion[0]);
        kcam->perspective_post = cameratoraster * transform_inverse(motion[motion.size() - 1]);
      }
      else {
        kcam->perspective_pre = worldtoraster;
        kcam->perspective_post = worldtoraster;
      }
    }
  }
  else if (need_motion == Scene::MOTION_BLUR) {
    if (have_motion) {
      /* Decompose motion transforms for interpolation in the kernel. */
      kernel_camera_motion.resize(motion.size());
      transform_motion_decompose(kernel_camera_motion.data(), motion.data(), motion.size());
      kcam->num_motion_steps = motion.size();
    }

    /* TODO(sergey): Support other types of camera. */
    if (use_perspective_motion && type == CAMERA_PERSPECTIVE) {
      /* TODO(sergey): Move to an utility function and de-duplicate with
       * calculation above.
       */
      ProjectionTransform screentocamera_pre = projection_inverse(
          projection_perspective(fov_pre, nearclip, farclip));
      ProjectionTransform screentocamera_post = projection_inverse(
          projection_perspective(fov_post, nearclip, farclip));

      kcam->perspective_pre = screentocamera_pre * rastertoscreen;
      kcam->perspective_post = screentocamera_post * rastertoscreen;
      kcam->have_perspective_motion = 1;
    }
  }

  /* depth of field */
  kcam->aperturesize = aperturesize;
  kcam->focaldistance = focaldistance;
  /* Fewer than 3 blades means a circular aperture (0 disables the polygon). */
  kcam->blades = (blades < 3) ? 0.0f : blades;
  kcam->bladesrotation = bladesrotation;

  /* motion blur; -1 signals "no motion blur" to device_update(). */
  kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime : -1.0f;

  /* type */
  kcam->type = type;

  /* anamorphic lens bokeh */
  kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

  /* panorama */
  kcam->panorama_type = panorama_type;
  kcam->fisheye_fov = fisheye_fov;
  kcam->fisheye_lens = fisheye_lens;
  kcam->equirectangular_range = make_float4(longitude_min - longitude_max,
                                            -longitude_min,
                                            latitude_min - latitude_max,
                                            -latitude_min + M_PI_2_F);

  /* Stereo eye offset along the interocular axis. */
  switch (stereo_eye) {
    case STEREO_LEFT:
      kcam->interocular_offset = -interocular_distance * 0.5f;
      break;
    case STEREO_RIGHT:
      kcam->interocular_offset = interocular_distance * 0.5f;
      break;
    case STEREO_NONE:
    default:
      kcam->interocular_offset = 0.0f;
      break;
  }

  kcam->convergence_distance = convergence_distance;
  if (use_pole_merge) {
    kcam->pole_merge_angle_from = pole_merge_angle_from;
    kcam->pole_merge_angle_to = pole_merge_angle_to;
  }
  else {
    /* Negative angles disable pole merging in the kernel. */
    kcam->pole_merge_angle_from = -1.0f;
    kcam->pole_merge_angle_to = -1.0f;
  }

  /* sensor size */
  kcam->sensorwidth = sensorwidth;
  kcam->sensorheight = sensorheight;

  /* render size */
  kcam->width = width;
  kcam->height = height;
  kcam->resolution = resolution;

  /* store differentials */
  kcam->dx = float3_to_float4(dx);
  kcam->dy = float3_to_float4(dy);

  /* clipping */
  kcam->nearclip = nearclip;
  kcam->cliplength = (farclip == FLT_MAX) ? FLT_MAX : farclip - nearclip;

  /* Camera in volume; refined later by device_update_volume(). */
  kcam->is_inside_volume = 0;

  /* Rolling shutter effect */
  kcam->rolling_shutter_type = rolling_shutter_type;
  kcam->rolling_shutter_duration = rolling_shutter_duration;

  /* Set further update flags */
  need_update = false;
  need_device_update = true;
  need_flags_update = true;
  previous_need_motion = need_motion;
}
/* Push the camera data computed in update() to the device scene: the kernel
 * camera struct, the inverted-CDF shutter curve table used to sample motion
 * blur time, and the decomposed motion transforms (if any). */
void Camera::device_update(Device * /* device */, DeviceScene *dscene, Scene *scene)
{
  update(scene);

  if (!need_device_update)
    return;

  /* Drop any previously uploaded shutter table before rebuilding it. */
  scene->lookup_tables->remove_table(&shutter_table_offset);
  /* shuttertime == -1 means motion blur is disabled (set in update()). */
  if (kernel_camera.shuttertime != -1.0f) {
    vector<float> shutter_table;
    util_cdf_inverted(SHUTTER_TABLE_SIZE,
                      0.0f,
                      1.0f,
                      function_bind(shutter_curve_eval, _1, shutter_curve),
                      false,
                      shutter_table);
    shutter_table_offset = scene->lookup_tables->add_table(dscene, shutter_table);
    kernel_camera.shutter_table_offset = (int)shutter_table_offset;
  }

  dscene->data.cam = kernel_camera;

  size_t num_motion_steps = kernel_camera_motion.size();
  if (num_motion_steps) {
    /* Upload decomposed camera motion transforms. */
    DecomposedTransform *camera_motion = dscene->camera_motion.alloc(num_motion_steps);
    memcpy(camera_motion, kernel_camera_motion.data(), sizeof(*camera_motion) * num_motion_steps);
    dscene->camera_motion.copy_to_device();
  }
  else {
    dscene->camera_motion.free();
  }
}
  409. void Camera::device_update_volume(Device * /*device*/, DeviceScene *dscene, Scene *scene)
  410. {
  411. if (!need_device_update && !need_flags_update) {
  412. return;
  413. }
  414. KernelCamera *kcam = &dscene->data.cam;
  415. BoundBox viewplane_boundbox = viewplane_bounds_get();
  416. for (size_t i = 0; i < scene->objects.size(); ++i) {
  417. Object *object = scene->objects[i];
  418. if (object->mesh->has_volume && viewplane_boundbox.intersects(object->bounds)) {
  419. /* TODO(sergey): Consider adding more grained check. */
  420. VLOG(1) << "Detected camera inside volume.";
  421. kcam->is_inside_volume = 1;
  422. break;
  423. }
  424. }
  425. if (!kcam->is_inside_volume) {
  426. VLOG(1) << "Camera is outside of the volume.";
  427. }
  428. need_device_update = false;
  429. need_flags_update = false;
  430. }
  431. void Camera::device_free(Device * /*device*/, DeviceScene *dscene, Scene *scene)
  432. {
  433. scene->lookup_tables->remove_table(&shutter_table_offset);
  434. dscene->camera_motion.free();
  435. }
  436. bool Camera::modified(const Camera &cam)
  437. {
  438. return !Node::equals(cam);
  439. }
  440. bool Camera::motion_modified(const Camera &cam)
  441. {
  442. return !((motion == cam.motion) && (use_perspective_motion == cam.use_perspective_motion));
  443. }
/* Mark the camera dirty so the next update() call recomputes derived data. */
void Camera::tag_update()
{
  need_update = true;
}
/* Map a raster-space position to a world-space point on the near clip plane.
 * Only perspective and orthographic cameras are handled; used by
 * viewplane_bounds_get() for camera-in-volume detection. */
float3 Camera::transform_raster_to_world(float raster_x, float raster_y)
{
  float3 D, P;
  if (type == CAMERA_PERSPECTIVE) {
    D = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
    float3 Pclip = normalize(D);
    P = make_float3(0.0f, 0.0f, 0.0f);
    /* TODO(sergey): Aperture support? */
    P = transform_point(&cameratoworld, P);
    D = normalize(transform_direction(&cameratoworld, D));
    /* TODO(sergey): Clipping is conditional in kernel, and hence it could
     * be mistakes in here, currently leading to wrong camera-in-volume
     * detection.
     */
    /* Advance from the camera origin to the near clip plane along D;
     * dividing by Pclip.z accounts for the ray's slant. */
    P += nearclip * D / Pclip.z;
  }
  else if (type == CAMERA_ORTHOGRAPHIC) {
    /* Parallel rays along camera-space +Z; the raster position gives the
     * ray origin directly. */
    D = make_float3(0.0f, 0.0f, 1.0f);
    /* TODO(sergey): Aperture support? */
    P = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
    P = transform_point(&cameratoworld, P);
    D = normalize(transform_direction(&cameratoworld, D));
  }
  else {
    assert(!"unsupported camera type");
  }
  return P;
}
  476. BoundBox Camera::viewplane_bounds_get()
  477. {
  478. /* TODO(sergey): This is all rather stupid, but is there a way to perform
  479. * checks we need in a more clear and smart fasion?
  480. */
  481. BoundBox bounds = BoundBox::empty;
  482. if (type == CAMERA_PANORAMA) {
  483. if (use_spherical_stereo == false) {
  484. bounds.grow(make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w));
  485. }
  486. else {
  487. float half_eye_distance = interocular_distance * 0.5f;
  488. bounds.grow(make_float3(
  489. cameratoworld.x.w + half_eye_distance, cameratoworld.y.w, cameratoworld.z.w));
  490. bounds.grow(make_float3(
  491. cameratoworld.z.w, cameratoworld.y.w + half_eye_distance, cameratoworld.z.w));
  492. bounds.grow(make_float3(
  493. cameratoworld.x.w - half_eye_distance, cameratoworld.y.w, cameratoworld.z.w));
  494. bounds.grow(make_float3(
  495. cameratoworld.x.w, cameratoworld.y.w - half_eye_distance, cameratoworld.z.w));
  496. }
  497. }
  498. else {
  499. bounds.grow(transform_raster_to_world(0.0f, 0.0f));
  500. bounds.grow(transform_raster_to_world(0.0f, (float)height));
  501. bounds.grow(transform_raster_to_world((float)width, (float)height));
  502. bounds.grow(transform_raster_to_world((float)width, 0.0f));
  503. if (type == CAMERA_PERSPECTIVE) {
  504. /* Center point has the most distance in local Z axis,
  505. * use it to construct bounding box/
  506. */
  507. bounds.grow(transform_raster_to_world(0.5f * width, 0.5f * height));
  508. }
  509. }
  510. return bounds;
  511. }
/* Estimate the raster-space footprint of one unit of world-space length at
 * world point P, used for adaptive subdivision dicing. Points outside the
 * view frustum get their footprint scaled up by offscreen_dicing_scale. */
float Camera::world_to_raster_size(float3 P)
{
  float res = 1.0f;
  if (type == CAMERA_ORTHOGRAPHIC) {
    /* Constant pixel size everywhere: smaller of the two differentials. */
    res = min(len(full_dx), len(full_dy));

    if (offscreen_dicing_scale > 1.0f) {
      float3 p = transform_point(&worldtocamera, P);
      float3 v = transform_perspective(&full_rastertocamera,
                                       make_float3(full_width, full_height, 0.0f));

      /* Create point clamped to frustum */
      float3 c;
      c.x = max(-v.x, min(v.x, p.x));
      c.y = max(-v.y, min(v.y, p.y));
      c.z = max(0.0f, p.z);

      /* Distance to the frustum, normalized by half the diagonal extent. */
      float f_dist = len(p - c) / sqrtf((v.x * v.x + v.y * v.y) * 0.5f);
      if (f_dist > 0.0f) {
        res += res * f_dist * (offscreen_dicing_scale - 1.0f);
      }
    }
  }
  else if (type == CAMERA_PERSPECTIVE) {
    /* Calculate as if point is directly ahead of the camera. */
    float3 raster = make_float3(0.5f * full_width, 0.5f * full_height, 0.0f);
    float3 Pcamera = transform_perspective(&full_rastertocamera, raster);

    /* dDdx */
    float3 Ddiff = transform_direction(&cameratoworld, Pcamera);
    /* Use the smaller pixel differential for a conservative estimate. */
    float3 dx = len_squared(full_dx) < len_squared(full_dy) ? full_dx : full_dy;
    float3 dDdx = normalize(Ddiff + dx) - normalize(Ddiff);

    /* dPdx: project the direction change to the plane perpendicular to D
     * at the point's distance. */
    float dist = len(transform_point(&worldtocamera, P));
    float3 D = normalize(Ddiff);
    res = len(dist * dDdx - dot(dist * dDdx, D) * D);

    /* Decent approx distance to frustum
     * (doesn't handle corners correctly, but not that big of a deal) */
    float f_dist = 0.0f;

    if (offscreen_dicing_scale > 1.0f) {
      float3 p = transform_point(&worldtocamera, P);

      /* Distance from the four planes */
      float r = dot(p, frustum_right_normal);
      float t = dot(p, frustum_top_normal);
      /* Mirror p to reuse the right/top normals for the left/bottom planes. */
      p = make_float3(-p.x, -p.y, p.z);
      float l = dot(p, frustum_right_normal);
      float b = dot(p, frustum_top_normal);
      p = make_float3(-p.x, -p.y, p.z);

      if (r <= 0.0f && l <= 0.0f && t <= 0.0f && b <= 0.0f) {
        /* Point is inside frustum */
        f_dist = 0.0f;
      }
      else if (r > 0.0f && l > 0.0f && t > 0.0f && b > 0.0f) {
        /* Point is behind frustum */
        f_dist = len(p);
      }
      else {
        /* Point may be behind or off to the side, need to check */
        float3 along_right = make_float3(-frustum_right_normal.z, 0.0f, frustum_right_normal.x);
        float3 along_left = make_float3(frustum_right_normal.z, 0.0f, frustum_right_normal.x);
        float3 along_top = make_float3(0.0f, -frustum_top_normal.z, frustum_top_normal.y);
        float3 along_bottom = make_float3(0.0f, frustum_top_normal.z, frustum_top_normal.y);

        float dist[] = {r, l, t, b};
        float3 along[] = {along_right, along_left, along_top, along_bottom};

        bool test_o = false;

        float *d = dist;
        float3 *a = along;
        for (int i = 0; i < 4; i++, d++, a++) {
          /* Test if we should check this side at all */
          if (*d > 0.0f) {
            if (dot(p, *a) >= 0.0f) {
              /* We are in front of the back edge of this side of the frustum */
              f_dist = max(f_dist, *d);
            }
            else {
              /* Possibly far enough behind the frustum to use distance to origin instead of edge
               */
              test_o = true;
            }
          }
        }

        if (test_o) {
          f_dist = (f_dist > 0) ? min(f_dist, len(p)) : len(p);
        }
      }

      if (f_dist > 0.0f) {
        res += len(dDdx - dot(dDdx, D) * D) * f_dist * (offscreen_dicing_scale - 1.0f);
      }
    }
  }
  else if (type == CAMERA_PANORAMA) {
    float3 D = transform_point(&worldtocamera, P);
    float dist = len(D);

    Ray ray = {{0}};

    /* Distortion can become so great that the results become meaningless, there
     * may be a better way to do this, but calculating differentials from the
     * point directly ahead seems to produce good enough results. */
#if 0
    float2 dir = direction_to_panorama(&kernel_camera, kernel_camera_motion.data(), normalize(D));
    float3 raster = transform_perspective(&full_cameratoraster, make_float3(dir.x, dir.y, 0.0f));

    ray.t = 1.0f;
    camera_sample_panorama(
        &kernel_camera, kernel_camera_motion.data(), raster.x, raster.y, 0.0f, 0.0f, &ray);
    if (ray.t == 0.0f) {
      /* No differentials, just use from directly ahead. */
      camera_sample_panorama(&kernel_camera,
                             kernel_camera_motion.data(),
                             0.5f * full_width,
                             0.5f * full_height,
                             0.0f,
                             0.0f,
                             &ray);
    }
#else
    camera_sample_panorama(&kernel_camera,
                           kernel_camera_motion.data(),
                           0.5f * full_width,
                           0.5f * full_height,
                           0.0f,
                           0.0f,
                           &ray);
#endif

    /* Transfer the ray differentials to the point's distance and take the
     * larger footprint of the two axes. */
    differential_transfer(&ray.dP, ray.dP, ray.D, ray.dD, ray.D, dist);

    return max(len(ray.dP.dx), len(ray.dP.dy));
  }

  return res;
}
  635. bool Camera::use_motion() const
  636. {
  637. return motion.size() > 1;
  638. }
  639. float Camera::motion_time(int step) const
  640. {
  641. return (use_motion()) ? 2.0f * step / (motion.size() - 1) - 1.0f : 0.0f;
  642. }
  643. int Camera::motion_step(float time) const
  644. {
  645. if (use_motion()) {
  646. for (int step = 0; step < motion.size(); step++) {
  647. if (time == motion_time(step)) {
  648. return step;
  649. }
  650. }
  651. }
  652. return -1;
  653. }
  654. CCL_NAMESPACE_END