# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# <pep8 compliant>
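
# This module provides utility operators for the Movie Clip Editor (motion
# tracking): filtering problematic tracks, linking empties to tracks,
# converting reconstructed bundles to a mesh, deleting proxy files, and
# setting up a viewport background and a compositing/tracking scene.
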
import bpy
import os
from bpy.types import Operator
from bpy.props import FloatProperty
from mathutils import (
    Vector,
    Matrix,
)


def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
    screens = bpy.data.screens if all_screens else [context.screen]

    for screen in screens:
        for area in screen.areas:
            if area.type == tarea:
                for space in area.spaces:
                    if space.type == tspace:
                        callback(space, *args)


def CLIP_set_viewport_background(context, clip, clip_user):

    def set_background(cam, clip, user):
        bgpic = None

        for x in cam.background_images:
            if x.source == 'MOVIE_CLIP':
                bgpic = x
                break

        if not bgpic:
            bgpic = cam.background_images.new()

        bgpic.source = 'MOVIE_CLIP'
        bgpic.clip = clip
        bgpic.clip_user.proxy_render_size = user.proxy_render_size
        bgpic.clip_user.use_render_undistorted = True
        bgpic.use_camera_clip = False

        cam.show_background_images = True

    scene_camera = context.scene.camera
    if (not scene_camera) or (scene_camera.type != 'CAMERA'):
        return
    set_background(scene_camera.data, clip, clip_user)


def CLIP_camera_for_clip(context, clip):
    scene = context.scene
    camera = scene.camera

    for ob in scene.objects:
        if ob.type == 'CAMERA':
            for con in ob.constraints:
                if con.type == 'CAMERA_SOLVER':
                    cur_clip = scene.active_clip if con.use_active_clip else con.clip

                    if cur_clip == clip:
                        return ob

    return camera


def CLIP_track_view_selected(sc, track):
    if track.select_anchor:
        return True

    if sc.show_marker_pattern and track.select_pattern:
        return True

    if sc.show_marker_search and track.select_search:
        return True

    return False


def CLIP_default_settings_from_track(clip, track, framenr):
    settings = clip.tracking.settings

    width = clip.size[0]
    height = clip.size[1]

    marker = track.markers.find_frame(framenr, exact=False)
    pattern_bb = marker.pattern_bound_box

    pattern = Vector(pattern_bb[1]) - Vector(pattern_bb[0])
    search = marker.search_max - marker.search_min

    pattern[0] = pattern[0] * width
    pattern[1] = pattern[1] * height

    search[0] = search[0] * width
    search[1] = search[1] * height

    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern[0], pattern[1])
    settings.default_search_size = max(search[0], search[1])
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.default_motion_model = track.motion_model
    settings.use_default_brute = track.use_brute
    settings.use_default_normalization = track.use_normalization
    settings.use_default_mask = track.use_mask
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel
    settings.default_weight = track.weight


class CLIP_OT_filter_tracks(bpy.types.Operator):
    """Filter tracks which have weird-looking spikes in their motion curves"""
    bl_label = "Filter Tracks"
    bl_idname = "clip.filter_tracks"
    bl_options = {'UNDO', 'REGISTER'}

    track_threshold: FloatProperty(
        name="Track Threshold",
        description="Filter threshold to select problematic tracks",
        default=5.0,
    )

    @staticmethod
    def _filter_values(context, threshold):

        def get_marker_coordinates_in_pixels(clip_size, track, frame_number):
            marker = track.markers.find_frame(frame_number)
            return Vector((marker.co[0] * clip_size[0], marker.co[1] * clip_size[1]))

        def marker_velocity(clip_size, track, frame):
            marker_a = get_marker_coordinates_in_pixels(clip_size, track, frame)
            marker_b = get_marker_coordinates_in_pixels(clip_size, track, frame - 1)
            return marker_a - marker_b

        scene = context.scene
        frame_start = scene.frame_start
        frame_end = scene.frame_end
        clip = context.space_data.clip
        clip_size = clip.size[:]

        bpy.ops.clip.clean_tracks(frames=10, action='DELETE_TRACK')

        tracks_to_clean = set()

        for frame in range(frame_start, frame_end + 1):
            # Find tracks with markers in both this frame and the previous one.
            relevant_tracks = [
                track for track in clip.tracking.tracks
                if (track.markers.find_frame(frame) and
                    track.markers.find_frame(frame - 1))
            ]

            if not relevant_tracks:
                continue

            # Get the average velocity and deselect the tracks.
            average_velocity = Vector((0.0, 0.0))
            for track in relevant_tracks:
                track.select = False
                average_velocity += marker_velocity(clip_size, track, frame)
            if len(relevant_tracks) >= 1:
                average_velocity = average_velocity / len(relevant_tracks)

            # Then find all markers that behave differently than the average.
            for track in relevant_tracks:
                track_velocity = marker_velocity(clip_size, track, frame)
                distance = (average_velocity - track_velocity).length

                if distance > threshold:
                    tracks_to_clean.add(track)

        for track in tracks_to_clean:
            track.select = True
        return len(tracks_to_clean)

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return (space.type == 'CLIP_EDITOR') and space.clip

    def execute(self, context):
        num_tracks = self._filter_values(context, self.track_threshold)
        self.report({'INFO'}, "Identified %d problematic tracks" % num_tracks)
        return {'FINISHED'}


class CLIP_OT_set_active_clip(bpy.types.Operator):
    bl_label = "Set Active Clip"
    bl_idname = "clip.set_active_clip"

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'CLIP_EDITOR' and space.clip

    def execute(self, context):
        clip = context.space_data.clip
        scene = context.scene
        scene.active_clip = clip
        scene.render.resolution_x = clip.size[0]
        scene.render.resolution_y = clip.size[1]
        return {'FINISHED'}


class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will copy the movement of the active track"""
    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    @staticmethod
    def _link_track(context, clip, tracking_object, track):
        sc = context.space_data
        constraint = None

        ob = bpy.data.objects.new(name=track.name, object_data=None)
        context.collection.objects.link(ob)
        ob.select_set(True)
        context.view_layer.objects.active = ob

        for con in ob.constraints:
            if con.type == 'FOLLOW_TRACK':
                constraint = con
                break

        if constraint is None:
            constraint = ob.constraints.new(type='FOLLOW_TRACK')

        constraint.use_active_clip = False
        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'CLIP_EDITOR' and space.clip

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}


class CLIP_OT_bundles_to_mesh(Operator):
    """Create a vertex cloud using coordinates of reconstructed tracks"""
    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        new_verts = []

        scene = context.scene
        camera = scene.camera
        matrix = Matrix.Identity(4)
        if camera:
            reconstruction = tracking_object.reconstruction
            framenr = scene.frame_current - clip.frame_start + 1
            reconstructed_matrix = reconstruction.cameras.matrix_from_frame(frame=framenr)
            matrix = camera.matrix_world @ reconstructed_matrix.inverted()

        for track in tracking_object.tracks:
            if track.has_bundle and track.select:
                new_verts.append(track.bundle)

        if new_verts:
            mesh = bpy.data.meshes.new(name="Tracks")
            mesh.vertices.add(len(new_verts))
            mesh.vertices.foreach_set("co", unpack_list(new_verts))

            ob = bpy.data.objects.new(name="Tracks", object_data=mesh)
            ob.matrix_world = matrix
            context.collection.objects.link(ob)
            ob.select_set(True)
            context.view_layer.objects.active = ob
        else:
            self.report({'WARNING'}, "No usable tracks selected")

        return {'FINISHED'}


class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""
    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def invoke(self, context, event):
        wm = context.window_manager

        return wm.invoke_confirm(self, event)

    @staticmethod
    def _rmproxy(abspath):
        import shutil

        if not os.path.exists(abspath):
            return

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        else:
            os.remove(abspath)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        proxy = os.path.join(proxydir, clipfile)
        absproxy = bpy.path.abspath(proxy)

        # proxy_<quality>[_undistorted]
        for x in (25, 50, 75, 100):
            d = os.path.join(absproxy, "proxy_%d" % x)

            self._rmproxy(d)
            self._rmproxy(d + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % x))

        tc = ("free_run.blen_tc",
              "interp_free_run.blen_tc",
              "record_run.blen_tc")

        for x in tc:
            self._rmproxy(os.path.join(absproxy, x))

        # Remove proxy per-clip directory.
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # Remove [custom] proxy directory if empty.
        try:
            absdir = bpy.path.abspath(proxydir)
            os.rmdir(absdir)
        except OSError:
            pass

        return {'FINISHED'}


class CLIP_OT_set_viewport_background(Operator):
    """Set current movie clip as a camera background in 3D view-port """ \
        """(works only when a 3D view-port is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def execute(self, context):
        sc = context.space_data
        CLIP_set_viewport_background(context, sc.clip, sc.clip_user)

        return {'FINISHED'}


class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for the object which will copy """ \
        """the object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
        con = None
        clip = None
        sfra = None
        efra = None
        frame_current = scene.frame_current
        matrices = []

        # Find the constraint to be converted.
        # TODO: several camera solvers and track followers would fail,
        #       but can't think of a real workflow where that would be useful.
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                        "Motion Tracking constraint to be converted not found")
            return {'CANCELLED'}

        # Get the clip used for parenting.
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                        "Movie clip to use tracking data from isn't set")
            return {'CANCELLED'}

        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat

            return {'FINISHED'}

        # Find start and end frames.
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        if sfra is None or efra is None:
            return

        # Store object matrices.
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices on the object and insert key-frames.
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a lib or in a hidden layer!
        for ob in scene.objects:
            if ob.select_get():
                self._bake_object(scene, ob)

        return {'FINISHED'}


class CLIP_OT_setup_tracking_scene(Operator):
    """Prepare scene for compositing 3D objects into this footage"""
    # TODO: it would be great to integrate with other engines (other than Cycles).

    bl_idname = "clip.setup_tracking_scene"
    bl_label = "Setup Tracking Scene"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.reconstruction.is_valid

    @staticmethod
    def _setupScene(context):
        scene = context.scene
        scene.active_clip = context.space_data.clip
        scene.render.use_motion_blur = True

    @staticmethod
    def _setupWorld(context):
        scene = context.scene
        world = scene.world

        if not world:
            world = bpy.data.worlds.new(name="World")
            scene.world = world

        # Having AO enabled is nice for shadow catcher.
        world.light_settings.use_ambient_occlusion = True
        world.light_settings.distance = 1.0
        if hasattr(scene, "cycles"):
            world.light_settings.ao_factor = 0.05

    @staticmethod
    def _findOrCreateCamera(context):
        scene = context.scene

        if scene.camera:
            return scene.camera

        cam = bpy.data.cameras.new(name="Camera")
        camob = bpy.data.objects.new(name="Camera", object_data=cam)
        scene.collection.objects.link(camob)

        scene.camera = camob

        camob.matrix_local = (
            Matrix.Translation((7.481, -6.508, 5.344)) @
            Matrix.Rotation(0.815, 4, 'Z') @
            Matrix.Rotation(0.011, 4, 'Y') @
            Matrix.Rotation(1.109, 4, 'X')
        )

        return camob

    @staticmethod
    def _setupCamera(context):
        sc = context.space_data
        clip = sc.clip
        tracking = clip.tracking

        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
        cam = camob.data

        # Remove all constraints to be sure motion is fine.
        camob.constraints.clear()

        # Append camera solver constraint.
        con = camob.constraints.new(type='CAMERA_SOLVER')
        con.use_active_clip = True
        con.influence = 1.0

        cam.sensor_width = tracking.camera.sensor_width
        cam.lens = tracking.camera.focal_length

    @staticmethod
    def _setupViewport(context):
        sc = context.space_data
        CLIP_set_viewport_background(context, sc.clip, sc.clip_user)

    @staticmethod
    def _setupViewLayers(context):
        scene = context.scene
        view_layers = scene.view_layers

        if not view_layers.get("Foreground"):
            if len(view_layers) == 1:
                fg = view_layers[0]
                fg.name = 'Foreground'
            else:
                fg = view_layers.new("Foreground")
            fg.use_sky = True

        if not view_layers.get("Background"):
            _bg = view_layers.new("Background")

    @staticmethod
    def createCollection(context, collection_name):
        def collection_in_collection(collection, collection_to_query):
            """Return True if collection is in any of the children or """ \
                """grandchildren of collection_to_query"""
            for child in collection_to_query.children:
                if collection == child:
                    return True

                if collection_in_collection(collection, child):
                    return True
            return False

        master_collection = context.scene.collection
        collection = bpy.data.collections.get(collection_name)

        if collection and collection.library:
            # We need a local collection instead.
            collection = None

        if not collection:
            collection = bpy.data.collections.new(name=collection_name)
            master_collection.children.link(collection)
        else:
            # See if the collection is already in the scene.
            if not collection_in_collection(collection, master_collection):
                master_collection.children.link(collection)

    def _setupCollections(self, context):
        def setup_collection_recursively(collections, collection_name, attr_name):
            for collection in collections:
                if collection.collection.name == collection_name:
                    setattr(collection, attr_name, True)
                    break
                else:
                    setup_collection_recursively(collection.children, collection_name, attr_name)

        collections = context.scene.collection.children
        vlayers = context.scene.view_layers

        if len(collections) == 1:
            collections[0].name = "foreground"

        self.createCollection(context, "foreground")
        self.createCollection(context, "background")

        # Render settings.
        setup_collection_recursively(
            vlayers["Foreground"].layer_collection.children,
            "background",
            "holdout",
        )
        setup_collection_recursively(
            vlayers["Background"].layer_collection.children,
            "foreground",
            "indirect_only",
        )

    @staticmethod
    def _wipeDefaultNodes(tree):
        if len(tree.nodes) != 2:
            return False
        types = [node.type for node in tree.nodes]
        types.sort()

        if types[0] == 'COMPOSITE' and types[1] == 'R_LAYERS':
            while tree.nodes:
                tree.nodes.remove(tree.nodes[0])

    @staticmethod
    def _findNode(tree, type):
        for node in tree.nodes:
            if node.type == type:
                return node

        return None

    @staticmethod
    def _findOrCreateNode(tree, type):
        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)

        if not node:
            node = tree.nodes.new(type=type)

        return node

    @staticmethod
    def _needSetupNodes(context):
        scene = context.scene
        tree = scene.node_tree

        if not tree:
            # No compositor node tree found, time to create it!
            return True

        for node in tree.nodes:
            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
                return False

        return True

    @staticmethod
    def _offsetNodes(tree):
        for a in tree.nodes:
            for b in tree.nodes:
                if a != b and a.location == b.location:
                    b.location += Vector((40.0, 20.0))

    def _setupNodes(self, context):
        if not self._needSetupNodes(context):
            # Compositor nodes were already set up or changed manually;
            # do nothing to avoid damaging the existing node setup.
            return

        # Enable backdrop for all compositor spaces.
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                         setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree
        clip = sc.clip
        need_stabilization = False

        # Remove all the nodes if they came from the default node setup.
        # This is the simplest way to make sure the final node setup is correct.
        self._wipeDefaultNodes(tree)

        # Create nodes.
        rlayer_fg = self._findOrCreateNode(tree, 'CompositorNodeRLayers')
        rlayer_bg = tree.nodes.new(type='CompositorNodeRLayers')
        composite = self._findOrCreateNode(tree, 'CompositorNodeComposite')

        movieclip = tree.nodes.new(type='CompositorNodeMovieClip')
        distortion = tree.nodes.new(type='CompositorNodeMovieDistortion')

        if need_stabilization:
            stabilize = tree.nodes.new(type='CompositorNodeStabilize2D')

        scale = tree.nodes.new(type='CompositorNodeScale')
        shadowcatcher = tree.nodes.new(type='CompositorNodeAlphaOver')
        alphaover = tree.nodes.new(type='CompositorNodeAlphaOver')
        viewer = tree.nodes.new(type='CompositorNodeViewer')

        # Setup nodes.
        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        # Create links.
        tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"])

        if need_stabilization:
            tree.links.new(distortion.outputs["Image"],
                           stabilize.inputs["Image"])
            tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"])
        else:
            tree.links.new(distortion.outputs["Image"], scale.inputs["Image"])

        tree.links.new(scale.outputs["Image"], shadowcatcher.inputs[1])
        tree.links.new(rlayer_bg.outputs["Image"], shadowcatcher.inputs[2])

        tree.links.new(rlayer_fg.outputs["Image"], alphaover.inputs[2])
        tree.links.new(shadowcatcher.outputs["Image"], alphaover.inputs[1])

        tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"])
        tree.links.new(alphaover.outputs["Image"], viewer.inputs["Image"])

        # Place nodes.
        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        shadowcatcher.location = scale.location
        shadowcatcher.location += Vector((250.0, 0.0))

        alphaover.location = shadowcatcher.location
        alphaover.location += Vector((250.0, -250.0))

        composite.location = alphaover.location
        composite.location += Vector((300.0, -100.0))

        viewer.location = composite.location
        composite.location += Vector((0.0, 200.0))

        # Ensure no nodes were created on top of an existing node.
        self._offsetNodes(tree)

    @staticmethod
    def _createMesh(collection, name, vertices, faces):
        from bpy_extras.io_utils import unpack_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)

        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.polygons.foreach_set("loop_total", (4,) * nbr_polys)
        mesh.loops.foreach_set("vertex_index", faces)

        mesh.update()

        ob = bpy.data.objects.new(name=name, object_data=mesh)
        collection.objects.link(ob)

        return ob

    @staticmethod
    def _getPlaneVertices(half_size, z):
        return [(-half_size, -half_size, z),
                (half_size, -half_size, z),
                (half_size, half_size, z),
                (-half_size, half_size, z)]

    def _createGround(self, collection):
        vertices = self._getPlaneVertices(4.0, 0.0)
        faces = [0, 1, 2, 3]

        ob = self._createMesh(collection, "Ground", vertices, faces)
        ob["is_ground"] = True

        return ob

    @staticmethod
    def _findGround(context):
        scene = context.scene

        for ob in scene.objects:
            if ob.type == 'MESH' and "is_ground" in ob:
                return ob

        return None

    @staticmethod
    def _createLight():
        light = bpy.data.lights.new(name="Light", type='POINT')
        lightob = bpy.data.objects.new(name="Light", object_data=light)

        lightob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))

        return lightob

    def _createSampleObject(self, collection):
        vertices = self._getPlaneVertices(1.0, -1.0) + \
            self._getPlaneVertices(1.0, 1.0)
        faces = (0, 1, 2, 3,
                 4, 7, 6, 5,
                 0, 4, 5, 1,
                 1, 5, 6, 2,
                 2, 6, 7, 3,
                 3, 7, 4, 0)

        return self._createMesh(collection, "Cube", vertices, faces)

    def _setupObjects(self, context):

        def setup_shadow_catcher_objects(collection):
            """Make all the newly created and the old objects of a collection """ \
                """properly set up for shadow catching"""
            for ob in collection.objects:
                ob.cycles.is_shadow_catcher = True
            for child in collection.children:
                # Recurse into child collections.
                setup_shadow_catcher_objects(child)

        scene = context.scene
        fg_coll = bpy.data.collections["foreground", None]
        bg_coll = bpy.data.collections["background", None]

        # Ensure all lights are active on foreground and background.
        has_light = False
        has_mesh = False
        for ob in scene.objects:
            if ob.type == 'LIGHT':
                has_light = True
            elif ob.type == 'MESH' and "is_ground" not in ob:
                has_mesh = True

        # Create a sample light if there are no lights in the scene.
        if not has_light:
            light = self._createLight()
            fg_coll.objects.link(light)
            bg_coll.objects.link(light)

        # Create a sample object if there are no meshes in the scene.
        if not has_mesh:
            ob = self._createSampleObject(fg_coll)

        # Create a ground object if needed.
        ground = self._findGround(context)
        if not ground:
            ground = self._createGround(bg_coll)

        # And set everything on the background layer to shadow catcher.
        if hasattr(scene, "cycles"):
            setup_shadow_catcher_objects(bg_coll)

    def execute(self, context):
        self._setupScene(context)
        self._setupWorld(context)
        self._setupCamera(context)
        self._setupViewport(context)
        self._setupViewLayers(context)
        self._setupCollections(context)
        self._setupNodes(context)
        self._setupObjects(context)

        return {'FINISHED'}


class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""
    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.tracks.active

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        track = clip.tracking.tracks.active

        framenr = context.scene.frame_current - clip.frame_start + 1

        CLIP_default_settings_from_track(clip, track, framenr)

        return {'FINISHED'}


class CLIP_OT_track_settings_to_track(bpy.types.Operator):
    """Copy tracking settings from active track to selected tracks"""
    bl_label = "Copy Track Settings"
    bl_idname = "clip.track_settings_to_track"
    bl_options = {'UNDO', 'REGISTER'}

    _attrs_track = (
        "correlation_min",
        "frames_limit",
        "pattern_match",
        "margin",
        "motion_model",
        "use_brute",
        "use_normalization",
        "use_mask",
        "use_red_channel",
        "use_green_channel",
        "use_blue_channel",
        "weight",
    )

    _attrs_marker = (
        "pattern_corners",
        "search_min",
        "search_max",
    )

    @classmethod
    def poll(cls, context):
        space = context.space_data
        if space.type != 'CLIP_EDITOR':
            return False
        clip = space.clip
        return clip and clip.tracking.tracks.active

    def execute(self, context):
        space = context.space_data
        clip = space.clip
        track = clip.tracking.tracks.active
        framenr = context.scene.frame_current - clip.frame_start + 1
        marker = track.markers.find_frame(framenr, exact=False)

        for t in clip.tracking.tracks:
            if t.select and t != track:
                marker_selected = t.markers.find_frame(framenr, exact=False)
                for attr in self._attrs_track:
                    setattr(t, attr, getattr(track, attr))
                for attr in self._attrs_marker:
                    setattr(marker_selected, attr, getattr(marker, attr))

        return {'FINISHED'}


classes = (
    CLIP_OT_bundles_to_mesh,
    CLIP_OT_constraint_to_fcurve,
    CLIP_OT_delete_proxy,
    CLIP_OT_filter_tracks,
    CLIP_OT_set_active_clip,
    CLIP_OT_set_viewport_background,
    CLIP_OT_setup_tracking_scene,
    CLIP_OT_track_settings_as_default,
    CLIP_OT_track_settings_to_track,
    CLIP_OT_track_to_empty,
)
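

# In Blender these operator classes are normally registered by the enclosing
# bl_operators package, which imports this module and registers everything in
# ``classes``. The factory call below is only a minimal sketch of how the same
# tuple could be registered if this file were loaded as a stand-alone module;
# register_classes_factory() returns the register()/unregister() callables.
register, unregister = bpy.utils.register_classes_factory(classes)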