wzhouxiff committed
Commit
2890711
0 Parent(s):
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +39 -0
  2. .gitignore +5 -0
  3. README.md +13 -0
  4. app.py +861 -0
  5. configs/inference/config_motionctrl_cmcm.yaml +169 -0
  6. configs/inference/motionctrl_run.sh +36 -0
  7. examples/camera_poses/test.txt +42 -0
  8. examples/camera_poses/test_camera_018f7907401f2fef.json +1 -0
  9. examples/camera_poses/test_camera_088b93f15ca8745d.json +1 -0
  10. examples/camera_poses/test_camera_1424acd0007d40b5.json +1 -0
  11. examples/camera_poses/test_camera_D.json +226 -0
  12. examples/camera_poses/test_camera_I.json +226 -0
  13. examples/camera_poses/test_camera_I_0.2x.json +226 -0
  14. examples/camera_poses/test_camera_I_0.4x.json +226 -0
  15. examples/camera_poses/test_camera_I_1.0x.json +226 -0
  16. examples/camera_poses/test_camera_I_2.0x.json +226 -0
  17. examples/camera_poses/test_camera_L.json +226 -0
  18. examples/camera_poses/test_camera_O.json +226 -0
  19. examples/camera_poses/test_camera_O_0.2x.json +226 -0
  20. examples/camera_poses/test_camera_O_0.4x.json +226 -0
  21. examples/camera_poses/test_camera_O_1.0x.json +226 -0
  22. examples/camera_poses/test_camera_O_2.0x.json +226 -0
  23. examples/camera_poses/test_camera_R.json +226 -0
  24. examples/camera_poses/test_camera_Round-RI-120.json +226 -0
  25. examples/camera_poses/test_camera_Round-RI.json +226 -0
  26. examples/camera_poses/test_camera_Round-RI_90.json +226 -0
  27. examples/camera_poses/test_camera_Round-ZoomIn.json +226 -0
  28. examples/camera_poses/test_camera_SPIN-ACW-60.json +226 -0
  29. examples/camera_poses/test_camera_SPIN-CW-60.json +226 -0
  30. examples/camera_poses/test_camera_U.json +226 -0
  31. examples/camera_poses/test_camera_b133a504fc90a2d1.json +1 -0
  32. examples/camera_poses/test_camera_d9642c8efc01481d.json +1 -0
  33. examples/camera_poses/test_camera_d971457c81bca597.json +1 -0
  34. gradio_utils/__pycache__/camera_utils.cpython-310.pyc +0 -0
  35. gradio_utils/__pycache__/flow_utils.cpython-310.pyc +0 -0
  36. gradio_utils/__pycache__/motionctrl_cmcm_gradio.cpython-310.pyc +0 -0
  37. gradio_utils/__pycache__/traj_utils.cpython-310.pyc +0 -0
  38. gradio_utils/__pycache__/utils.cpython-310.pyc +0 -0
  39. gradio_utils/camera_utils.py +157 -0
  40. gradio_utils/flow_utils.py +69 -0
  41. gradio_utils/motionctrl_cmcm_gradio.py +276 -0
  42. gradio_utils/traj_utils.py +104 -0
  43. gradio_utils/utils.py +175 -0
  44. main.py +943 -0
  45. main/inference/motionctrl_cmcm.py +416 -0
  46. pytest.ini +3 -0
  47. requirements.txt +32 -0
  48. scripts/__init__.py +0 -0
  49. scripts/demo/__init__.py +0 -0
  50. scripts/demo/detect.py +156 -0
.gitattributes ADDED
@@ -0,0 +1,39 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,5 @@
+ .vscode/
+ .DS_Store
+ *.gif
+ *.png
+ *.jpg
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: MotionCtrl SVD
+ emoji: 📉
+ colorFrom: yellow
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.37.0
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,861 @@
1
+ import argparse
2
+ import os
3
+ import tempfile
4
+
5
+ import gradio as gr
6
+ import numpy as np
7
+ import torch
8
+ from glob import glob
9
+ from torchvision.transforms import CenterCrop, Compose, Resize
10
+
11
+ from gradio_utils.camera_utils import CAMERA_MOTION_MODE, process_camera, create_relative
12
+
13
+ from gradio_utils.utils import vis_camera
14
+ from gradio_utils.motionctrl_cmcm_gradio import build_model, motionctrl_sample
15
+
16
+ os.environ['KMP_DUPLICATE_LIB_OK']='True'
17
+ SPACE_ID = os.environ.get('SPACE_ID', '')
18
+
19
+
20
+ #### Description ####
21
+ title = r"""<h1 align="center">MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</h1>"""
22
+ subtitle = r"""<h2 align="center">Deployed on SVD Generation</h2>"""
23
+
24
+ description = r"""
25
+ <b>Official Gradio demo</b> for <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'><b>MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</b></a>.<br>
26
+ 🔥 MotionCtrl is capable of independently and flexibly controlling the camera motion and object motion of a generated video, with only a unified model.<br>
27
+ 🤗 Try to control the motion of the generated videos yourself!<br>
28
+ ❗❗❗ Please note that **ONLY** Camera Motion Control is available in the current version of **MotionCtrl** deployed on **SVD**.<br>
29
+ """
30
+ # <div>
31
+ # <img src="https://raw.githubusercontent.com/TencentARC/MotionCtrl/main/assets/svd/00_ibzz5-dxv2h.gif", width="300">
32
+ # <img src="https://raw.githubusercontent.com/TencentARC/MotionCtrl/main/assets/svd/01_5guvn-0x6v2.gif", width="300">
33
+ # <img src="https://raw.githubusercontent.com/TencentARC/MotionCtrl/main/assets/svd/12_sn7bz-0hcaf.gif", width="300">
34
+ # <img src="https://raw.githubusercontent.com/TencentARC/MotionCtrl/main/assets/svd/13_3lyco-4ru8j.gif", width="300">
35
+ # </div>
36
+ article = r"""
37
+ If MotionCtrl is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'>Github Repo</a>. Thanks!
38
+ [![GitHub Stars](https://img.shields.io/github/stars/TencentARC%2FMotionCtrl)](https://github.com/TencentARC/MotionCtrl)
40
+
41
+ ---
42
+
43
+ 📝 **Citation**
44
+ <br>
45
+ If our work is useful for your research, please consider citing:
46
+ ```bibtex
47
+ @inproceedings{wang2023motionctrl,
48
+ title={MotionCtrl: A Unified and Flexible Motion Controller for Video Generation},
49
+ author={Wang, Zhouxia and Yuan, Ziyang and Wang, Xintao and Chen, Tianshui and Xia, Menghan and Luo, Ping and Shan, Ying},
50
+ booktitle={arXiv preprint arXiv:2312.03641},
51
+ year={2023}
52
+ }
53
+ ```
54
+
55
+ 📧 **Contact**
56
+ <br>
57
+ If you have any questions, please feel free to reach out to me at <b>wzhoux@connect.hku.hk</b>.
58
+
59
+ """
60
+ css = """
61
+ .gradio-container {width: 85% !important}
62
+ .gr-monochrome-group {border-radius: 5px !important; border: revert-layer !important; border-width: 2px !important; color: black !important;}
63
+ span.svelte-s1r2yt {font-size: 17px !important; font-weight: bold !important; color: #d30f2f !important;}
64
+ button {border-radius: 8px !important;}
65
+ .add_button {background-color: #4CAF50 !important;}
66
+ .remove_button {background-color: #f44336 !important;}
67
+ .clear_button {background-color: gray !important;}
68
+ .mask_button_group {gap: 10px !important;}
69
+ .video {height: 300px !important;}
70
+ .image {height: 300px !important;}
71
+ .video .wrap.svelte-lcpz3o {display: flex !important; align-items: center !important; justify-content: center !important;}
72
+ .video .wrap.svelte-lcpz3o > :first-child {height: 100% !important;}
73
+ .margin_center {width: 50% !important; margin: auto !important;}
74
+ .jc_center {justify-content: center !important;}
75
+ """
76
+
77
+
78
+ T_base = [
79
+ [1.,0.,0.], ## positive W2C x direction: camera moves left
80
+ [-1.,0.,0.], ## negative W2C x direction: camera moves right
81
+ [0., 1., 0.], ## positive W2C y direction: camera moves up
82
+ [0.,-1.,0.], ## negative W2C y direction: camera moves down
83
+ [0.,0.,1.], ## positive W2C z direction: zoom out
84
+ [0.,0.,-1.], ## negative W2C z direction: zoom in
85
+ ]
86
+ radius = 1
87
+ n = 16
88
+ # step =
89
+ look_at = np.array([0, 0, 0.8]).reshape(3,1)
90
+ # look_at = np.array([0, 0, 0.2]).reshape(3,1)
91
+
92
+ T_list = []
93
+ base_R = np.array([[1., 0., 0.],
94
+ [0., 1., 0.],
95
+ [0., 0., 1.]])
96
+ res = []
97
+ res_forsave = []
98
+ T_range = 1.8
99
+
100
+
101
+
102
+ for i in range(0, 16):
103
+ # theta = (1)*np.pi*i/n
104
+
105
+ R = base_R[:,:3]
106
+ T = np.array([0.,0.,1.]).reshape(3,1) * (i/n)*2
107
+ RT = np.concatenate([R,T], axis=1)
108
+ res.append(RT)
109
+
110
+ fig = vis_camera(res)
111
+
112
+ # MODE = ["camera motion control", "object motion control", "camera + object motion control"]
113
+ MODE = ["control camera poses", "control object trajectory", "control both camera and object motion"]
114
+ RESIZE_MODE = ['Center Crop To 576x1024', 'Keep original spatial ratio']
115
+ DIY_MODE = ['Customized Mode 1: First A then B',
116
+ 'Customized Mode 2: Both A and B',
117
+ 'Customized Mode 3: RAW Camera Poses']
118
+
119
+ ## load default model
120
+ num_frames = 14
121
+ num_steps = 25
122
+ device = "cuda" if torch.cuda.is_available() else "cpu"
123
+ print(f"Using device {device}")
124
+
125
+ config = "configs/inference/config_motionctrl_cmcm.yaml"
126
+ ckpt='checkpoints/motionctrl_svd.ckpt'
127
+ if not os.path.exists(ckpt):
128
+ os.system(f'wget https://huggingface.co/TencentARC/MotionCtrl/resolve/main/motionctrl_svd.ckpt?download=true -P .')
129
+ os.system(f'mkdir checkpoints')
130
+ os.system(f'mv motionctrl_svd.ckpt?download=true {ckpt}')
131
+ model = build_model(config, ckpt, device, num_frames, num_steps)
132
+ width, height = 1024, 576
133
+
134
+ traj_list = []
135
+ camera_dict = {
136
+ "motion":[],
137
+ "mode": "Customized Mode 1: First A then B", # "First A then B", "Both A and B", "Custom"
138
+ "speed": 1.0,
139
+ "complex": None
140
+ }
141
+
142
+ def fn_vis_camera(camera_args):
143
+ global camera_dict, num_frames, width, height
144
+ RT = process_camera(camera_dict, camera_args, num_frames=num_frames, width=width, height=height) # [t, 3, 4]
145
+
146
+ rescale_T = 1.0
147
+ rescale_T = max(rescale_T, np.max(np.abs(RT[:,:,-1])) / 1.9)
148
+
149
+ fig = vis_camera(create_relative(RT), rescale_T=rescale_T)
150
+
151
+ vis_step3_prompt_generate = True
152
+ vis_generation_dec = True
153
+ vis_prompt = True
154
+ vis_num_samples = True
155
+ vis_seed = True
156
+ vis_start = True
157
+ vis_gen_video = True
158
+ vis_repeat_highlight = True
159
+
160
+ return fig, \
161
+ gr.update(visible=vis_step3_prompt_generate), \
162
+ gr.update(visible=vis_generation_dec), \
163
+ gr.update(visible=vis_prompt), \
164
+ gr.update(visible=vis_num_samples), \
165
+ gr.update(visible=vis_seed), \
166
+ gr.update(visible=vis_start), \
167
+ gr.update(visible=vis_gen_video, value=None), \
168
+ gr.update(visible=vis_repeat_highlight)
169
+
170
+ def display_camera_info(camera_dict, camera_mode=None):
171
+ if camera_dict['complex'] is not None:
172
+ res = f"complex : {camera_dict['complex']}. "
173
+ res += f"speed : {camera_dict['speed']}. "
174
+ else:
175
+ res = ""
176
+ res += f"motion : {[_ for _ in camera_dict['motion']]}. "
177
+ res += f"speed : {camera_dict['speed']}. "
178
+ if camera_mode == CAMERA_MOTION_MODE[2]:
179
+ res += f"mode : {camera_dict['mode']}. "
180
+ return res
181
+
182
+ def add_camera_motion(camera_motion, camera_mode):
183
+ global camera_dict
184
+ if camera_dict['complex'] is not None:
185
+ camera_dict['complex'] = None
186
+ if camera_mode == CAMERA_MOTION_MODE[2] and len(camera_dict['motion']) <2:
187
+ camera_dict['motion'].append(camera_motion)
188
+ else:
189
+ camera_dict['motion']=[camera_motion]
190
+
191
+ return display_camera_info(camera_dict, camera_mode)
192
+
193
+ def add_complex_camera_motion(camera_motion):
194
+ global camera_dict
195
+ camera_dict['complex']=camera_motion
196
+ return display_camera_info(camera_dict)
197
+
198
+ def input_raw_camera_pose(combine_type, camera_mode):
199
+ global camera_dict
200
+ camera_dict['mode'] = combine_type
201
+
202
+ vis_U = False
203
+ vis_D = False
204
+ vis_L = False
205
+ vis_R = False
206
+ vis_I = False
207
+ vis_O = False
208
+ vis_ACW = False
209
+ vis_CW = False
210
+ vis_speed = True
211
+ vis_combine3_des = True
212
+
213
+ return gr.update(value='1 0 0 0 0 1 0 0 0 0 1 0\n1 0 0 0 0 1 0 0 0 0 1 -0.225\n1 0 0 0 0 1 0 0 0 0 1 -0.45\n1 0 0 0 0 1 0 0 0 0 1 -0.675\n1 0 0 0 0 1 0 0 0 0 1 -0.9\n1 0 0 0 0 1 0 0 0 0 1 -1.125\n1 0 0 0 0 1 0 0 0 0 1 -1.35\n1 0 0 0 0 1 0 0 0 0 1 -1.575\n1 0 0 0 0 1 0 0 0 0 1 -1.8\n1 0 0 0 0 1 0 0 0 0 1 -2.025\n1 0 0 0 0 1 0 0 0 0 1 -2.25\n1 0 0 0 0 1 0 0 0 0 1 -2.475\n1 0 0 0 0 1 0 0 0 0 1 -2.7\n1 0 0 0 0 1 0 0 0 0 1 -2.925\n', max_lines=16, interactive=True), \
214
+ gr.update(visible=vis_U), \
215
+ gr.update(visible=vis_D), \
216
+ gr.update(visible=vis_L),\
217
+ gr.update(visible=vis_R), \
218
+ gr.update(visible=vis_I), \
219
+ gr.update(visible=vis_O), \
220
+ gr.update(visible=vis_ACW), \
221
+ gr.update(visible=vis_CW), \
222
+ gr.update(visible=vis_speed), \
223
+ gr.update(visible=vis_combine3_des)
224
+
225
+ def change_camera_mode(combine_type, camera_mode):
226
+ global camera_dict
227
+ camera_dict['mode'] = combine_type
228
+
229
+ vis_U = True
230
+ vis_D = True
231
+ vis_L = True
232
+ vis_R = True
233
+ vis_I = True
234
+ vis_O = True
235
+ vis_ACW = True
236
+ vis_CW = True
237
+ vis_speed = True
238
+ vis_combine3_des = False
239
+
240
+ return display_camera_info(camera_dict, camera_mode), \
241
+ gr.update(visible=vis_U), \
242
+ gr.update(visible=vis_D), \
243
+ gr.update(visible=vis_L),\
244
+ gr.update(visible=vis_R), \
245
+ gr.update(visible=vis_I), \
246
+ gr.update(visible=vis_O), \
247
+ gr.update(visible=vis_ACW), \
248
+ gr.update(visible=vis_CW), \
249
+ gr.update(visible=vis_speed), \
250
+ gr.update(visible=vis_combine3_des)
251
+
252
+ def change_camera_speed(camera_speed):
253
+ global camera_dict
254
+ camera_dict['speed'] = camera_speed
255
+ return display_camera_info(camera_dict)
256
+
257
+ def reset_camera():
258
+ global camera_dict
259
+ camera_dict = {
260
+ "motion":[],
261
+ "mode": "Customized Mode 1: First A then B",
262
+ "speed": 1.0,
263
+ "complex": None
264
+ }
265
+ return display_camera_info(camera_dict)
266
+
267
+
268
+ def visualized_camera_poses(step2_camera_motion):
269
+ reset_camera()
270
+
271
+ # generate video
272
+ vis_step3_prompt_generate = False
273
+ vis_generation_dec = False
274
+ vis_prompt = False
275
+ vis_num_samples = False
276
+ vis_seed = False
277
+ vis_start = False
278
+ vis_gen_video = False
279
+ vis_repeat_highlight = False
280
+
281
+ if step2_camera_motion == CAMERA_MOTION_MODE[0]:
282
+ vis_basic_camera_motion = True
283
+ vis_basic_camera_motion_des = True
284
+ vis_custom_camera_motion = False
285
+ vis_custom_run_status = False
286
+ vis_complex_camera_motion = False
287
+ vis_complex_camera_motion_des = False
288
+ vis_U = True
289
+ vis_D = True
290
+ vis_L = True
291
+ vis_R = True
292
+ vis_I = True
293
+ vis_O = True
294
+ vis_ACW = True
295
+ vis_CW = True
296
+ vis_combine1 = False
297
+ vis_combine2 = False
298
+ vis_combine3 = False
299
+ vis_combine3_des = False
300
+ vis_speed = True
301
+
302
+ vis_Pose_1, vis_Pose_2, vis_Pose_3, vis_Pose_4 = False, False, False, False
303
+ vis_Pose_5, vis_Pose_6, vis_Pose_7, vis_Pose_8 = False, False, False, False
304
+
305
+ elif step2_camera_motion == CAMERA_MOTION_MODE[1]:
306
+ vis_basic_camera_motion = False
307
+ vis_basic_camera_motion_des = False
308
+ vis_custom_camera_motion = False
309
+ vis_custom_run_status = False
310
+ vis_complex_camera_motion = True
311
+ vis_complex_camera_motion_des = True
312
+ vis_U = False
313
+ vis_D = False
314
+ vis_L = False
315
+ vis_R = False
316
+ vis_I = False
317
+ vis_O = False
318
+ vis_ACW = False
319
+ vis_CW = False
320
+ vis_combine1 = False
321
+ vis_combine2 = False
322
+ vis_combine3 = False
323
+ vis_combine3_des = False
324
+ vis_speed = True
325
+
326
+ vis_Pose_1, vis_Pose_2, vis_Pose_3, vis_Pose_4 = True, True, True, True
327
+ vis_Pose_5, vis_Pose_6, vis_Pose_7, vis_Pose_8 = True, True, True, True
328
+
329
+ else: # step2_camera_motion = CAMERA_MOTION_MODE[2]:
330
+ vis_basic_camera_motion = False
331
+ vis_basic_camera_motion_des = False
332
+ vis_custom_camera_motion = True
333
+ vis_custom_run_status = True
334
+ vis_complex_camera_motion = False
335
+ vis_complex_camera_motion_des = False
336
+ vis_U = False
337
+ vis_D = False
338
+ vis_L = False
339
+ vis_R = False
340
+ vis_I = False
341
+ vis_O = False
342
+ vis_ACW = False
343
+ vis_CW = False
344
+ vis_combine1 = True
345
+ vis_combine2 = True
346
+ vis_combine3 = True
347
+ vis_combine3_des = False
348
+ vis_speed = False
349
+
350
+ vis_Pose_1, vis_Pose_2, vis_Pose_3, vis_Pose_4 = False, False, False, False
351
+ vis_Pose_5, vis_Pose_6, vis_Pose_7, vis_Pose_8 = False, False, False, False
352
+
353
+ vis_camera_args = True
354
+ vis_camera_reset = True
355
+ vis_camera_vis = True
356
+ vis_vis_camera = True
357
+
358
+ return gr.update(visible=vis_basic_camera_motion), \
359
+ gr.update(visible=vis_basic_camera_motion_des), \
360
+ gr.update(visible=vis_custom_camera_motion), \
361
+ gr.update(visible=vis_custom_run_status), \
362
+ gr.update(visible=vis_complex_camera_motion), \
363
+ gr.update(visible=vis_complex_camera_motion_des), \
364
+ gr.update(visible=vis_U), gr.update(visible=vis_D), gr.update(visible=vis_L), gr.update(visible=vis_R), \
365
+ gr.update(visible=vis_I), gr.update(visible=vis_O), gr.update(visible=vis_ACW), gr.update(visible=vis_CW), \
366
+ gr.update(visible=vis_combine1), gr.update(visible=vis_combine2), gr.update(visible=vis_combine3), \
367
+ gr.update(visible=vis_combine3_des), \
368
+ gr.update(visible=vis_speed), \
369
+ gr.update(visible=vis_Pose_1), gr.update(visible=vis_Pose_2), gr.update(visible=vis_Pose_3), gr.update(visible=vis_Pose_4), \
370
+ gr.update(visible=vis_Pose_5), gr.update(visible=vis_Pose_6), gr.update(visible=vis_Pose_7), gr.update(visible=vis_Pose_8), \
371
+ gr.update(visible=vis_camera_args, value=None), \
372
+ gr.update(visible=vis_camera_reset), gr.update(visible=vis_camera_vis), \
373
+ gr.update(visible=vis_vis_camera, value=None), \
374
+ gr.update(visible=vis_step3_prompt_generate), \
375
+ gr.update(visible=vis_generation_dec), \
376
+ gr.update(visible=vis_prompt), \
377
+ gr.update(visible=vis_num_samples), \
378
+ gr.update(visible=vis_seed), \
379
+ gr.update(visible=vis_start), \
380
+ gr.update(visible=vis_gen_video), \
381
+ gr.update(visible=vis_repeat_highlight)
382
+
383
+
384
+ def process_input_image(input_image, resize_mode):
385
+ global width, height
386
+ if resize_mode == RESIZE_MODE[0]:
387
+ height = 576
388
+ width = 1024
389
+ w, h = input_image.size
390
+ h_ratio = h / height
391
+ w_ratio = w / width
392
+
393
+ if h_ratio > w_ratio:
394
+ h = int(h / w_ratio)
395
+ if h < height:
396
+ h = height
397
+ input_image = Resize((h, width))(input_image)
398
+
399
+ else:
400
+ w = int(w / h_ratio)
401
+ if w < width:
402
+ w = width
403
+ input_image = Resize((height, w))(input_image)
404
+
405
+ transformer = Compose([
406
+ # Resize(width),
407
+ CenterCrop((height, width)),
408
+ ])
409
+
410
+ input_image = transformer(input_image)
411
+ else:
412
+ w, h = input_image.size
413
+ if h > w:
414
+ height = 576
415
+ width = int(w * height / h)
416
+ else:
417
+ width = 1024
418
+ height = int(h * width / w)
419
+
420
+ input_image = Resize((height, width))(input_image)
421
+ # print(f'input_image size: {input_image.size}')
422
+
423
+ vis_step2_camera_motion = True
424
+ vis_step2_camera_motion_des = True
425
+ vis_camera_mode = True
426
+ vis_camera_info = True
427
+
428
+ ####
429
+ # camera motion control
430
+ vis_basic_camera_motion = False
431
+ vis_basic_camera_motion_des = False
432
+ vis_custom_camera_motion = False
433
+ vis_custom_run_status = False
434
+ vis_complex_camera_motion = False
435
+ vis_complex_camera_motion_des = False
436
+ vis_U = False
437
+ vis_D = False
438
+ vis_L = False
439
+ vis_R = False
440
+ vis_I = False
441
+ vis_O = False
442
+ vis_ACW = False
443
+ vis_CW = False
444
+ vis_combine1 = False
445
+ vis_combine2 = False
446
+ vis_combine3 = False
447
+ vis_combine3_des = False
448
+ vis_speed = False
449
+
450
+ vis_Pose_1, vis_Pose_2, vis_Pose_3, vis_Pose_4 = False, False, False, False
451
+ vis_Pose_5, vis_Pose_6, vis_Pose_7, vis_Pose_8 = False, False, False, False
452
+
453
+ vis_camera_args = False
454
+ vis_camera_reset = False
455
+ vis_camera_vis = False
456
+ vis_vis_camera = False
457
+
458
+ # generate video
459
+ vis_step3_prompt_generate = False
460
+ vis_generation_dec = False
461
+ vis_prompt = False
462
+ vis_num_samples = False
463
+ vis_seed = False
464
+ vis_start = False
465
+ vis_gen_video = False
466
+ vis_repeat_highlight = False
467
+
468
+ return gr.update(visible=True, value=input_image, height=height, width=width), \
469
+ gr.update(visible=vis_step2_camera_motion), \
470
+ gr.update(visible=vis_step2_camera_motion_des), \
471
+ gr.update(visible=vis_camera_mode), \
472
+ gr.update(visible=vis_camera_info), \
473
+ gr.update(visible=vis_basic_camera_motion), \
474
+ gr.update(visible=vis_basic_camera_motion_des), \
475
+ gr.update(visible=vis_custom_camera_motion), \
476
+ gr.update(visible=vis_custom_run_status), \
477
+ gr.update(visible=vis_complex_camera_motion), \
478
+ gr.update(visible=vis_complex_camera_motion_des), \
479
+ gr.update(visible=vis_U), gr.update(visible=vis_D), gr.update(visible=vis_L), gr.update(visible=vis_R), \
480
+ gr.update(visible=vis_I), gr.update(visible=vis_O), gr.update(visible=vis_ACW), gr.update(visible=vis_CW), \
481
+ gr.update(visible=vis_combine1), gr.update(visible=vis_combine2), gr.update(visible=vis_combine3), \
482
+ gr.update(visible=vis_combine3_des), \
483
+ gr.update(visible=vis_speed), \
484
+ gr.update(visible=vis_Pose_1), gr.update(visible=vis_Pose_2), gr.update(visible=vis_Pose_3), gr.update(visible=vis_Pose_4), \
485
+ gr.update(visible=vis_Pose_5), gr.update(visible=vis_Pose_6), gr.update(visible=vis_Pose_7), gr.update(visible=vis_Pose_8), \
486
+ gr.update(visible=vis_camera_args, value=None), \
487
+ gr.update(visible=vis_camera_reset), gr.update(visible=vis_camera_vis), \
488
+ gr.update(visible=vis_vis_camera, value=None), \
489
+ gr.update(visible=vis_step3_prompt_generate), \
490
+ gr.update(visible=vis_generation_dec), \
491
+ gr.update(visible=vis_prompt), \
492
+ gr.update(visible=vis_num_samples), \
493
+ gr.update(visible=vis_seed), \
494
+ gr.update(visible=vis_start), \
495
+ gr.update(visible=vis_gen_video), \
496
+ gr.update(visible=vis_repeat_highlight)
497
+
498
+ def model_run(input_image, fps_id, seed, n_samples, camera_args):
499
+ global model, device, camera_dict, num_frames, num_steps, width, height
500
+ RT = process_camera(camera_dict, camera_args, num_frames=num_frames, width=width, height=height).reshape(-1,12)
501
+
502
+ video_path = motionctrl_sample(
503
+ model=model,
504
+ image=input_image,
505
+ RT=RT,
506
+ num_frames=num_frames,
507
+ fps_id=fps_id,
508
+ decoding_t=1,
509
+ seed=seed,
510
+ sample_num=n_samples,
511
+ device=device
512
+ )
513
+
514
+ return video_path
515
+
516
+ def main(args):
517
+ demo = gr.Blocks()
518
+ with demo:
519
+
520
+ gr.Markdown(title)
521
+ gr.Markdown(subtitle)
522
+ gr.Markdown(description)
523
+
524
+ with gr.Column():
525
+
526
+ # step 0: Some useful tricks
527
+ gr.Markdown("## Step 0/3: Some Useful Tricks", show_label=False)
528
+ gr.HighlightedText(value=[("",""), (f"1. If the motion control is not obvious, try to increase the `Motion Speed`. \
529
+ \n 2. If the generated videos are distorted severely, try to decrease the `Motion Speed` \
530
+ or increase `FPS`.", "Normal")],
531
+ color_map={"Normal": "green", "Error": "red", "Clear clicks": "gray", "Add mask": "green", "Remove mask": "red"}, visible=True)
532
+
533
+ # step 2: input an image
534
+ step2_title = gr.Markdown("---\n## Step 1/3: Input an Image", show_label=False, visible=True)
535
+ step2_dec = gr.Markdown(f"\n 1. Upload an Image by `Drag` or Click `Upload Image`; \
536
+ \n 2. Click `{RESIZE_MODE[0]}` or `{RESIZE_MODE[1]}` to select the image resize mode. \
537
+ You will get a processed image and go into the next step. \
538
+ \n - `{RESIZE_MODE[0]}`: Our MotionCtrl is trained on images with spatial size 576x1024. Choosing `{RESIZE_MODE[0]}` can produce better generated videos. \
539
+ \n - `{RESIZE_MODE[1]}`: Choose `{RESIZE_MODE[1]}` if you want to generate a video with the same spatial ratio as the input image.",
540
+ show_label=False, visible=True)
541
+
542
+ with gr.Row(equal_height=True):
543
+ with gr.Column(scale=2):
544
+ input_image = gr.Image(type="pil", interactive=True, elem_id="input_image", elem_classes='image', visible=True)
545
+ # process_input_image_button = gr.Button(value="Process Input Image", visible=False)
546
+ with gr.Row():
547
+ center_crop_botton = gr.Button(value=RESIZE_MODE[0], visible=True)
548
+ keep_spatial_raition_botton = gr.Button(value=RESIZE_MODE[1], visible=True)
549
+ with gr.Column(scale=2):
550
+ process_image = gr.Image(type="pil", interactive=False, elem_id="process_image", elem_classes='image', visible=False)
551
+ # step2_proceed_button = gr.Button(value="Proceed", visible=False)
552
+
553
+
554
+ # step3 - camera motion control
555
+ step2_camera_motion = gr.Markdown("---\n## Step 2/3: Select the camera poses", show_label=False, visible=False)
556
+ step2_camera_motion_des = gr.Markdown(f"\n - {CAMERA_MOTION_MODE[0]}: Including 8 basic camera poses, such as pan up, pan down, zoom in, and zoom out. \
557
+ \n - {CAMERA_MOTION_MODE[1]}: Complex camera poses extracted from the real videos. \
558
+ \n - {CAMERA_MOTION_MODE[2]}: You can customize complex camera poses yourself by combining or fusing two of the eight basic camera poses, or by inputting a RAW RT matrix. \
559
+ \n - Click `Proceed` to go to the next step",
560
+ show_label=False, visible=False)
561
+ camera_mode = gr.Radio(choices=CAMERA_MOTION_MODE, value=CAMERA_MOTION_MODE[0], label="Camera Motion Control Mode", interactive=True, visible=False)
562
+ camera_info = gr.Button(value="Proceed", visible=False)
563
+
564
+ with gr.Row():
565
+ with gr.Column():
566
+ # step3.1 - camera motion control - basic
567
+ basic_camera_motion = gr.Markdown("---\n### Basic Camera Poses", show_label=False, visible=False)
568
+ basic_camera_motion_des = gr.Markdown(f"\n 1. Click one of the basic camera poses, such as `Pan Up`; \
569
+ \n 2. Slide the `Motion speed` to get a speed value. The larger the value, the faster the camera motion; \
570
+ \n 3. Click `Visualize Camera and Proceed` to visualize the camera poses and proceed; \
571
+ \n 4. Click `Reset Camera` to reset the camera poses (If needed). ",
572
+ show_label=False, visible=False)
573
+
574
+
575
+ # step3.2 - camera motion control - provided complex
576
+ complex_camera_motion = gr.Markdown("---\n### Provided Complex Camera Poses", show_label=False, visible=False)
577
+ complex_camera_motion_des = gr.Markdown(f"\n 1. Click one of the complex camera poses, such as `Pose_1`; \
578
+ \n 2. Click `Visualize Camera and Proceed` to visualize the camera poses and proceed; \
579
+ \n 3. Click `Reset Camera` to reset the camera poses (If needed). ",
580
+ show_label=False, visible=False)
581
+
582
+ # step3.3 - camera motion control - custom
583
+ custom_camera_motion = gr.Markdown(f"---\n### {CAMERA_MOTION_MODE[2]}", show_label=False, visible=False)
584
+ custom_run_status = gr.Markdown(f"\n 1. Click `{DIY_MODE[0]}`, `{DIY_MODE[1]}`, or `{DIY_MODE[2]}` \
585
+ \n - `Customized Mode 1: First A then B`: For example, click `Pan Up` and `Pan Left`, the camera will first `Pan Up` and then `Pan Left`; \
586
+ \n - `Customized Mode 2: Both A and B`: For example, click `Pan Up` and `Pan Left`, the camera will move towards the upper left corner; \
587
+ \n - `{DIY_MODE[2]}`: Input the RAW RT matrix yourself. \
588
+ \n 2. Slide the `Motion speed` to get a speed value. The larger the value, the faster the camera motion; \
589
+ \n 3. Click `Visualize Camera and Proceed` to visualize the camera poses and proceed; \
590
+ \n 4. Click `Reset Camera` to reset the camera poses (If needed). ",
591
+ show_label=False, visible=False)
592
+
593
+ gr.HighlightedText(value=[("",""), ("1. Select two of the basic camera poses; 2. Select Customized Mode 1 OR Customized Mode 2; 3. Click `Visualize Camera and Proceed` to show the customized camera poses", "Normal")],
594
+ color_map={"Normal": "green", "Error": "red", "Clear clicks": "gray", "Add mask": "green", "Remove mask": "red"}, visible=False)
595
+
596
+ with gr.Row():
597
+ combine1 = gr.Button(value=DIY_MODE[0], visible=False)
598
+ combine2 = gr.Button(value=DIY_MODE[1], visible=False)
599
+ combine3 = gr.Button(value=DIY_MODE[2], visible=False)
600
+ with gr.Row():
601
+ combine3_des = gr.Markdown(f"---\n#### Input your camera pose in the following textbox. \
602
+ A total of 14 lines, where each line contains 12 float numbers representing \
603
+ the RT matrix in the shape of 1x12. \
604
+ The example is the RT matrix of ZOOM IN.", show_label=False, visible=False)
605
+
606
+ with gr.Row():
607
+ U = gr.Button(value="Pan Up", visible=False)
608
+ D = gr.Button(value="Pan Down", visible=False)
609
+ L = gr.Button(value="Pan Left", visible=False)
610
+ R = gr.Button(value="Pan Right", visible=False)
611
+ with gr.Row():
612
+ I = gr.Button(value="Zoom In", visible=False)
613
+ O = gr.Button(value="Zoom Out", visible=False)
614
+ ACW = gr.Button(value="ACW", visible=False)
615
+ CW = gr.Button(value="CW", visible=False)
616
+
617
+ with gr.Row():
618
+ speed = gr.Slider(minimum=0, maximum=8, step=0.2, label="Motion Speed", value=1.0, visible=False)
619
+
620
+ with gr.Row():
621
+ Pose_1 = gr.Button(value="Pose_1", visible=False)
622
+ Pose_2 = gr.Button(value="Pose_2", visible=False)
623
+ Pose_3 = gr.Button(value="Pose_3", visible=False)
624
+ Pose_4 = gr.Button(value="Pose_4", visible=False)
625
+ with gr.Row():
626
+ Pose_5 = gr.Button(value="Pose_5", visible=False)
627
+ Pose_6 = gr.Button(value="Pose_6", visible=False)
628
+ Pose_7 = gr.Button(value="Pose_7", visible=False)
629
+ Pose_8 = gr.Button(value="Pose_8", visible=False)
630
+
631
+ with gr.Row():
632
+ camera_args = gr.Textbox(value="Camera Type", label="Camera Type", visible=False)
633
+ with gr.Row():
634
+ camera_vis= gr.Button(value="Visualize Camera and Proceed", visible=False)
635
+ camera_reset = gr.Button(value="Reset Camera", visible=False)
636
+ with gr.Column():
637
+ vis_camera = gr.Plot(fig, label='Camera Poses', visible=False)
638
+
639
+
640
+ # step4 - Generate videos
641
+ with gr.Row():
642
+ with gr.Column():
643
+ step3_prompt_generate = gr.Markdown("---\n## Step 3/3: Generate videos", show_label=False, visible=False)
644
+ generation_dec = gr.Markdown(f"\n 1. Set `FPS`; \
645
+ \n 2. Set `n_samples`; \
646
+ \n 3. Set `seed`; \
647
+ \n 4. Click `Start generation !` to generate videos; ", visible=False)
648
+ # prompt = gr.Textbox(value="a dog sitting on grass", label="Prompt", interactive=True, visible=False)
649
+ prompt = gr.Slider(minimum=5, maximum=30, step=1, label="FPS", value=10, visible=False)
650
+ n_samples = gr.Number(value=2, precision=0, interactive=True, label="n_samples", visible=False)
651
+ seed = gr.Number(value=1234, precision=0, interactive=True, label="Seed", visible=False)
652
+ start = gr.Button(value="Start generation !", visible=False)
653
+ with gr.Column():
654
+ gen_video = gr.Video(value=None, label="Generate Video", visible=False)
655
+ repeat_highlight=gr.HighlightedText(value=[("",""), (f"1. If the motion control is not obvious, try to increase the `Motion Speed`. \
656
+ \n 2. If the generated videos are distorted severely, try to decrease the `Motion Speed` \
657
+ or increase `FPS`.", "Normal")],
658
+ color_map={"Normal": "green", "Error": "red", "Clear clicks": "gray", "Add mask": "green", "Remove mask": "red"}, visible=False)
659
+
660
+ center_crop_botton.click(
661
+ fn=process_input_image,
662
+ inputs=[input_image, center_crop_botton],
663
+ outputs=[
664
+ process_image,
665
+ step2_camera_motion,
666
+ step2_camera_motion_des,
667
+ camera_mode,
668
+ camera_info,
669
+ basic_camera_motion,
670
+ basic_camera_motion_des,
671
+ custom_camera_motion,
672
+ custom_run_status,
673
+ complex_camera_motion,
674
+ complex_camera_motion_des,
675
+ U, D, L, R,
676
+ I, O, ACW, CW,
677
+ combine1, combine2, combine3, combine3_des,
678
+ speed,
679
+ Pose_1, Pose_2, Pose_3, Pose_4,
680
+ Pose_5, Pose_6, Pose_7, Pose_8,
681
+ camera_args,
682
+ camera_reset, camera_vis,
683
+ vis_camera,
684
+
685
+ step3_prompt_generate,
686
+ generation_dec,
687
+ prompt,
688
+ n_samples,
689
+ seed, start, gen_video, repeat_highlight])
690
+
691
+ keep_spatial_raition_botton.click(
692
+ fn=process_input_image,
693
+ inputs=[input_image, keep_spatial_raition_botton],
694
+ outputs=[
695
+ process_image,
696
+ step2_camera_motion,
697
+ step2_camera_motion_des,
698
+ camera_mode,
699
+ camera_info,
700
+ basic_camera_motion,
701
+ basic_camera_motion_des,
702
+ custom_camera_motion,
703
+ custom_run_status,
704
+ complex_camera_motion,
705
+ complex_camera_motion_des,
706
+ U, D, L, R,
707
+ I, O, ACW, CW,
708
+ combine1, combine2, combine3, combine3_des,
709
+ speed,
710
+ Pose_1, Pose_2, Pose_3, Pose_4,
711
+ Pose_5, Pose_6, Pose_7, Pose_8,
712
+ camera_args,
713
+ camera_reset, camera_vis,
714
+ vis_camera,
715
+
716
+ step3_prompt_generate,
717
+ generation_dec,
718
+ prompt,
719
+ n_samples,
720
+ seed, start, gen_video, repeat_highlight])
721
+
722
+
723
+ camera_info.click(
724
+ fn=visualized_camera_poses,
725
+ inputs=[camera_mode],
726
+ outputs=[basic_camera_motion,
727
+ basic_camera_motion_des,
728
+ custom_camera_motion,
729
+ custom_run_status,
730
+ complex_camera_motion,
731
+ complex_camera_motion_des,
732
+ U, D, L, R,
733
+ I, O, ACW, CW,
734
+ combine1, combine2, combine3, combine3_des,
735
+ speed,
736
+ Pose_1, Pose_2, Pose_3, Pose_4,
737
+ Pose_5, Pose_6, Pose_7, Pose_8,
738
+ camera_args,
739
+ camera_reset, camera_vis,
740
+ vis_camera,
741
+ step3_prompt_generate, generation_dec, prompt, n_samples, seed, start, gen_video, repeat_highlight],
742
+ )
743
+
744
+
745
+ U.click(fn=add_camera_motion, inputs=[U, camera_mode], outputs=camera_args)
746
+ D.click(fn=add_camera_motion, inputs=[D, camera_mode], outputs=camera_args)
747
+ L.click(fn=add_camera_motion, inputs=[L, camera_mode], outputs=camera_args)
748
+ R.click(fn=add_camera_motion, inputs=[R, camera_mode], outputs=camera_args)
749
+ I.click(fn=add_camera_motion, inputs=[I, camera_mode], outputs=camera_args)
750
+ O.click(fn=add_camera_motion, inputs=[O, camera_mode], outputs=camera_args)
751
+ ACW.click(fn=add_camera_motion, inputs=[ACW, camera_mode], outputs=camera_args)
752
+ CW.click(fn=add_camera_motion, inputs=[CW, camera_mode], outputs=camera_args)
753
+ speed.change(fn=change_camera_speed, inputs=speed, outputs=camera_args)
754
+ camera_reset.click(fn=reset_camera, inputs=None, outputs=[camera_args])
755
+
756
+ combine1.click(fn=change_camera_mode,
757
+ inputs=[combine1, camera_mode],
758
+ outputs=[camera_args,
759
+ U, D, L, R,
760
+ I, O, ACW, CW, speed,
761
+ combine3_des])
762
+ combine2.click(fn=change_camera_mode,
763
+ inputs=[combine2, camera_mode],
764
+ outputs=[camera_args,
765
+ U, D, L, R,
766
+ I, O, ACW, CW,
767
+ speed,
768
+ combine3_des])
769
+ combine3.click(fn=input_raw_camera_pose,
770
+ inputs=[combine3, camera_mode],
771
+ outputs=[camera_args,
772
+ U, D, L, R,
773
+ I, O, ACW, CW,
774
+ speed,
775
+ combine3_des])
776
+
777
+ camera_vis.click(fn=fn_vis_camera, inputs=[camera_args],
778
+ outputs=[vis_camera,
779
+ step3_prompt_generate,
780
+ generation_dec,
781
+ prompt,
782
+ n_samples,
783
+ seed,
784
+ start,
785
+ gen_video,
786
+ repeat_highlight])
787
+
788
+ Pose_1.click(fn=add_complex_camera_motion, inputs=Pose_1, outputs=camera_args)
789
+ Pose_2.click(fn=add_complex_camera_motion, inputs=Pose_2, outputs=camera_args)
790
+ Pose_3.click(fn=add_complex_camera_motion, inputs=Pose_3, outputs=camera_args)
791
+ Pose_4.click(fn=add_complex_camera_motion, inputs=Pose_4, outputs=camera_args)
792
+ Pose_5.click(fn=add_complex_camera_motion, inputs=Pose_5, outputs=camera_args)
793
+ Pose_6.click(fn=add_complex_camera_motion, inputs=Pose_6, outputs=camera_args)
794
+ Pose_7.click(fn=add_complex_camera_motion, inputs=Pose_7, outputs=camera_args)
795
+ Pose_8.click(fn=add_complex_camera_motion, inputs=Pose_8, outputs=camera_args)
796
+
797
+
798
+ start.click(fn=model_run,
799
+ inputs=[process_image, prompt, seed, n_samples, camera_args],
800
+ outputs=gen_video)
801
+
802
+ # set example
803
+ gr.Markdown("## Examples")
804
+ examples = glob(os.path.join(os.path.dirname(__file__), "./assets/demo/images", "*.png"))
805
+ gr.Examples(
806
+ examples=examples,
807
+ inputs=[input_image],
808
+ )
809
+
810
+ gr.Markdown(article)
811
+
812
+ # demo.launch(server_name='0.0.0.0', share=False, server_port=args['server_port'])
813
+ # demo.queue(concurrency_count=1, max_size=10)
814
+ # demo.launch()
815
+ demo.queue(max_size=10).launch(**args)
816
+
817
+
818
+ if __name__=="__main__":
819
+ parser = argparse.ArgumentParser()
820
+ # parser.add_argument("--port", type=int, default=12345)
821
+
822
+ parser.add_argument(
823
+ '--listen',
824
+ type=str,
825
+ default='0.0.0.0' if 'SPACE_ID' in os.environ else '127.0.0.1',
826
+ help='IP to listen on for connections to Gradio',
827
+ )
828
+ parser.add_argument(
829
+ '--username', type=str, default='', help='Username for authentication'
830
+ )
831
+ parser.add_argument(
832
+ '--password', type=str, default='', help='Password for authentication'
833
+ )
834
+ parser.add_argument(
835
+ '--server_port',
836
+ type=int,
837
+ default=0,
838
+ help='Port to run the server listener on',
839
+ )
840
+ parser.add_argument(
841
+ '--inbrowser', action='store_true', help='Open in browser'
842
+ )
843
+ parser.add_argument(
844
+ '--share', action='store_true', help='Share the gradio UI'
845
+ )
846
+
847
+ args = parser.parse_args()
848
+
849
+ launch_kwargs = {}
850
+ launch_kwargs['server_name'] = args.listen
851
+
852
+ if args.username and args.password:
853
+ launch_kwargs['auth'] = (args.username, args.password)
854
+ if args.server_port:
855
+ launch_kwargs['server_port'] = args.server_port
856
+ if args.inbrowser:
857
+ launch_kwargs['inbrowser'] = args.inbrowser
858
+ if args.share:
859
+ launch_kwargs['share'] = args.share
860
+
861
+ main(launch_kwargs)
configs/inference/config_motionctrl_cmcm.yaml ADDED
@@ -0,0 +1,169 @@
1
+ model:
2
+ base_learning_rate: 3.0e-5
3
+ target: sgm.motionctrl.camera_motion_control.CameraMotionControl
4
+ params:
5
+ ckpt_path: /group/30098/zhouxiawang/env/share/weights/svd/stable-video-diffusion-img2vid/svd.safetensors
6
+ scale_factor: 0.18215
7
+ input_key: video
8
+ no_cond_log: true
9
+ en_and_decode_n_samples_a_time: 1
10
+ use_ema: false
11
+ disable_first_stage_autocast: true
12
+
13
+ denoiser_config:
14
+ target: sgm.modules.diffusionmodules.denoiser.Denoiser
15
+ params:
16
+ scaling_config:
17
+ target: sgm.modules.diffusionmodules.denoiser_scaling.VScalingWithEDMcNoise
18
+
19
+ network_config:
20
+ target: sgm.modules.diffusionmodules.video_model.VideoUNet
21
+ params:
22
+ num_frames: 14
23
+ adm_in_channels: 768
24
+ num_classes: sequential
25
+ use_checkpoint: false
26
+ in_channels: 8
27
+ out_channels: 4
28
+ model_channels: 320
29
+ attention_resolutions: [4, 2, 1]
30
+ num_res_blocks: 2
31
+ channel_mult: [1, 2, 4, 4]
32
+ num_head_channels: 64
33
+ use_linear_in_transformer: true
34
+ transformer_depth: 1
35
+ context_dim: 1024
36
+ spatial_transformer_attn_type: softmax-xformers
37
+ extra_ff_mix_layer: true
38
+ use_spatial_context: true
39
+ merge_strategy: learned_with_images
40
+ video_kernel_size: [3, 1, 1]
41
+
42
+ conditioner_config:
43
+ target: sgm.modules.GeneralConditioner
44
+ params:
45
+ emb_models:
46
+ - is_trainable: false
47
+ input_key: cond_frames_without_noise
48
+ ucg_rate: 0.1
49
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImagePredictionEmbedder
50
+ params:
51
+ n_cond_frames: 1
52
+ n_copies: 1
53
+ open_clip_embedding_config:
54
+ target: sgm.modules.encoders.modules.FrozenOpenCLIPImageEmbedder
55
+ params:
56
+ freeze: true
57
+ # version: "/apdcephfs_cq3/share_1290939/vg_zoo/dependencies/OpenCLIP-ViT-H-14-laion2B-s32B-b79K/blobs/9a78ef8e8c73fd0df621682e7a8e8eb36c6916cb3c16b291a082ecd52ab79cc4"
58
+
59
+ - input_key: fps_id
60
+ is_trainable: false
61
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
62
+ params:
63
+ outdim: 256
64
+
65
+ - input_key: motion_bucket_id
66
+ is_trainable: false
67
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
68
+ params:
69
+ outdim: 256
70
+
71
+ - input_key: cond_frames
72
+ is_trainable: false
73
+ ucg_rate: 0.1
74
+ target: sgm.modules.encoders.modules.VideoPredictionEmbedderWithEncoder
75
+ params:
76
+ disable_encoder_autocast: true
77
+ n_cond_frames: 1
78
+ n_copies: 1
79
+ is_ae: true
80
+ encoder_config:
81
+ target: sgm.models.autoencoder.AutoencoderKLModeOnly
82
+ params:
83
+ embed_dim: 4
84
+ monitor: val/rec_loss
85
+ ddconfig:
86
+ attn_type: vanilla-xformers
87
+ double_z: true
88
+ z_channels: 4
89
+ resolution: 256
90
+ in_channels: 3
91
+ out_ch: 3
92
+ ch: 128
93
+ ch_mult: [1, 2, 4, 4]
94
+ num_res_blocks: 2
95
+ attn_resolutions: []
96
+ dropout: 0.0
97
+ lossconfig:
98
+ target: torch.nn.Identity
99
+
100
+ - input_key: cond_aug
101
+ is_trainable: false
102
+ target: sgm.modules.encoders.modules.ConcatTimestepEmbedderND
103
+ params:
104
+ outdim: 256
105
+
106
+ first_stage_config:
107
+ target: sgm.models.autoencoder.AutoencodingEngine
108
+ params:
109
+ loss_config:
110
+ target: torch.nn.Identity
111
+ regularizer_config:
112
+ target: sgm.modules.autoencoding.regularizers.DiagonalGaussianRegularizer
113
+ encoder_config:
114
+ target: sgm.modules.diffusionmodules.model.Encoder
115
+ params:
116
+ attn_type: vanilla
117
+ double_z: true
118
+ z_channels: 4
119
+ resolution: 256
120
+ in_channels: 3
121
+ out_ch: 3
122
+ ch: 128
123
+ ch_mult: [1, 2, 4, 4]
124
+ num_res_blocks: 2
125
+ attn_resolutions: []
126
+ dropout: 0.0
127
+ decoder_config:
128
+ target: sgm.modules.autoencoding.temporal_ae.VideoDecoder
129
+ params:
130
+ attn_type: vanilla
131
+ double_z: true
132
+ z_channels: 4
133
+ resolution: 256
134
+ in_channels: 3
135
+ out_ch: 3
136
+ ch: 128
137
+ ch_mult: [1, 2, 4, 4]
138
+ num_res_blocks: 2
139
+ attn_resolutions: []
140
+ dropout: 0.0
141
+ video_kernel_size: [3, 1, 1]
142
+
143
+ # loss_fn_config:
144
+ # target: sgm.modules.diffusionmodules.loss.StandardDiffusionLoss
145
+ # params:
146
+ # batch2model_keys: ['RT']
147
+ # loss_weighting_config:
148
+ # target: sgm.modules.diffusionmodules.loss_weighting.VWeighting
149
+ # sigma_sampler_config:
150
+ # target: sgm.modules.diffusionmodules.sigma_sampling.EDMSampling
151
+ # params:
152
+ # p_mean: 1.0
153
+ # p_std: 1.6
154
+
155
+ sampler_config:
156
+ target: sgm.modules.diffusionmodules.sampling.EulerEDMSampler
157
+ params:
158
+ num_steps: 25
159
+ discretization_config:
160
+ target: sgm.modules.diffusionmodules.discretizer.EDMDiscretization
161
+ params:
162
+ sigma_max: 700.0
163
+
164
+ guider_config:
165
+ target: sgm.modules.diffusionmodules.guiders.LinearPredictionGuider
166
+ params:
167
+ num_frames: 14
168
+ max_scale: 2.5
169
+ min_scale: 1.0
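
The config above is the same one app.py loads at start-up. For reference, a minimal sketch of loading it outside the Gradio demo, reusing this commit's own `gradio_utils.motionctrl_cmcm_gradio.build_model` helper exactly as app.py calls it (positional arguments: config, ckpt, device, num_frames, num_steps); the checkpoint path is assumed to exist locally:

```python
# Minimal sketch (not one of this commit's files): build the MotionCtrl-SVD
# model from the inference config above, mirroring the call in app.py.
import torch
from gradio_utils.motionctrl_cmcm_gradio import build_model  # helper added in this commit

config = "configs/inference/config_motionctrl_cmcm.yaml"
ckpt = "checkpoints/motionctrl_svd.ckpt"   # assumed to be downloaded beforehand
device = "cuda" if torch.cuda.is_available() else "cpu"

# num_frames=14 and num_steps=25, the same defaults used by app.py
model = build_model(config, ckpt, device, 14, 25)
```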
configs/inference/motionctrl_run.sh ADDED
@@ -0,0 +1,36 @@
+
+
+ ckpt='checkpoints/motionctrl_svd.ckpt'
+ config='configs/inference/config_motionctrl_cmcm.yaml'
+
+ height=576
+ width=1024
+ cond_aug=0.02
+
+ fps=10
+
+ image_input='examples/basic/eduardo-gorghetto-5auIBbcoRNw-unsplash.jpg'
+
+ res_dir="outputs/motionctrl_svd"
+ if [ ! -d $res_dir ]; then
+ mkdir -p $res_dir
+ fi
+
+ CUDA_VISIBLE_DEVICES=7 python main/inference/motionctrl_cmcm.py \
+ --seed 12345 \
+ --ckpt $ckpt \
+ --config $config \
+ --savedir $res_dir \
+ --savefps 10 \
+ --ddim_steps 25 \
+ --frames 14 \
+ --input $image_input \
+ --fps $fps \
+ --motion 127 \
+ --cond_aug $cond_aug \
+ --decoding_t 1 --resize \
+ --height $height --width $width \
+ --sample_num 2 \
+ --transform \
+ --pose_dir 'examples/camera_poses' \
+ --speed 2.0 \
examples/camera_poses/test.txt ADDED
@@ -0,0 +1,42 @@
1
+ gr.update(value=f'1.0 -4.493872218791495e-10 5.58983348497577e-09 1.9967236752904682e-09 \
2
+ -4.493872218791495e-10 1.0 -6.144247333139674e-10 1.0815730533408896e-09 \
3
+ 5.58983348497577e-09 -6.144247333139674e-10 1.0 -7.984015226725205e-09 \n\
4
+ 0.9982863664627075 -0.0024742060340940952 0.05846544727683067 -0.024547122418880463 \
5
+ 0.002410230925306678 0.9999964237213135 0.0011647245846688747 -0.003784072818234563 \
6
+ -0.05846811458468437 -0.0010218139505013824 0.9982887506484985 -0.09103696048259735 \n\
7
+ 0.9933298230171204 -0.006303737405687571 0.11513543128967285 -0.053876250982284546 \
8
+ 0.00586089538410306 0.9999741315841675 0.004184383898973465 -0.006566310301423073 \
9
+ -0.115158811211586 -0.0034816779661923647 0.9933409690856934 -0.18525512516498566\n \
10
+ 0.9849286675453186 -0.013619760051369667 0.17242403328418732 -0.08322551101446152 \
11
+ 0.01256392989307642 0.9998950958251953 0.0072133541107177734 -0.004579910542815924 \
12
+ -0.17250417172908783 -0.004938316997140646 0.9849964380264282 -0.28701746463775635 \n\
13
+ 0.9731453657150269 -0.022617166861891747 0.2290775030851364 -0.11655563861131668 \
14
+ 0.02060025744140148 0.9997251629829407 0.011192308738827705 -0.0017426757840439677\
15
+ -0.2292676568031311 -0.006172688212245703 0.9733438491821289 -0.37736839056015015\n \
16
+ 0.9582399725914001 -0.03294993191957474 0.2840607464313507 -0.15743066370487213 \
17
+ 0.030182993039488792 0.9994447827339172 0.014113469049334526 -0.002769832033663988 \
18
+ -0.28436803817749023 -0.004950287751853466 0.9587023854255676 -0.46959081292152405 \n \
19
+ 0.940129816532135 -0.03991429880261421 0.3384712040424347 -0.22889098525047302 \
20
+ 0.03725311905145645 0.9992027282714844 0.01435780432075262 -0.0028311305213719606 \
21
+ -0.3387744128704071 -0.0008890923927538097 0.9408671855926514 -0.5631460547447205\n \
22
+ 0.9222924709320068 -0.044258520007133484 0.38395029306411743 -0.2986142039299011 \
23
+ 0.04110203683376312 0.9990199208259583 0.01642671786248684 0.0013055746676400304 \
24
+ -0.38430097699165344 0.000630900904070586 0.9232076406478882 -0.6414245367050171\n \
25
+ 0.9061535000801086 -0.04851173609495163 0.4201577305793762 -0.3483412563800812 \
26
+ 0.04521748423576355 0.9988185167312622 0.017803886905312538 0.0010280977003276348 \
27
+ -0.4205249547958374 0.0028654206544160843 0.907276451587677 -0.7144853472709656\n \
28
+ 0.8919307589530945 -0.05171844735741615 0.4492044746875763 -0.37905213236808777 \
29
+ 0.04818608984351158 0.9986518621444702 0.019300933927297592 0.00036871168413199484 \
30
+ -0.44959715008735657 0.004430312197655439 0.8932204246520996,-0.7976372241973877\n \
31
+ 0.8792291879653931 -0.05425972864031792 0.47329893708229065 -0.39671003818511963 \
32
+ 0.05076585337519646 0.998507022857666 0.02016463316977024 0.001104982104152441 \
33
+ -0.4736863970756531 0.00629808846861124 0.8806710243225098 -0.8874085545539856\n \
34
+ 0.8659296035766602 -0.0567130371928215 0.49694016575813293 -0.4097800552845001 \
35
+ 0.05366959795355797 0.9983500838279724 0.020415671169757843 0.0009228077251464128 \
36
+ -0.497278094291687 0.008992047980427742 0.8675445914268494 -0.9762357473373413\n \
37
+ 0.8503361940383911 -0.055699657648801804 0.5232837200164795 -0.44268566370010376 \
38
+ 0.054582174867391586 0.9983546733856201 0.01757136546075344 0.005412018392235041 \
39
+ -0.5234014391899109, 0.013620397076010704, 0.8519773483276367, -1.069865107536316 \n \
40
+ 0.836037814617157 -0.05214058235287666 0.5461887717247009 -0.4671085774898529 \
41
+ 0.05177384987473488 0.9985294938087463 0.01607322134077549 0.008980141952633858 \
42
+ -0.5462236404418945 0.014840473420917988 0.8375079035758972 -1.1569048166275024\n', interactive=True), \
examples/camera_poses/test_camera_018f7907401f2fef.json ADDED
@@ -0,0 +1 @@
+ [[1.0, -4.493872218791495e-10, 5.58983348497577e-09, 1.9967236752904682e-09, -4.493872218791495e-10, 1.0, -6.144247333139674e-10, 1.0815730533408896e-09, 5.58983348497577e-09, -6.144247333139674e-10, 1.0, -7.984015226725205e-09], [0.9982863664627075, -0.0024742060340940952, 0.05846544727683067, -0.024547122418880463, 0.002410230925306678, 0.9999964237213135, 0.0011647245846688747, -0.003784072818234563, -0.05846811458468437, -0.0010218139505013824, 0.9982887506484985, -0.09103696048259735], [0.9933298230171204, -0.006303737405687571, 0.11513543128967285, -0.053876250982284546, 0.00586089538410306, 0.9999741315841675, 0.004184383898973465, -0.006566310301423073, -0.115158811211586, -0.0034816779661923647, 0.9933409690856934, -0.18525512516498566], [0.9849286675453186, -0.013619760051369667, 0.17242403328418732, -0.08322551101446152, 0.01256392989307642, 0.9998950958251953, 0.0072133541107177734, -0.004579910542815924, -0.17250417172908783, -0.004938316997140646, 0.9849964380264282, -0.28701746463775635], [0.9731453657150269, -0.022617166861891747, 0.2290775030851364, -0.11655563861131668, 0.02060025744140148, 0.9997251629829407, 0.011192308738827705, -0.0017426757840439677, -0.2292676568031311, -0.006172688212245703, 0.9733438491821289, -0.37736839056015015], [0.9582399725914001, -0.03294993191957474, 0.2840607464313507, -0.15743066370487213, 0.030182993039488792, 0.9994447827339172, 0.014113469049334526, -0.002769832033663988, -0.28436803817749023, -0.004950287751853466, 0.9587023854255676, -0.46959081292152405], [0.940129816532135, -0.03991429880261421, 0.3384712040424347, -0.22889098525047302, 0.03725311905145645, 0.9992027282714844, 0.01435780432075262, -0.0028311305213719606, -0.3387744128704071, -0.0008890923927538097, 0.9408671855926514, -0.5631460547447205], [0.9222924709320068, -0.044258520007133484, 0.38395029306411743, -0.2986142039299011, 0.04110203683376312, 0.9990199208259583, 0.01642671786248684, 0.0013055746676400304, -0.38430097699165344, 0.000630900904070586, 0.9232076406478882, -0.6414245367050171], [0.9061535000801086, -0.04851173609495163, 0.4201577305793762, -0.3483412563800812, 0.04521748423576355, 0.9988185167312622, 0.017803886905312538, 0.0010280977003276348, -0.4205249547958374, 0.0028654206544160843, 0.907276451587677, -0.7144853472709656], [0.8919307589530945, -0.05171844735741615, 0.4492044746875763, -0.37905213236808777, 0.04818608984351158, 0.9986518621444702, 0.019300933927297592, 0.00036871168413199484, -0.44959715008735657, 0.004430312197655439, 0.8932204246520996, -0.7976372241973877], [0.8792291879653931, -0.05425972864031792, 0.47329893708229065, -0.39671003818511963, 0.05076585337519646, 0.998507022857666, 0.02016463316977024, 0.001104982104152441, -0.4736863970756531, 0.00629808846861124, 0.8806710243225098, -0.8874085545539856], [0.8659296035766602, -0.0567130371928215, 0.49694016575813293, -0.4097800552845001, 0.05366959795355797, 0.9983500838279724, 0.020415671169757843, 0.0009228077251464128, -0.497278094291687, 0.008992047980427742, 0.8675445914268494, -0.9762357473373413], [0.8503361940383911, -0.055699657648801804, 0.5232837200164795, -0.44268566370010376, 0.054582174867391586, 0.9983546733856201, 0.01757136546075344, 0.005412018392235041, -0.5234014391899109, 0.013620397076010704, 0.8519773483276367, -1.069865107536316], [0.836037814617157, -0.05214058235287666, 0.5461887717247009, -0.4671085774898529, 0.05177384987473488, 0.9985294938087463, 0.01607322134077549, 0.008980141952633858, -0.5462236404418945, 0.014840473420917988, 
0.8375079035758972, -1.1569048166275024], [0.82603919506073, -0.04987695440649986, 0.5614013671875, -0.4677649438381195, 0.05124447122216225, 0.9985973834991455, 0.013318539597094059, 0.012170637026429176, -0.5612781643867493, 0.017767081037163734, 0.8274364471435547, -1.2651430368423462], [0.8179472088813782, -0.0496118925511837, 0.573150098323822, -0.45822662115097046, 0.052784956991672516, 0.9985441565513611, 0.011104168370366096, 0.018991567194461823, -0.5728666186332703, 0.0211710836738348, 0.8193751573562622, -1.3895009756088257]]
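
Each of these camera-pose JSON files stores one flattened 3x4 world-to-camera RT matrix per frame (12 floats per row), matching the `[t, 3, 4]` / `reshape(-1, 12)` handling in app.py. A minimal parsing sketch (illustrative only, not part of this commit):

```python
# Minimal sketch: parse a camera-pose JSON from examples/camera_poses into
# per-frame 3x4 RT matrices, the same layout app.py feeds to the sampler.
import json
import numpy as np

with open("examples/camera_poses/test_camera_018f7907401f2fef.json") as f:
    rows = json.load(f)                       # list of frames, 12 floats each

RT = np.asarray(rows, dtype=np.float32).reshape(-1, 3, 4)
print(RT.shape)                               # e.g. (16, 3, 4) for this file
```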
examples/camera_poses/test_camera_088b93f15ca8745d.json ADDED
@@ -0,0 +1 @@
+ [[0.9999999403953552, 3.8618797049139175e-10, -1.3441345814158012e-08, 1.3928219289027766e-07, 3.8618797049139175e-10, 1.0, -4.134579345560496e-10, -6.074658998045379e-09, -1.3441345814158012e-08, -4.134579345560496e-10, 1.0, 7.038884319854333e-08], [0.9994913339614868, 0.003077245783060789, -0.031741149723529816, 0.08338673412799835, -0.0030815028585493565, 0.999995231628418, -8.520588744431734e-05, 0.006532138213515282, 0.0317407064139843, 0.00018297435599379241, 0.9994961619377136, -0.02256060019135475], [0.9979938268661499, 0.0051255361177027225, -0.06310292333364487, 0.18344485759735107, -0.005117486696690321, 0.9999868869781494, 0.00028916727751493454, 0.018134046345949173, 0.06310353428125381, 3.434090831433423e-05, 0.9980069994926453, -0.030579563230276108], [0.9954646825790405, 0.00820203311741352, -0.0947771891951561, 0.29663264751434326, -0.00811922550201416, 0.9999662041664124, 0.0012593322899192572, 0.02404301054775715, 0.09478426724672318, -0.0004841022891923785, 0.9954977035522461, -0.02678978443145752], [0.9913660883903503, 0.012001598253846169, -0.13057230412960052, 0.4076530337333679, -0.011968829669058323, 0.999927818775177, 0.0010357286082580686, 0.024977533146739006, 0.1305752843618393, 0.0005360084469430149, 0.9914382100105286, -0.010779343545436859], [0.985666811466217, 0.017323914915323257, -0.16781197488307953, 0.509911060333252, -0.017399737611413002, 0.9998481273651123, 0.0010186078725382686, 0.023117201402783394, 0.16780413687229156, 0.0019158748909831047, 0.9858185052871704, 0.018053216859698296], [0.9784473180770874, 0.022585421800613403, -0.20525763928890228, 0.5957884192466736, -0.022850200533866882, 0.9997382760047913, 0.0010805513011291623, 0.020451901480555534, 0.2052282989025116, 0.003632916137576103, 0.9787073731422424, 0.03460140898823738], [0.9711515307426453, 0.026846906170248985, -0.23694702982902527, 0.6832671165466309, -0.02745947800576687, 0.999622642993927, 0.0007151798927225173, 0.012211678549647331, 0.23687675595283508, 0.005811895243823528, 0.971522331237793, 0.03236595541238785], [0.9641746878623962, 0.030338184908032417, -0.26352745294570923, 0.7764986157417297, -0.031404945999383926, 0.9995067715644836, 0.0001645474840188399, 0.0011497576488181949, 0.26340243220329285, 0.008117412216961384, 0.964651882648468, 0.022656364366412163], [0.9573631882667542, 0.0335896760225296, -0.2869274914264679, 0.8815275430679321, -0.03532479330897331, 0.9993755221366882, -0.0008711823611520231, -0.003618708113208413, 0.2867189943790436, 0.010969695635139942, 0.9579519033432007, 0.005283573176711798], [0.9507063627243042, 0.036557890474796295, -0.3079299330711365, 0.9931321740150452, -0.03846294432878494, 0.9992600679397583, -0.00011733790597645566, 0.0018704120302572846, 0.30769774317741394, 0.01195544097572565, 0.9514090418815613, -0.035360634326934814], [0.9448517560958862, 0.039408694952726364, -0.3251185715198517, 1.1025006771087646, -0.041503626853227615, 0.9991382360458374, 0.0004919985658489168, 0.007425118237733841, 0.32485777139663696, 0.013028733432292938, 0.9456731081008911, -0.09869624674320221], [0.940796971321106, 0.04081147164106369, -0.33650481700897217, 1.1961394548416138, -0.0429220013320446, 0.9990777373313904, 0.0011677180882543325, 0.019955899566411972, 0.336242139339447, 0.013344875536859035, 0.9416810274124146, -0.16835527122020721], [0.9376427531242371, 0.04111124947667122, -0.3451607823371887, 1.2392503023147583, -0.043144747614860535, 0.9990671873092651, 0.0017920633545145392, 0.03982722759246826, 0.34491249918937683, 
0.013211555778980255, 0.938541829586029, -0.24618202447891235], [0.9353355765342712, 0.04122937470674515, -0.3513509929180145, 1.285768747329712, -0.043183211237192154, 0.9990646243095398, 0.0022769556380808353, 0.06841164082288742, 0.3511161506175995, 0.01304274145513773, 0.936241090297699, -0.3213619291782379], [0.9342393279075623, 0.041213057935237885, -0.3542574644088745, 1.3363462686538696, -0.04236872121691704, 0.9990919232368469, 0.0044970144517719746, 0.08925694227218628, 0.35412102937698364, 0.010808154009282589, 0.9351370930671692, -0.40201041102409363]]
examples/camera_poses/test_camera_1424acd0007d40b5.json ADDED
@@ -0,0 +1 @@
+ [[1.0, 9.44418099280142e-10, 3.889182664806867e-08, 6.214055492392845e-09, 9.44418099280142e-10, 1.0, -1.0644604121756718e-11, -7.621465680784922e-10, 3.889182664806867e-08, -1.0644604121756718e-11, 1.0, -2.7145965475483536e-08], [0.9873979091644287, -0.007892023772001266, 0.15806053578853607, 0.4749181270599365, 0.008024877868592739, 0.9999678134918213, -0.00020230526570230722, 0.1585356593132019, -0.15805381536483765, 0.0014681711327284575, 0.9874294400215149, -0.2091633826494217], [0.9708925485610962, -0.011486345902085304, 0.23923994600772858, 0.8120080828666687, 0.012198254466056824, 0.9999244809150696, -0.0014952132478356361, 0.2486257702112198, -0.23920467495918274, 0.004370000213384628, 0.9709593057632446, -0.5957822799682617], [0.9619541168212891, -0.013188007287681103, 0.2728927433490753, 1.1486873626708984, 0.014017474837601185, 0.9999011754989624, -0.001090032048523426, 0.3114692270755768, -0.2728513777256012, 0.0048738280311226845, 0.962043821811676, -1.0323039293289185], [0.9586812257766724, -0.013936692848801613, 0.284140944480896, 1.5948307514190674, 0.014867136254906654, 0.9998888373374939, -0.0011181083973497152, 0.36000898480415344, -0.2840937674045563, 0.0052962712943553925, 0.958781898021698, -1.4377187490463257], [0.9583359360694885, -0.011928150430321693, 0.28539448976516724, 2.002793788909912, 0.014221147634088993, 0.9998810887336731, -0.0059633455239236355, 0.35464340448379517, -0.28528934717178345, 0.009773525409400463, 0.9583916068077087, -1.8953297138214111], [0.9584393501281738, -0.010862396098673344, 0.28508952260017395, 2.3351645469665527, 0.012857729569077492, 0.9999041557312012, -0.005128204356878996, 0.38934090733528137, -0.28500640392303467, 0.008580676279962063, 0.9584871530532837, -2.4214961528778076], [0.9587277173995972, -0.009760312736034393, 0.28415825963020325, 2.6858017444610596, 0.012186127714812756, 0.9999027848243713, -0.0067702098749578, 0.4173329174518585, -0.28406453132629395, 0.009953574277460575, 0.9587535262107849, -3.030754327774048], [0.9589635729789734, -0.0070899901911616325, 0.2834406793117523, 2.8917219638824463, 0.010482418350875378, 0.9998904466629028, -0.01045384630560875, 0.4001043438911438, -0.28333547711372375, 0.012996001169085503, 0.9589328169822693, -3.6960957050323486], [0.9590328931808472, -0.005921780597418547, 0.2832328677177429, 3.034579038619995, 0.00947173498570919, 0.9998928308486938, -0.01116593275219202, 0.4193899631500244, -0.2831363379955292, 0.013391205109655857, 0.9589861631393433, -4.384733200073242], [0.9593284726142883, -0.004661113955080509, 0.28225383162498474, 3.2042288780212402, 0.0076906089670956135, 0.9999240636825562, -0.009626304730772972, 0.4752484858036041, -0.28218749165534973, 0.011405492201447487, 0.9592914581298828, -5.098723411560059], [0.9591755867004395, -0.0035665074829012156, 0.2827887237071991, 3.263953924179077, 0.0062992447055876255, 0.9999418258666992, -0.008754877373576164, 0.4543868899345398, -0.282740980386734, 0.010178821161389351, 0.95914226770401, -5.7807512283325195], [0.9591742753982544, -0.003413048107177019, 0.2827949523925781, 3.3116462230682373, 0.003146615345031023, 0.9999940991401672, 0.0013963348465040326, 0.4299861788749695, -0.28279799222946167, -0.0004494813329074532, 0.9591793417930603, -6.478931903839111], [0.9585762619972229, -0.002857929328456521, 0.28482162952423096, 3.4120190143585205, -0.008201238699257374, 0.9992581605911255, 0.0376281812787056, 0.21596357226371765, -0.2847178280353546, -0.03840537369251251, 0.957841694355011, -7.178638935089111], 
[0.9572952389717102, -0.002719884505495429, 0.28909942507743835, 3.4662365913391113, -0.030634215101599693, 0.9933721423149109, 0.11078491061925888, -0.29767531156539917, -0.2874845862388611, -0.11491020023822784, 0.9508671164512634, -7.794575214385986], [0.9545961618423462, -0.005338957067579031, 0.29785510897636414, 3.5083835124969482, -0.06037351116538048, 0.9756243824958801, 0.2109788954257965, -1.0968165397644043, -0.29172107577323914, -0.21938219666481018, 0.9310050010681152, -8.306528091430664]]
examples/camera_poses/test_camera_D.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.2, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.28750000000000003, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.37500000000000006, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.4625000000000001, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.55, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.6375000000000002, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.7250000000000001, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.8125000000000002, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.9000000000000001, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -0.9875000000000003, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.0750000000000002, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.1625000000000003, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.2500000000000002, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.3375000000000001, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.4250000000000003, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.5125000000000004, 0.0, 0.0, 1.0, 0.0]
+ ]
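Each of the `test_camera_*.json` files in this commit stores one flattened 3×4 pose per generated frame (12 floats per row, 16 rows per file). The sketch below shows one way such a file can be loaded and reshaped; reading each row as a row-major [R|t] matrix is an assumption based on the values themselves (identity rotation with a translation that steps along a single axis), not something stated in the diff.

```python
import json

import numpy as np

# Minimal sketch, assuming each row is a row-major 3x4 [R|t] matrix.
# The file path is from this commit; json/numpy are standard libraries.
with open("examples/camera_poses/test_camera_D.json") as f:
    poses = json.load(f)                      # 16 rows of 12 floats

rt = np.array(poses, dtype=np.float32).reshape(-1, 3, 4)
rotations, translations = rt[:, :, :3], rt[:, :, 3]
print(rt.shape)                               # (16, 3, 4)
print(translations[0], translations[-1])      # roughly [0, -0.2, 0] ... [0, -1.5125, 0]
```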
examples/camera_poses/test_camera_I.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.2],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.28750000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.37500000000000006],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.4625000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.55],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.6375000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.7250000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.8125000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.9000000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.9875000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0750000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.1625000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.2500000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.3375000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.4250000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.5125000000000004]
+ ]
examples/camera_poses/test_camera_I_0.2x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.022500000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.045000000000000005],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0675],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.09000000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.11250000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.135],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.15750000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.18000000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.2025],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.22500000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.24750000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.27],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.29250000000000004],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.31500000000000006],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.3375]
+ ]
examples/camera_poses/test_camera_I_0.4x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.045000000000000005],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.09000000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.135],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.18000000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.22500000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.27],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.31500000000000006],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.36000000000000004],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.405],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.45000000000000007],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.49500000000000005],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.54],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.5850000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.6300000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.675]
+ ]
examples/camera_poses/test_camera_I_1.0x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.1125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.225],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.3375],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.45],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.5625],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.675],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.7875],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.9],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.2375],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.35],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.4625000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.575],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.6875]
+ ]
examples/camera_poses/test_camera_I_2.0x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.225],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.45],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.675],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.9],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.35],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.575],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.8],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -2.025],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -2.25],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -2.475],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -2.7],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -2.9250000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -3.15],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, -3.375]
+ ]
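The `_0.2x`/`_0.4x`/`_1.0x`/`_2.0x` suffixes on the zoom-in (`I`) and zoom-out (`O`) pose files only change the size of the per-frame translation step. The generator sketched below reproduces the `test_camera_I_*.json` variants above; the 0.1125 base step and the function name are inferred from the numbers in the diff, not taken from the repository code.

```python
import json

# Hypothetical sketch: the I_{speed}x files look like a straight zoom-in where
# the camera translates along -z by 0.1125 * speed per frame, for 16 frames.
def zoom_in_poses(speed, n_frames=16, base_step=0.1125):
    poses = []
    for i in range(n_frames):
        poses.append([1.0, 0.0, 0.0, 0.0,
                      0.0, 1.0, 0.0, 0.0,
                      0.0, 0.0, 1.0, -base_step * speed * i])
    return poses

print(json.dumps(zoom_in_poses(0.2)[1]))  # last value -0.0225, as in test_camera_I_0.2x.json
```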
examples/camera_poses/test_camera_L.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.2, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.28750000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.37500000000000006, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.4625000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.55, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.6375000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.7250000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.8125000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.9000000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.9875000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 1.0750000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 1.1625000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 1.2500000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 1.3375000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 1.4250000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 1.5125000000000004, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]
+ ]
examples/camera_poses/test_camera_O.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.2],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.28750000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.37500000000000006],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.4625000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.55],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.6375000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.7250000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.8125000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.9000000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.9875000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0750000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.1625000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.2500000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.3375000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.4250000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.5125000000000004]
+ ]
examples/camera_poses/test_camera_O_0.2x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.022500000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.045000000000000005],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0675],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.09000000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.11250000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.135],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.15750000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.18000000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.2025],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.22500000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.24750000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.27],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.29250000000000004],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.31500000000000006],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.3375]
+ ]
examples/camera_poses/test_camera_O_0.4x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.045000000000000005],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.09000000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.135],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.18000000000000002],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.22500000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.27],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.31500000000000006],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.36000000000000004],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.405],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.45000000000000007],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.49500000000000005],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.54],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.5850000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.6300000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.675]
+ ]
examples/camera_poses/test_camera_O_1.0x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.1125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.225],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.3375],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.45],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.5625],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.675],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.7875],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.9],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.2375],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.35],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.4625000000000001],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.575],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.6875]
+ ]
examples/camera_poses/test_camera_O_2.0x.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.225],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.45],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.675],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.9],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.125],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.35],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.575],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.8],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.025],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.25],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.475],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.7],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 2.9250000000000003],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 3.15],
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 3.375]
+ ]
examples/camera_poses/test_camera_R.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, -0.2, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.28750000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.37500000000000006, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.4625000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.55, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.6375000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.7250000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.8125000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.9000000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -0.9875000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -1.0750000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -1.1625000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -1.2500000000000002, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -1.3375000000000001, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -1.4250000000000003, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0],
+   [1.0, 0.0, 0.0, -1.5125000000000004, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0]
+ ]
examples/camera_poses/test_camera_Round-RI-120.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.8],
+   [0.9914448613738104, 0.0, 0.13052619222005157, 0.13052619222005157, 0.0, 1.0, 0.0, 0.0, -0.13052619222005157, 0.0, 0.9914448613738104, 1.7914448613738103],
+   [0.9659258262890683, 0.0, 0.25881904510252074, 0.25881904510252074, 0.0, 1.0, 0.0, 0.0, -0.25881904510252074, 0.0, 0.9659258262890683, 1.7659258262890685],
+   [0.9238795325112867, 0.0, 0.3826834323650898, 0.3826834323650898, 0.0, 1.0, 0.0, 0.0, -0.3826834323650898, 0.0, 0.9238795325112867, 1.7238795325112868],
+   [0.8660254037844387, 0.0, 0.49999999999999994, 0.49999999999999994, 0.0, 1.0, 0.0, 0.0, -0.49999999999999994, 0.0, 0.8660254037844387, 1.6660254037844386],
+   [0.7933533402912353, 0.0, 0.6087614290087205, 0.6087614290087205, 0.0, 1.0, 0.0, 0.0, -0.6087614290087205, 0.0, 0.7933533402912353, 1.5933533402912352],
+   [0.7071067811865476, 0.0, 0.7071067811865476, 0.7071067811865476, 0.0, 1.0, 0.0, 0.0, -0.7071067811865476, 0.0, 0.7071067811865476, 1.5071067811865477],
+   [0.6087614290087207, 0.0, 0.7933533402912352, 0.7933533402912352, 0.0, 1.0, 0.0, 0.0, -0.7933533402912352, 0.0, 0.6087614290087207, 1.4087614290087207],
+   [0.5000000000000001, 0.0, 0.8660254037844386, 0.8660254037844386, 0.0, 1.0, 0.0, 0.0, -0.8660254037844386, 0.0, 0.5000000000000001, 1.3000000000000003],
+   [0.38268343236508984, 0.0, 0.9238795325112867, 0.9238795325112867, 0.0, 1.0, 0.0, 0.0, -0.9238795325112867, 0.0, 0.38268343236508984, 1.1826834323650899],
+   [0.25881904510252096, 0.0, 0.9659258262890682, 0.9659258262890682, 0.0, 1.0, 0.0, 0.0, -0.9659258262890682, 0.0, 0.25881904510252096, 1.058819045102521],
+   [0.1305261922200517, 0.0, 0.9914448613738104, 0.9914448613738104, 0.0, 1.0, 0.0, 0.0, -0.9914448613738104, 0.0, 0.1305261922200517, 0.9305261922200517],
+   [6.123233995736766e-17, 0.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 6.123233995736766e-17, 0.8000000000000002],
+   [-0.13052619222005138, 0.0, 0.9914448613738105, 0.9914448613738105, 0.0, 1.0, 0.0, 0.0, -0.9914448613738105, 0.0, -0.13052619222005138, 0.6694738077799487],
+   [-0.25881904510252063, 0.0, 0.9659258262890683, 0.9659258262890683, 0.0, 1.0, 0.0, 0.0, -0.9659258262890683, 0.0, -0.25881904510252063, 0.5411809548974794],
+   [-0.3826834323650895, 0.0, 0.9238795325112868, 0.9238795325112868, 0.0, 1.0, 0.0, 0.0, -0.9238795325112868, 0.0, -0.3826834323650895, 0.41731656763491054]
+ ]
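The `Round-*` files sweep the camera along an arc: each pose combines a rotation about the y axis with a translation that keeps the camera circling a point in front of it. The sketch below is a hypothetical generator that reproduces `test_camera_Round-RI-120.json` above; the 7.5° step (120°/16 frames) and the 0.8 offset on the z translation are read off the values in the diff, not taken from the repository code.

```python
import json
import math

# Hypothetical sketch, assuming each row is
# [cos t, 0, sin t, sin t,  0, 1, 0, 0,  -sin t, 0, cos t, cos t + 0.8]
# with t = 7.5 degrees * frame index (16 frames, so 0 .. 112.5 degrees).
def round_ri_120_poses(n_frames=16, step_deg=7.5, z_offset=0.8):
    poses = []
    for i in range(n_frames):
        t = math.radians(step_deg * i)
        c, s = math.cos(t), math.sin(t)
        poses.append([c, 0.0, s, s, 0.0, 1.0, 0.0, 0.0, -s, 0.0, c, c + z_offset])
    return poses

# Should match the second row above up to floating-point rounding.
print(json.dumps(round_ri_120_poses()[1]))
```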
examples/camera_poses/test_camera_Round-RI.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.7000000000000002],
+   [0.9807852804032304, 0.0, 0.19509032201612825, 0.17558128981451543, 0.0, 1.0, 0.0, 0.0, -0.19509032201612825, 0.0, 0.9807852804032304, 1.6827067523629076],
+   [0.9238795325112867, 0.0, 0.3826834323650898, 0.3444150891285808, 0.0, 1.0, 0.0, 0.0, -0.3826834323650898, 0.0, 0.9238795325112867, 1.631491579260158],
+   [0.8314696123025452, 0.0, 0.5555702330196022, 0.500013209717642, 0.0, 1.0, 0.0, 0.0, -0.5555702330196022, 0.0, 0.8314696123025452, 1.5483226510722907],
+   [0.7071067811865476, 0.0, 0.7071067811865476, 0.6363961030678928, 0.0, 1.0, 0.0, 0.0, -0.7071067811865476, 0.0, 0.7071067811865476, 1.436396103067893],
+   [0.5555702330196023, 0.0, 0.8314696123025452, 0.7483226510722907, 0.0, 1.0, 0.0, 0.0, -0.8314696123025452, 0.0, 0.5555702330196023, 1.3000132097176422],
+   [0.38268343236508984, 0.0, 0.9238795325112867, 0.831491579260158, 0.0, 1.0, 0.0, 0.0, -0.9238795325112867, 0.0, 0.38268343236508984, 1.144415089128581],
+   [0.19509032201612833, 0.0, 0.9807852804032304, 0.8827067523629074, 0.0, 1.0, 0.0, 0.0, -0.9807852804032304, 0.0, 0.19509032201612833, 0.9755812898145155],
+   [6.123233995736766e-17, 0.0, 1.0, 0.9, 0.0, 1.0, 0.0, 0.0, -1.0, 0.0, 6.123233995736766e-17, 0.8],
+   [-0.1950903220161282, 0.0, 0.9807852804032304, 0.8827067523629074, 0.0, 1.0, 0.0, 0.0, -0.9807852804032304, 0.0, -0.1950903220161282, 0.6244187101854847],
+   [-0.3826834323650897, 0.0, 0.9238795325112867, 0.831491579260158, 0.0, 1.0, 0.0, 0.0, -0.9238795325112867, 0.0, -0.3826834323650897, 0.4555849108714193],
+   [-0.555570233019602, 0.0, 0.8314696123025453, 0.7483226510722908, 0.0, 1.0, 0.0, 0.0, -0.8314696123025453, 0.0, -0.555570233019602, 0.2999867902823583],
+   [-0.7071067811865475, 0.0, 0.7071067811865476, 0.6363961030678928, 0.0, 1.0, 0.0, 0.0, -0.7071067811865476, 0.0, -0.7071067811865475, 0.1636038969321073],
+   [-0.8314696123025453, 0.0, 0.5555702330196022, 0.500013209717642, 0.0, 1.0, 0.0, 0.0, -0.5555702330196022, 0.0, -0.8314696123025453, 0.051677348927709255],
+   [-0.9238795325112867, 0.0, 0.3826834323650899, 0.34441508912858093, 0.0, 1.0, 0.0, 0.0, -0.3826834323650899, 0.0, -0.9238795325112867, -0.031491579260158],
+   [-0.9807852804032304, 0.0, 0.1950903220161286, 0.17558128981451576, 0.0, 1.0, 0.0, 0.0, -0.1950903220161286, 0.0, -0.9807852804032304, -0.08270675236290737]
+ ]
examples/camera_poses/test_camera_Round-RI_90.json ADDED
@@ -0,0 +1,226 @@
+ [
+   [1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.7000000000000002],
+   [0.9951847266721969, 0.0, 0.0980171403295606, 0.08821542629660455, 0.0, 1.0, 0.0, 0.0, -0.0980171403295606, 0.0, 0.9951847266721969, 1.6956662540049772],
+   [0.9807852804032304, 0.0, 0.19509032201612825, 0.17558128981451543, 0.0, 1.0, 0.0, 0.0, -0.19509032201612825, 0.0, 0.9807852804032304, 1.6827067523629076],
+   [0.9569403357322088, 0.0, 0.29028467725446233, 0.2612562095290161, 0.0, 1.0, 0.0, 0.0, -0.29028467725446233, 0.0, 0.9569403357322088, 1.661246302158988],
+   [0.9238795325112867, 0.0, 0.3826834323650898, 0.3444150891285808, 0.0, 1.0, 0.0, 0.0, -0.3826834323650898, 0.0, 0.9238795325112867, 1.631491579260158],
+   [0.881921264348355, 0.0, 0.47139673682599764, 0.4242570631433979, 0.0, 1.0, 0.0, 0.0, -0.47139673682599764, 0.0, 0.881921264348355, 1.5937291379135194],
+   [0.8314696123025452, 0.0, 0.5555702330196022, 0.500013209717642, 0.0, 1.0, 0.0, 0.0, -0.5555702330196022, 0.0, 0.8314696123025452, 1.5483226510722907],
+   [0.773010453362737, 0.0, 0.6343932841636455, 0.5709539557472809, 0.0, 1.0, 0.0, 0.0, -0.6343932841636455, 0.0, 0.773010453362737, 1.4957094080264635],
+   [0.7071067811865476, 0.0, 0.7071067811865476, 0.6363961030678928, 0.0, 1.0, 0.0, 0.0, -0.7071067811865476, 0.0, 0.7071067811865476, 1.436396103067893],
+   [0.6343932841636455, 0.0, 0.7730104533627369, 0.6957094080264632, 0.0, 1.0, 0.0, 0.0, -0.7730104533627369, 0.0, 0.6343932841636455, 1.370953955747281],
+   [0.5555702330196023, 0.0, 0.8314696123025452, 0.7483226510722907, 0.0, 1.0, 0.0, 0.0, -0.8314696123025452, 0.0, 0.5555702330196023, 1.3000132097176422],
+   [0.4713967368259978, 0.0, 0.8819212643483549, 0.7937291379135195, 0.0, 1.0, 0.0, 0.0, -0.8819212643483549, 0.0, 0.4713967368259978, 1.2242570631433982],
+   [0.38268343236508984, 0.0, 0.9238795325112867, 0.831491579260158, 0.0, 1.0, 0.0, 0.0, -0.9238795325112867, 0.0, 0.38268343236508984, 1.144415089128581],
+   [0.29028467725446233, 0.0, 0.9569403357322089, 0.861246302158988, 0.0, 1.0, 0.0, 0.0, -0.9569403357322089, 0.0, 0.29028467725446233, 1.0612562095290161],
+   [0.19509032201612833, 0.0, 0.9807852804032304, 0.8827067523629074, 0.0, 1.0, 0.0, 0.0, -0.9807852804032304, 0.0, 0.19509032201612833, 0.9755812898145155],
+   [0.09801714032956077, 0.0, 0.9951847266721968, 0.8956662540049771, 0.0, 1.0, 0.0, 0.0, -0.9951847266721968, 0.0, 0.09801714032956077, 0.8882154262966048]
+ ]
examples/camera_poses/test_camera_Round-ZoomIn.json ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ 1.0,
4
+ 0.0,
5
+ 0.0,
6
+ 0.0,
7
+ 0.0,
8
+ 1.0,
9
+ 0.0,
10
+ 0.0,
11
+ 0.0,
12
+ 0.0,
13
+ 1.0,
14
+ 0.81
15
+ ],
16
+ [
17
+ 0.9807852804032304,
18
+ 0.0,
19
+ 0.19509032201612825,
20
+ 0.0019509032201612826,
21
+ 0.0,
22
+ 1.0,
23
+ 0.0,
24
+ 0.0,
25
+ -0.19509032201612825,
26
+ 0.0,
27
+ 0.9807852804032304,
28
+ 0.8098078528040323
29
+ ],
30
+ [
31
+ 0.9238795325112867,
32
+ 0.0,
33
+ 0.3826834323650898,
34
+ 0.003826834323650898,
35
+ 0.0,
36
+ 1.0,
37
+ 0.0,
38
+ 0.0,
39
+ -0.3826834323650898,
40
+ 0.0,
41
+ 0.9238795325112867,
42
+ 0.8092387953251129
43
+ ],
44
+ [
45
+ 0.8314696123025452,
46
+ 0.0,
47
+ 0.5555702330196022,
48
+ 0.005555702330196022,
49
+ 0.0,
50
+ 1.0,
51
+ 0.0,
52
+ 0.0,
53
+ -0.5555702330196022,
54
+ 0.0,
55
+ 0.8314696123025452,
56
+ 0.8083146961230255
57
+ ],
58
+ [
59
+ 0.7071067811865476,
60
+ 0.0,
61
+ 0.7071067811865476,
62
+ 0.007071067811865476,
63
+ 0.0,
64
+ 1.0,
65
+ 0.0,
66
+ 0.0,
67
+ -0.7071067811865476,
68
+ 0.0,
69
+ 0.7071067811865476,
70
+ 0.8070710678118656
71
+ ],
72
+ [
73
+ 0.5555702330196023,
74
+ 0.0,
75
+ 0.8314696123025452,
76
+ 0.008314696123025453,
77
+ 0.0,
78
+ 1.0,
79
+ 0.0,
80
+ 0.0,
81
+ -0.8314696123025452,
82
+ 0.0,
83
+ 0.5555702330196023,
84
+ 0.805555702330196
85
+ ],
86
+ [
87
+ 0.38268343236508984,
88
+ 0.0,
89
+ 0.9238795325112867,
90
+ 0.009238795325112868,
91
+ 0.0,
92
+ 1.0,
93
+ 0.0,
94
+ 0.0,
95
+ -0.9238795325112867,
96
+ 0.0,
97
+ 0.38268343236508984,
98
+ 0.803826834323651
99
+ ],
100
+ [
101
+ 0.19509032201612833,
102
+ 0.0,
103
+ 0.9807852804032304,
104
+ 0.009807852804032305,
105
+ 0.0,
106
+ 1.0,
107
+ 0.0,
108
+ 0.0,
109
+ -0.9807852804032304,
110
+ 0.0,
111
+ 0.19509032201612833,
112
+ 0.8019509032201614
113
+ ],
114
+ [
115
+ 0.19509032201612833,
116
+ 0.0,
117
+ 0.9807852804032304,
118
+ 0.009807852804032305,
119
+ 0.0,
120
+ 1.0,
121
+ 0.0,
122
+ 0.0,
123
+ -0.9807852804032304,
124
+ 0.0,
125
+ 0.19509032201612833,
126
+ 0.8019509032201614
127
+ ],
128
+ [
129
+ 0.19509032201612833,
130
+ 0.0,
131
+ 0.9807852804032304,
132
+ 0.009807852804032305,
133
+ 0.0,
134
+ 1.0,
135
+ 0.0,
136
+ 0.0,
137
+ -0.9807852804032304,
138
+ 0.0,
139
+ 0.19509032201612833,
140
+ 0.7019509032201614
141
+ ],
142
+ [
143
+ 0.19509032201612833,
144
+ 0.0,
145
+ 0.9807852804032304,
146
+ 0.009807852804032305,
147
+ 0.0,
148
+ 1.0,
149
+ 0.0,
150
+ 0.0,
151
+ -0.9807852804032304,
152
+ 0.0,
153
+ 0.19509032201612833,
154
+ 0.6019509032201614
155
+ ],
156
+ [
157
+ 0.19509032201612833,
158
+ 0.0,
159
+ 0.9807852804032304,
160
+ 0.009807852804032305,
161
+ 0.0,
162
+ 1.0,
163
+ 0.0,
164
+ 0.0,
165
+ -0.9807852804032304,
166
+ 0.0,
167
+ 0.19509032201612833,
168
+ 0.5019509032201613
169
+ ],
170
+ [
171
+ 0.19509032201612833,
172
+ 0.0,
173
+ 0.9807852804032304,
174
+ 0.009807852804032305,
175
+ 0.0,
176
+ 1.0,
177
+ 0.0,
178
+ 0.0,
179
+ -0.9807852804032304,
180
+ 0.0,
181
+ 0.19509032201612833,
182
+ 0.4019509032201613
183
+ ],
184
+ [
185
+ 0.19509032201612833,
186
+ 0.0,
187
+ 0.9807852804032304,
188
+ 0.009807852804032305,
189
+ 0.0,
190
+ 1.0,
191
+ 0.0,
192
+ 0.0,
193
+ -0.9807852804032304,
194
+ 0.0,
195
+ 0.19509032201612833,
196
+ 0.3019509032201613
197
+ ],
198
+ [
199
+ 0.19509032201612833,
200
+ 0.0,
201
+ 0.9807852804032304,
202
+ 0.009807852804032305,
203
+ 0.0,
204
+ 1.0,
205
+ 0.0,
206
+ 0.0,
207
+ -0.9807852804032304,
208
+ 0.0,
209
+ 0.19509032201612833,
210
+ 0.2019509032201613
211
+ ],
212
+ [
213
+ 0.19509032201612833,
214
+ 0.0,
215
+ 0.9807852804032304,
216
+ 0.009807852804032305,
217
+ 0.0,
218
+ 1.0,
219
+ 0.0,
220
+ 0.0,
221
+ -0.9807852804032304,
222
+ 0.0,
223
+ 0.19509032201612833,
224
+ 0.10195090322016129
225
+ ]
226
+ ]
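Each of the synthetic pose files above is a JSON list with one row per frame, and each row is a 3x4 camera pose [R|T] flattened to 12 numbers (the convention elsewhere in this commit treats these as world-to-camera matrices). A minimal decoding sketch, using one of the files added above as an example path:

    import json
    import numpy as np

    with open("examples/camera_poses/test_camera_Round-ZoomIn.json") as f:
        rows = json.load(f)                    # 16 rows of 12 numbers

    RT = np.array(rows).reshape(-1, 3, 4)      # [16, 3, 4] = [R | T] per frame
    R, T = RT[:, :, :3], RT[:, :, 3]

    # The synthetic poses store exact rotations, so R @ R^T should be the identity
    # up to floating-point error.
    err = np.abs(R @ R.transpose(0, 2, 1) - np.eye(3)).max()
    print(RT.shape, "max |R R^T - I| =", err)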
examples/camera_poses/test_camera_SPIN-ACW-60.json ADDED
@@ -0,0 +1,226 @@
1
+ [
2
+ [
3
+ 1.0,
4
+ 0.0,
5
+ 0.0,
6
+ 0.0,
7
+ 0.0,
8
+ 1.0,
9
+ 0.0,
10
+ 0.0,
11
+ 0.0,
12
+ 0.0,
13
+ 1.0,
14
+ 0.0
15
+ ],
16
+ [
17
+ 0.9978589232386035,
18
+ -0.06540312923014306,
19
+ 0.0,
20
+ 0.0,
21
+ 0.06540312923014306,
22
+ 0.9978589232386035,
23
+ 0.0,
24
+ 0.0,
25
+ 0.0,
26
+ 0.0,
27
+ 1.0,
28
+ 0.0
29
+ ],
30
+ [
31
+ 0.9914448613738104,
32
+ -0.13052619222005157,
33
+ 0.0,
34
+ 0.0,
35
+ 0.13052619222005157,
36
+ 0.9914448613738104,
37
+ 0.0,
38
+ 0.0,
39
+ 0.0,
40
+ 0.0,
41
+ 1.0,
42
+ 0.0
43
+ ],
44
+ [
45
+ 0.9807852804032304,
46
+ -0.19509032201612825,
47
+ 0.0,
48
+ 0.0,
49
+ 0.19509032201612825,
50
+ 0.9807852804032304,
51
+ 0.0,
52
+ 0.0,
53
+ 0.0,
54
+ 0.0,
55
+ 1.0,
56
+ 0.0
57
+ ],
58
+ [
59
+ 0.9659258262890683,
60
+ -0.25881904510252074,
61
+ 0.0,
62
+ 0.0,
63
+ 0.25881904510252074,
64
+ 0.9659258262890683,
65
+ 0.0,
66
+ 0.0,
67
+ 0.0,
68
+ 0.0,
69
+ 1.0,
70
+ 0.0
71
+ ],
72
+ [
73
+ 0.9469301294951057,
74
+ -0.32143946530316153,
75
+ 0.0,
76
+ 0.0,
77
+ 0.32143946530316153,
78
+ 0.9469301294951057,
79
+ 0.0,
80
+ 0.0,
81
+ 0.0,
82
+ 0.0,
83
+ 1.0,
84
+ 0.0
85
+ ],
86
+ [
87
+ 0.9238795325112867,
88
+ -0.3826834323650898,
89
+ 0.0,
90
+ 0.0,
91
+ 0.3826834323650898,
92
+ 0.9238795325112867,
93
+ 0.0,
94
+ 0.0,
95
+ 0.0,
96
+ 0.0,
97
+ 1.0,
98
+ 0.0
99
+ ],
100
+ [
101
+ 0.8968727415326884,
102
+ -0.44228869021900125,
103
+ 0.0,
104
+ 0.0,
105
+ 0.44228869021900125,
106
+ 0.8968727415326884,
107
+ 0.0,
108
+ 0.0,
109
+ 0.0,
110
+ 0.0,
111
+ 1.0,
112
+ 0.0
113
+ ],
114
+ [
115
+ 0.8660254037844387,
116
+ -0.49999999999999994,
117
+ 0.0,
118
+ 0.0,
119
+ 0.49999999999999994,
120
+ 0.8660254037844387,
121
+ 0.0,
122
+ 0.0,
123
+ 0.0,
124
+ 0.0,
125
+ 1.0,
126
+ 0.0
127
+ ],
128
+ [
129
+ 0.8314696123025452,
130
+ -0.5555702330196022,
131
+ 0.0,
132
+ 0.0,
133
+ 0.5555702330196022,
134
+ 0.8314696123025452,
135
+ 0.0,
136
+ 0.0,
137
+ 0.0,
138
+ 0.0,
139
+ 1.0,
140
+ 0.0
141
+ ],
142
+ [
143
+ 0.7933533402912353,
144
+ -0.6087614290087205,
145
+ 0.0,
146
+ 0.0,
147
+ 0.6087614290087205,
148
+ 0.7933533402912353,
149
+ 0.0,
150
+ 0.0,
151
+ 0.0,
152
+ 0.0,
153
+ 1.0,
154
+ 0.0
155
+ ],
156
+ [
157
+ 0.7518398074789774,
158
+ -0.6593458151000688,
159
+ 0.0,
160
+ 0.0,
161
+ 0.6593458151000688,
162
+ 0.7518398074789774,
163
+ 0.0,
164
+ 0.0,
165
+ 0.0,
166
+ 0.0,
167
+ 1.0,
168
+ 0.0
169
+ ],
170
+ [
171
+ 0.7071067811865476,
172
+ -0.7071067811865476,
173
+ 0.0,
174
+ 0.0,
175
+ 0.7071067811865476,
176
+ 0.7071067811865476,
177
+ 0.0,
178
+ 0.0,
179
+ 0.0,
180
+ 0.0,
181
+ 1.0,
182
+ 0.0
183
+ ],
184
+ [
185
+ 0.659345815100069,
186
+ -0.7518398074789773,
187
+ 0.0,
188
+ 0.0,
189
+ 0.7518398074789773,
190
+ 0.659345815100069,
191
+ 0.0,
192
+ 0.0,
193
+ 0.0,
194
+ 0.0,
195
+ 1.0,
196
+ 0.0
197
+ ],
198
+ [
199
+ 0.6087614290087207,
200
+ -0.7933533402912352,
201
+ 0.0,
202
+ 0.0,
203
+ 0.7933533402912352,
204
+ 0.6087614290087207,
205
+ 0.0,
206
+ 0.0,
207
+ 0.0,
208
+ 0.0,
209
+ 1.0,
210
+ 0.0
211
+ ],
212
+ [
213
+ 0.5555702330196024,
214
+ -0.8314696123025451,
215
+ 0.0,
216
+ 0.0,
217
+ 0.8314696123025451,
218
+ 0.5555702330196024,
219
+ 0.0,
220
+ 0.0,
221
+ 0.0,
222
+ 0.0,
223
+ 1.0,
224
+ 0.0
225
+ ]
226
+ ]
examples/camera_poses/test_camera_SPIN-CW-60.json ADDED
@@ -0,0 +1,226 @@
1
+ [
2
+ [
3
+ 1.0,
4
+ 0.0,
5
+ 0.0,
6
+ 0.0,
7
+ 0.0,
8
+ 1.0,
9
+ 0.0,
10
+ 0.0,
11
+ 0.0,
12
+ 0.0,
13
+ 1.0,
14
+ 0.0
15
+ ],
16
+ [
17
+ 0.9978589232386035,
18
+ 0.06540312923014306,
19
+ 0.0,
20
+ 0.0,
21
+ -0.06540312923014306,
22
+ 0.9978589232386035,
23
+ 0.0,
24
+ 0.0,
25
+ 0.0,
26
+ 0.0,
27
+ 1.0,
28
+ 0.0
29
+ ],
30
+ [
31
+ 0.9914448613738104,
32
+ 0.13052619222005157,
33
+ 0.0,
34
+ 0.0,
35
+ -0.13052619222005157,
36
+ 0.9914448613738104,
37
+ 0.0,
38
+ 0.0,
39
+ 0.0,
40
+ 0.0,
41
+ 1.0,
42
+ 0.0
43
+ ],
44
+ [
45
+ 0.9807852804032304,
46
+ 0.19509032201612825,
47
+ 0.0,
48
+ 0.0,
49
+ -0.19509032201612825,
50
+ 0.9807852804032304,
51
+ 0.0,
52
+ 0.0,
53
+ 0.0,
54
+ 0.0,
55
+ 1.0,
56
+ 0.0
57
+ ],
58
+ [
59
+ 0.9659258262890683,
60
+ 0.25881904510252074,
61
+ 0.0,
62
+ 0.0,
63
+ -0.25881904510252074,
64
+ 0.9659258262890683,
65
+ 0.0,
66
+ 0.0,
67
+ 0.0,
68
+ 0.0,
69
+ 1.0,
70
+ 0.0
71
+ ],
72
+ [
73
+ 0.9469301294951057,
74
+ 0.32143946530316153,
75
+ 0.0,
76
+ 0.0,
77
+ -0.32143946530316153,
78
+ 0.9469301294951057,
79
+ 0.0,
80
+ 0.0,
81
+ 0.0,
82
+ 0.0,
83
+ 1.0,
84
+ 0.0
85
+ ],
86
+ [
87
+ 0.9238795325112867,
88
+ 0.3826834323650898,
89
+ 0.0,
90
+ 0.0,
91
+ -0.3826834323650898,
92
+ 0.9238795325112867,
93
+ 0.0,
94
+ 0.0,
95
+ 0.0,
96
+ 0.0,
97
+ 1.0,
98
+ 0.0
99
+ ],
100
+ [
101
+ 0.8968727415326884,
102
+ 0.44228869021900125,
103
+ 0.0,
104
+ 0.0,
105
+ -0.44228869021900125,
106
+ 0.8968727415326884,
107
+ 0.0,
108
+ 0.0,
109
+ 0.0,
110
+ 0.0,
111
+ 1.0,
112
+ 0.0
113
+ ],
114
+ [
115
+ 0.8660254037844387,
116
+ 0.49999999999999994,
117
+ 0.0,
118
+ 0.0,
119
+ -0.49999999999999994,
120
+ 0.8660254037844387,
121
+ 0.0,
122
+ 0.0,
123
+ 0.0,
124
+ 0.0,
125
+ 1.0,
126
+ 0.0
127
+ ],
128
+ [
129
+ 0.8314696123025452,
130
+ 0.5555702330196022,
131
+ 0.0,
132
+ 0.0,
133
+ -0.5555702330196022,
134
+ 0.8314696123025452,
135
+ 0.0,
136
+ 0.0,
137
+ 0.0,
138
+ 0.0,
139
+ 1.0,
140
+ 0.0
141
+ ],
142
+ [
143
+ 0.7933533402912353,
144
+ 0.6087614290087205,
145
+ 0.0,
146
+ 0.0,
147
+ -0.6087614290087205,
148
+ 0.7933533402912353,
149
+ 0.0,
150
+ 0.0,
151
+ 0.0,
152
+ 0.0,
153
+ 1.0,
154
+ 0.0
155
+ ],
156
+ [
157
+ 0.7518398074789774,
158
+ 0.6593458151000688,
159
+ 0.0,
160
+ 0.0,
161
+ -0.6593458151000688,
162
+ 0.7518398074789774,
163
+ 0.0,
164
+ 0.0,
165
+ 0.0,
166
+ 0.0,
167
+ 1.0,
168
+ 0.0
169
+ ],
170
+ [
171
+ 0.7071067811865476,
172
+ 0.7071067811865476,
173
+ 0.0,
174
+ 0.0,
175
+ -0.7071067811865476,
176
+ 0.7071067811865476,
177
+ 0.0,
178
+ 0.0,
179
+ 0.0,
180
+ 0.0,
181
+ 1.0,
182
+ 0.0
183
+ ],
184
+ [
185
+ 0.659345815100069,
186
+ 0.7518398074789773,
187
+ 0.0,
188
+ 0.0,
189
+ -0.7518398074789773,
190
+ 0.659345815100069,
191
+ 0.0,
192
+ 0.0,
193
+ 0.0,
194
+ 0.0,
195
+ 1.0,
196
+ 0.0
197
+ ],
198
+ [
199
+ 0.6087614290087207,
200
+ 0.7933533402912352,
201
+ 0.0,
202
+ 0.0,
203
+ -0.7933533402912352,
204
+ 0.6087614290087207,
205
+ 0.0,
206
+ 0.0,
207
+ 0.0,
208
+ 0.0,
209
+ 1.0,
210
+ 0.0
211
+ ],
212
+ [
213
+ 0.5555702330196024,
214
+ 0.8314696123025451,
215
+ 0.0,
216
+ 0.0,
217
+ -0.8314696123025451,
218
+ 0.5555702330196024,
219
+ 0.0,
220
+ 0.0,
221
+ 0.0,
222
+ 0.0,
223
+ 1.0,
224
+ 0.0
225
+ ]
226
+ ]
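The two SPIN files differ only in the sign of the roll: frame i rotates the camera in-plane (about the z axis) by i * 60°/16 with zero translation. A sketch that reproduces the clockwise variant; the 60°/16 step size is inferred from the values above:

    import numpy as np

    def spin_pose(theta):
        # In-plane (z-axis) roll of `theta` radians, zero translation, as in SPIN-CW-60.
        c, s = np.cos(theta), np.sin(theta)
        R = np.array([[c,   s,   0.0],
                      [-s,  c,   0.0],
                      [0.0, 0.0, 1.0]])
        return np.concatenate([R, np.zeros((3, 1))], axis=1)   # [3, 4]

    step = np.deg2rad(60.0) / 16
    poses = np.stack([spin_pose(i * step) for i in range(16)])  # [16, 3, 4]
    print(poses[1, 0, :2])   # ~[0.99786, 0.06540], matching the second frame above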
examples/camera_poses/test_camera_U.json ADDED
@@ -0,0 +1,226 @@
1
+ [
2
+ [
3
+ 1.0,
4
+ 0.0,
5
+ 0.0,
6
+ 0.0,
7
+ 0.0,
8
+ 1.0,
9
+ 0.0,
10
+ 0.2,
11
+ 0.0,
12
+ 0.0,
13
+ 1.0,
14
+ 0.0
15
+ ],
16
+ [
17
+ 1.0,
18
+ 0.0,
19
+ 0.0,
20
+ 0.0,
21
+ 0.0,
22
+ 1.0,
23
+ 0.0,
24
+ 0.28750000000000003,
25
+ 0.0,
26
+ 0.0,
27
+ 1.0,
28
+ 0.0
29
+ ],
30
+ [
31
+ 1.0,
32
+ 0.0,
33
+ 0.0,
34
+ 0.0,
35
+ 0.0,
36
+ 1.0,
37
+ 0.0,
38
+ 0.37500000000000006,
39
+ 0.0,
40
+ 0.0,
41
+ 1.0,
42
+ 0.0
43
+ ],
44
+ [
45
+ 1.0,
46
+ 0.0,
47
+ 0.0,
48
+ 0.0,
49
+ 0.0,
50
+ 1.0,
51
+ 0.0,
52
+ 0.4625000000000001,
53
+ 0.0,
54
+ 0.0,
55
+ 1.0,
56
+ 0.0
57
+ ],
58
+ [
59
+ 1.0,
60
+ 0.0,
61
+ 0.0,
62
+ 0.0,
63
+ 0.0,
64
+ 1.0,
65
+ 0.0,
66
+ 0.55,
67
+ 0.0,
68
+ 0.0,
69
+ 1.0,
70
+ 0.0
71
+ ],
72
+ [
73
+ 1.0,
74
+ 0.0,
75
+ 0.0,
76
+ 0.0,
77
+ 0.0,
78
+ 1.0,
79
+ 0.0,
80
+ 0.6375000000000002,
81
+ 0.0,
82
+ 0.0,
83
+ 1.0,
84
+ 0.0
85
+ ],
86
+ [
87
+ 1.0,
88
+ 0.0,
89
+ 0.0,
90
+ 0.0,
91
+ 0.0,
92
+ 1.0,
93
+ 0.0,
94
+ 0.7250000000000001,
95
+ 0.0,
96
+ 0.0,
97
+ 1.0,
98
+ 0.0
99
+ ],
100
+ [
101
+ 1.0,
102
+ 0.0,
103
+ 0.0,
104
+ 0.0,
105
+ 0.0,
106
+ 1.0,
107
+ 0.0,
108
+ 0.8125000000000002,
109
+ 0.0,
110
+ 0.0,
111
+ 1.0,
112
+ 0.0
113
+ ],
114
+ [
115
+ 1.0,
116
+ 0.0,
117
+ 0.0,
118
+ 0.0,
119
+ 0.0,
120
+ 1.0,
121
+ 0.0,
122
+ 0.9000000000000001,
123
+ 0.0,
124
+ 0.0,
125
+ 1.0,
126
+ 0.0
127
+ ],
128
+ [
129
+ 1.0,
130
+ 0.0,
131
+ 0.0,
132
+ 0.0,
133
+ 0.0,
134
+ 1.0,
135
+ 0.0,
136
+ 0.9875000000000003,
137
+ 0.0,
138
+ 0.0,
139
+ 1.0,
140
+ 0.0
141
+ ],
142
+ [
143
+ 1.0,
144
+ 0.0,
145
+ 0.0,
146
+ 0.0,
147
+ 0.0,
148
+ 1.0,
149
+ 0.0,
150
+ 1.0750000000000002,
151
+ 0.0,
152
+ 0.0,
153
+ 1.0,
154
+ 0.0
155
+ ],
156
+ [
157
+ 1.0,
158
+ 0.0,
159
+ 0.0,
160
+ 0.0,
161
+ 0.0,
162
+ 1.0,
163
+ 0.0,
164
+ 1.1625000000000003,
165
+ 0.0,
166
+ 0.0,
167
+ 1.0,
168
+ 0.0
169
+ ],
170
+ [
171
+ 1.0,
172
+ 0.0,
173
+ 0.0,
174
+ 0.0,
175
+ 0.0,
176
+ 1.0,
177
+ 0.0,
178
+ 1.2500000000000002,
179
+ 0.0,
180
+ 0.0,
181
+ 1.0,
182
+ 0.0
183
+ ],
184
+ [
185
+ 1.0,
186
+ 0.0,
187
+ 0.0,
188
+ 0.0,
189
+ 0.0,
190
+ 1.0,
191
+ 0.0,
192
+ 1.3375000000000001,
193
+ 0.0,
194
+ 0.0,
195
+ 1.0,
196
+ 0.0
197
+ ],
198
+ [
199
+ 1.0,
200
+ 0.0,
201
+ 0.0,
202
+ 0.0,
203
+ 0.0,
204
+ 1.0,
205
+ 0.0,
206
+ 1.4250000000000003,
207
+ 0.0,
208
+ 0.0,
209
+ 1.0,
210
+ 0.0
211
+ ],
212
+ [
213
+ 1.0,
214
+ 0.0,
215
+ 0.0,
216
+ 0.0,
217
+ 0.0,
218
+ 1.0,
219
+ 0.0,
220
+ 1.5125000000000004,
221
+ 0.0,
222
+ 0.0,
223
+ 1.0,
224
+ 0.0
225
+ ]
226
+ ]
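test_camera_U keeps the rotation at the identity and simply translates the camera along +y in equal steps (0.2, 0.2875, ..., ~1.51). A sketch that rebuilds the same 16x12 layout; the start value and step size are read off the numbers above:

    import numpy as np

    rows = []
    for i in range(16):
        RT = np.hstack([np.eye(3), np.zeros((3, 1))])   # [3, 4] with identity rotation
        RT[1, 3] = 0.2 + 0.0875 * i                      # y translation grows linearly
        rows.append(RT.reshape(-1).tolist())             # flatten to the 12-number row format

    print(len(rows), len(rows[0]))                       # 16 rows of 12 values each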
examples/camera_poses/test_camera_b133a504fc90a2d1.json ADDED
@@ -0,0 +1 @@
1
+ [[0.9999999403953552, -3.2563843288535566e-10, 8.624932434919685e-10, -5.431840754965833e-09, -3.2563843288535566e-10, 1.0, -7.078895802870022e-10, -2.678919752696629e-09, 8.624932434919685e-10, -7.078895802870022e-10, 1.0, 7.303774474110014e-09], [0.9999998807907104, 0.00026603759033605456, -0.0003414043167140335, -0.030107486993074417, -0.0002659278397914022, 0.9999999403953552, 0.00032293255208060145, 0.011978899128735065, 0.0003414914826862514, -0.0003228419227525592, 0.9999999403953552, -0.07893969118595123], [0.9999994039535522, -0.0006866907933726907, -0.0008186722989194095, -0.050785429775714874, 0.0006877407431602478, 0.9999989867210388, 0.0012827541213482618, 0.023323407396674156, 0.0008177922572940588, -0.0012833180371671915, 0.9999988675117493, -0.1486724615097046], [0.999999463558197, -0.0005364732351154089, -0.0008647883078083396, -0.0673225075006485, 0.0005379383219406009, 0.9999984502792358, 0.0016958877677097917, 0.03169107437133789, 0.0008638793369755149, -0.0016963522648438811, 0.9999982118606567, -0.21477271616458893], [0.9999995231628418, -0.0005853328038938344, 0.000731398060452193, -0.08539554476737976, 0.0005843221442773938, 0.9999988675117493, 0.0013807439245283604, 0.037947509437799454, -0.0007322035962715745, -0.0013803173787891865, 0.9999987483024597, -0.2858566641807556], [0.9999935030937195, -0.0007727851625531912, 0.00352613371796906, -0.10591986775398254, 0.0007684561423957348, 0.9999989867210388, 0.0012290359009057283, 0.0426139310002327, -0.003527079476043582, -0.0012263187672942877, 0.999993085861206, -0.3645589053630829], [0.9999805688858032, -0.0012497249990701675, 0.006103655323386192, -0.12257411330938339, 0.0012419418198987842, 0.9999984502792358, 0.001278668874874711, 0.05143251642584801, -0.006105243694037199, -0.0012710647424682975, 0.9999805688858032, -0.44212815165519714], [0.9999697804450989, -0.001364423311315477, 0.0076533216051757336, -0.13916294276714325, 0.0013530527940019965, 0.9999980330467224, 0.0014907552395015955, 0.06076823174953461, -0.0076553407125175, -0.0014803558588027954, 0.9999696016311646, -0.5131306648254395], [0.9999656081199646, -0.001309191924519837, 0.00818517804145813, -0.1577269434928894, 0.0012902054004371166, 0.9999964833259583, 0.0023244714830070734, 0.07221967726945877, -0.008188189007341862, -0.0023138325195759535, 0.9999638199806213, -0.5706046223640442], [0.9999715089797974, -0.000632039678748697, 0.0075194560922682285, -0.1842648684978485, 0.0006101227481849492, 0.9999955892562866, 0.002916603581979871, 0.0780157595872879, -0.0075212628580629826, -0.0029119334649294615, 0.9999675154685974, -0.6207911372184753], [0.999971330165863, 9.749359742272645e-05, 0.007570990361273289, -0.2200128734111786, -0.00012177121971035376, 0.9999948740005493, 0.0032062893733382225, 0.07083319127559662, -0.007570638321340084, -0.003207120578736067, 0.9999662041664124, -0.671477735042572], [0.9999842047691345, 0.0011085873702540994, 0.005505275446921587, -0.265299528837204, -0.0011242963373661041, 0.9999953508377075, 0.002851187251508236, 0.06430258601903915, -0.00550208892673254, -0.002857332583516836, 0.9999808073043823, -0.7294802069664001], [0.9999891519546509, 0.0030521126464009285, -0.003507567336782813, -0.3109421133995056, -0.0030410285107791424, 0.9999904632568359, 0.003161099273711443, 0.06503432989120483, 0.003517185803502798, -0.0031503995414823294, 0.999988853931427, -0.7917969226837158], [0.9995473623275757, 0.00647346256300807, -0.029379382729530334, -0.3455933630466461, -0.006376372650265694, 0.9999739527702332, 
0.0033971993252635, 0.06981948018074036, 0.02940060943365097, -0.003208327107131481, 0.9995625615119934, -0.8474093675613403], [0.9966378808021545, 0.011493867263197899, -0.08112204819917679, -0.3555213510990143, -0.011167873628437519, 0.9999276399612427, 0.004471173509955406, 0.0734858587384224, 0.08116757124662399, -0.00355018163099885, 0.9966941475868225, -0.9062771201133728], [0.9889613389968872, 0.01762101612985134, -0.14712226390838623, -0.33937495946884155, -0.01693679392337799, 0.999839186668396, 0.0059022014029324055, 0.0776127353310585, 0.14720259606838226, -0.0033452697098255157, 0.9891006946563721, -0.9548948407173157]]
examples/camera_poses/test_camera_d9642c8efc01481d.json ADDED
@@ -0,0 +1 @@
1
+ [[1.0, -1.4532828274127496e-09, 3.928266045782891e-10, -8.700192566379883e-09, -1.4532828274127496e-09, 1.0, -1.7456260048565042e-10, 9.261455491405002e-11, 3.928266045782891e-10, -1.7456260048565042e-10, 1.0, -1.5881864712241622e-08], [0.9850640296936035, -0.04082970321178436, 0.16727784276008606, 0.20439539849758148, 0.03502606600522995, 0.9986825585365295, 0.03750050067901611, -0.15185177326202393, -0.16858860850334167, -0.03108130767941475, 0.9851963520050049, 0.10121102631092072], [0.9168016910552979, -0.09482394903898239, 0.387921541929245, 0.44365406036376953, 0.06679922342300415, 0.9941277503967285, 0.08513444662094116, -0.26796984672546387, -0.39371633529663086, -0.0521385557949543, 0.9177521467208862, 0.06753730028867722], [0.7643709182739258, -0.15802493691444397, 0.6251122951507568, 0.6479341983795166, 0.09576083719730377, 0.9865723252296448, 0.13230620324611664, -0.33551496267318726, -0.6376261115074158, -0.04126973822712898, 0.7692397236824036, -0.07632352411746979], [0.5624101758003235, -0.2065712958574295, 0.8006392121315002, 0.8095709085464478, 0.10458854585886002, 0.9782856106758118, 0.17893710732460022, -0.3449772298336029, -0.8202170729637146, -0.016898371279239655, 0.5718027949333191, -0.267223596572876], [0.3860713243484497, -0.23765023052692413, 0.8913311958312988, 1.017128586769104, 0.10842984914779663, 0.9712379574775696, 0.21198998391628265, -0.34939509630203247, -0.9160742163658142, 0.014803661964833736, 0.4007355272769928, -0.5046621561050415], [0.25270596146583557, -0.2560441493988037, 0.9330493807792664, 1.2611535787582397, 0.09987718611955643, 0.9661006331443787, 0.23806333541870117, -0.36809343099594116, -0.9623742699623108, 0.03303031623363495, 0.26971235871315, -0.8164812922477722], [0.12896282970905304, -0.26742202043533325, 0.9549105167388916, 1.4084848165512085, 0.09037449955940247, 0.9621138572692871, 0.25723403692245483, -0.3597045838832855, -0.9875227212905884, 0.05312592536211014, 0.14824506640434265, -1.1886308193206787], [-0.037818778306245804, -0.2752484083175659, 0.9606289863586426, 1.3755683898925781, 0.08146493136882782, 0.957267701625824, 0.277492493391037, -0.353816956281662, -0.9959584474563599, 0.08875200152397156, -0.013779602944850922, -1.6891316175460815], [-0.1970304548740387, -0.2752428948879242, 0.9409677982330322, 1.1786020994186401, 0.07280438393354416, 0.9530242681503296, 0.2940141260623932, -0.36850038170814514, -0.9776904582977295, 0.12643630802631378, -0.16773590445518494, -2.261430025100708], [-0.3677733540534973, -0.26668044924736023, 0.8908559679985046, 0.8089978098869324, 0.06243090331554413, 0.9487544894218445, 0.30978602170944214, -0.37012383341789246, -0.9278174042701721, 0.16954797506332397, -0.33227747678756714, -2.8139760494232178], [-0.5150132775306702, -0.2527574300765991, 0.8190696835517883, 0.4462648332118988, 0.050335872918367386, 0.9449706673622131, 0.32325950264930725, -0.381427526473999, -0.8557030558586121, 0.20771150290966034, -0.473949670791626, -3.2886314392089844], [-0.6352092623710632, -0.23657281696796417, 0.7352160215377808, 0.0826139897108078, 0.036419764161109924, 0.9416990280151367, 0.33447936177253723, -0.3719184398651123, -0.771480917930603, 0.23924078047275543, -0.5895600318908691, -3.677316904067993], [-0.7218965888023376, -0.21918001770973206, 0.6563730239868164, -0.2607730031013489, 0.024064550176262856, 0.9399895071983337, 0.340353786945343, -0.359468549489975, -0.6915825009346008, 0.26149556040763855, -0.6733006834983826, -3.9900803565979004], [-0.7831082344055176, -0.2019210308790207, 
0.5881916880607605, -0.609000563621521, 0.015634668990969658, 0.9391284584999084, 0.3432103097438812, -0.35386255383491516, -0.621688961982727, 0.2779669761657715, -0.732282280921936, -4.316582202911377], [-0.8348898887634277, -0.1848600059747696, 0.5184455513954163, -0.9689992070198059, 0.007868430577218533, 0.93780916929245, 0.3470619320869446, -0.3402447998523712, -0.5503608584403992, 0.2938378155231476, -0.7815127968788147, -4.561704635620117]]
examples/camera_poses/test_camera_d971457c81bca597.json ADDED
@@ -0,0 +1 @@
1
+ [[1.0, -1.9455232563858615e-11, -8.611562019034125e-10, -1.0473515388298438e-09, -1.9455232563858615e-11, 1.0, 8.674055917978762e-10, 1.8375034827045056e-09, -8.611562019034125e-10, 8.674055917978762e-10, 1.0, 8.45981773522908e-09], [0.9973401427268982, -0.0032562706619501114, 0.07281485944986343, -0.07197967171669006, 0.00329946493729949, 0.9999944567680359, -0.00047293686657212675, -0.0565447174012661, -0.07281292229890823, 0.0007119301008060575, 0.9973453879356384, -0.3188611567020416], [0.9906007051467896, -0.0066957552917301655, 0.13662146031856537, -0.15415722131729126, 0.006810691673308611, 0.9999766945838928, -0.00037385127507150173, -0.10014614462852478, -0.1366157978773117, 0.0013008243404328823, 0.9906232953071594, -0.6077051758766174], [0.9827210903167725, -0.009423106908798218, 0.18485280871391296, -0.23053960502147675, 0.009786692447960377, 0.9999515414237976, -0.0010545575059950352, -0.11272216588258743, -0.18483391404151917, 0.002845433307811618, 0.9827656745910645, -0.8897562623023987], [0.9756309986114502, -0.011318936944007874, 0.2191258817911148, -0.2929331362247467, 0.01199379749596119, 0.9999265670776367, -0.0017497432418167591, -0.10348402708768845, -0.21908999979496002, 0.004335256293416023, 0.9756950736045837, -1.1368639469146729], [0.9685831069946289, -0.012191502377390862, 0.24839124083518982, -0.379209041595459, 0.013217172585427761, 0.9999096393585205, -0.0024619612377136946, -0.12078691273927689, -0.24833877384662628, 0.005667644087225199, 0.9686566591262817, -1.3666095733642578], [0.9615083932876587, -0.012519675306975842, 0.2744903266429901, -0.5242342352867126, 0.013806014321744442, 0.9999009370803833, -0.0027547888457775116, -0.13015854358673096, -0.2744286358356476, 0.006438371259719133, 0.9615859389305115, -1.5762972831726074], [0.9528889656066895, -0.014083069749176502, 0.3029923439025879, -0.6702165007591248, 0.015383570455014706, 0.9998798370361328, -0.0019058430334553123, -0.14860114455223083, -0.3029291331768036, 0.00647716224193573, 0.9529911279678345, -1.7173497676849365], [0.9457509517669678, -0.015159872360527515, 0.3245387375354767, -0.8001917600631714, 0.016729505732655525, 0.9998579621315002, -0.0020466779824346304, -0.15521110594272614, -0.32446160912513733, 0.007365020923316479, 0.9458702206611633, -1.8571592569351196], [0.9417706727981567, -0.015540024265646935, 0.33589670062065125, -0.9244012236595154, 0.017394419759511948, 0.9998455047607422, -0.0025124624371528625, -0.1416105479001999, -0.3358057737350464, 0.008208892308175564, 0.9418954849243164, -1.9969842433929443], [0.9381415247917175, -0.016401933506131172, 0.34586337208747864, -1.0128529071807861, 0.01851990446448326, 0.9998245239257812, -0.0028197101783007383, -0.11503250896930695, -0.3457564413547516, 0.009050644934177399, 0.9382806420326233, -2.080850124359131], [0.9357938766479492, -0.016873901709914207, 0.3521435558795929, -1.1008079051971436, 0.01865064539015293, 0.9998247027397156, -0.0016533475136384368, -0.048780668526887894, -0.3520539104938507, 0.008114897646009922, 0.9359445571899414, -2.088500499725342], [0.9334627985954285, -0.016492612659931183, 0.3582947850227356, -1.2417148351669312, 0.017931735143065453, 0.9998390078544617, -0.0006939812446944416, 0.01625901833176613, -0.35822567343711853, 0.007072653621435165, 0.9336082339286804, -2.0250792503356934], [0.9281281232833862, -0.016213543713092804, 0.3719078302383423, -1.4365860223770142, 0.01746317930519581, 0.9998475313186646, 8.084020009846427e-06, 0.060201361775398254, -0.37185123562812805, 
0.006487190257757902, 0.9282696843147278, -1.8963005542755127], [0.9220678806304932, -0.017285069450736046, 0.3866421580314636, -1.604330062866211, 0.01880805380642414, 0.9998230934143066, -0.00015593231364618987, 0.0799601748585701, -0.38657107949256897, 0.007415766827762127, 0.9222298264503479, -1.7388361692428589], [0.9185494184494019, -0.018356993794441223, 0.3948797583580017, -1.7530856132507324, 0.019847355782985687, 0.9998030066490173, 0.0003104731731582433, 0.10000917315483093, -0.3948076665401459, 0.007552135270088911, 0.9187328219413757, -1.5337872505187988]]
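The three hash-named files above hold pose trajectories estimated from real videos, which is why their first frames are only approximately the identity. Assuming the world-to-camera convention used elsewhere in this commit, the camera centers can be recovered as C = -R^T t to inspect the path; the file path below is just one of the files added above:

    import json
    import numpy as np

    with open("examples/camera_poses/test_camera_d971457c81bca597.json") as f:
        RT = np.array(json.load(f)).reshape(-1, 3, 4)     # [t, 3, 4] poses

    R, t = RT[:, :, :3], RT[:, :, 3]
    # For a world-to-camera pose [R|t], the camera center in world coordinates is C = -R^T t.
    centers = -np.einsum("nij,nj->ni", R.transpose(0, 2, 1), t)
    print(centers[0], centers[-1])                         # start vs. end of the trajectory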
gradio_utils/__pycache__/camera_utils.cpython-310.pyc ADDED
Binary file (4.79 kB).
 
gradio_utils/__pycache__/flow_utils.cpython-310.pyc ADDED
Binary file (2.65 kB).
 
gradio_utils/__pycache__/motionctrl_cmcm_gradio.cpython-310.pyc ADDED
Binary file (7.18 kB).
 
gradio_utils/__pycache__/traj_utils.cpython-310.pyc ADDED
Binary file (3.16 kB).
 
gradio_utils/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.04 kB).
 
gradio_utils/camera_utils.py ADDED
@@ -0,0 +1,157 @@
1
+ import copy
2
+ # import plotly.express as px
3
+ # import plotly.graph_objects as go
4
+ import json
5
+
6
+ import numpy as np
7
+
8
+ CAMERA_MOTION_MODE = ["Basic Camera Poses", "Provided Complex Camera Poses", "Custom Camera Poses"]
9
+
10
+ CAMERA = {
11
+ # T
12
+ "base_T_norm": 1.5,
13
+ "base_angle": np.pi/3,
14
+
15
+ "Pan Up": { "angle":[0., 0., 0.], "T":[0., 1., 0.]},
16
+ "Pan Down": { "angle":[0., 0., 0.], "T":[0.,-1.,0.]},
17
+ "Pan Left": { "angle":[0., 0., 0.], "T":[3.,0.,0.]},
18
+ "Pan Right": { "angle":[0., 0., 0.], "T": [-3.,0.,0.]},
19
+ "Zoom In": { "angle":[0., 0., 0.], "T": [0.,0.,-4.]},
20
+ "Zoom Out": { "angle":[0., 0., 0.], "T": [0.,0.,4.]},
21
+ "ACW": { "angle": [0., 0., 1.], "T":[0., 0., 0.]},
22
+ "CW": { "angle": [0., 0., -1.], "T":[0., 0., 0.]},
23
+ }
24
+
25
+ COMPLEX_CAMERA = {
26
+ "Pose_1": "examples/camera_poses/test_camera_1424acd0007d40b5.json",
27
+ "Pose_2": "examples/camera_poses/test_camera_d971457c81bca597.json",
28
+ "Pose_3": "examples/camera_poses/test_camera_Round-ZoomIn.json",
29
+ "Pose_4": "examples/camera_poses/test_camera_Round-RI_90.json",
30
+ "Pose_5": "examples/camera_poses/test_camera_Round-RI-120.json",
31
+ "Pose_6": "examples/camera_poses/test_camera_018f7907401f2fef.json",
32
+ "Pose_7": "examples/camera_poses/test_camera_088b93f15ca8745d.json",
33
+ "Pose_8": "examples/camera_poses/test_camera_b133a504fc90a2d1.json",
34
+ }
35
+
36
+
37
+
38
+ def compute_R_form_rad_angle(angles):
39
+ theta_x, theta_y, theta_z = angles
40
+ Rx = np.array([[1, 0, 0],
41
+ [0, np.cos(theta_x), -np.sin(theta_x)],
42
+ [0, np.sin(theta_x), np.cos(theta_x)]])
43
+
44
+ Ry = np.array([[np.cos(theta_y), 0, np.sin(theta_y)],
45
+ [0, 1, 0],
46
+ [-np.sin(theta_y), 0, np.cos(theta_y)]])
47
+
48
+ Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0],
49
+ [np.sin(theta_z), np.cos(theta_z), 0],
50
+ [0, 0, 1]])
51
+
52
+ # Compute the rotation matrix of the camera extrinsics (R = Rz @ Ry @ Rx)
53
+ R = np.dot(Rz, np.dot(Ry, Rx))
54
+ return R
55
+
56
+ def get_camera_motion(angle, T, speed, n=16):
57
+ RT = []
58
+ for i in range(n):
59
+ _angle = (i/n)*speed*(CAMERA["base_angle"])*angle
60
+ R = compute_R_form_rad_angle(_angle)
61
+ # _T = (i/n)*speed*(T.reshape(3,1))
62
+ _T=(i/n)*speed*(CAMERA["base_T_norm"])*(T.reshape(3,1))
63
+ _RT = np.concatenate([R,_T], axis=1)
64
+ RT.append(_RT)
65
+ RT = np.stack(RT)
66
+ return RT
67
+
68
+ def create_relative(RT_list, K_1=4.7, dataset="syn"):
69
+ RT = copy.deepcopy(RT_list[0])
70
+ R_inv = RT[:,:3].T
71
+ T = RT[:,-1]
72
+
73
+ temp = []
74
+ for _RT in RT_list:
75
+ _RT[:,:3] = np.dot(_RT[:,:3], R_inv)
76
+ _RT[:,-1] = _RT[:,-1] - np.dot(_RT[:,:3], T)
77
+ temp.append(_RT)
78
+ RT_list = temp
79
+
80
+ return RT_list
81
+
82
+ def combine_camera_motion(RT_0, RT_1):
83
+ RT = copy.deepcopy(RT_0[-1])
84
+ R = RT[:,:3]
85
+ R_inv = RT[:,:3].T
86
+ T = RT[:,-1]
87
+
88
+ temp = []
89
+ for _RT in RT_1:
90
+ _RT[:,:3] = np.dot(_RT[:,:3], R)
91
+ _RT[:,-1] = _RT[:,-1] + np.dot(np.dot(_RT[:,:3], R_inv), T)
92
+ temp.append(_RT)
93
+
94
+ RT_1 = np.stack(temp)
95
+
96
+ return np.concatenate([RT_0, RT_1], axis=0)
97
+
98
+ def process_camera(camera_dict, camera_args, num_frames=16, width=256, height=256):
99
+ speed = camera_dict['speed']
100
+ motion_list = camera_dict['motion']
101
+ mode = camera_dict['mode']
102
+
103
+ if mode == 'Customized Mode 3: RAW Camera Poses':
104
+ # print(camera_args)
105
+ RT = camera_args.strip().split()
106
+ assert(len(RT) == num_frames*12), "The number of camera poses should be equal to the number of frames"
107
+ RT = [float(x) for x in RT]
108
+ RT = np.array(RT).reshape(-1, 3, 4)
109
+ RT[:, :, -1] = RT[:, :, -1] * np.array([1.5, 1, 1.3]) * speed
110
+ return RT
111
+
112
+ if camera_dict['complex'] is not None:
113
+ with open(COMPLEX_CAMERA[camera_dict['complex']]) as f:
114
+ RT = json.load(f) # [16, 12]
115
+ if num_frames < len(RT):
116
+ half = (len(RT) - num_frames) // 2
117
+ RT = RT[half:half+num_frames]
118
+ RT = np.array(RT).reshape(-1, 3, 4)
119
+ RT[:, :, -1] = RT[:, :, -1] * np.array([1.5, 1, 1.3]) * speed
120
+ return RT
121
+
122
+ half_num_frames = num_frames//2
123
+
124
+
125
+
126
+ print(len(motion_list))
127
+ if len(motion_list) == 0:
128
+ angle = np.array([0,0,0])
129
+ T = np.array([0,0,0])
130
+ RT = get_camera_motion(angle, T, speed, num_frames)
131
+
132
+ elif len(motion_list) == 1:
133
+ angle = np.array(CAMERA[motion_list[0]]["angle"])
134
+ T = np.array(CAMERA[motion_list[0]]["T"])
135
+ print(angle, T)
136
+ RT = get_camera_motion(angle, T, speed, num_frames)
137
+
138
+
139
+ elif len(motion_list) == 2:
140
+ if mode == "Customized Mode 1: First A then B":
141
+ angle = np.array(CAMERA[motion_list[0]]["angle"])
142
+ T = np.array(CAMERA[motion_list[0]]["T"])
143
+ RT_0 = get_camera_motion(angle, T, speed, half_num_frames)
144
+
145
+ angle = np.array(CAMERA[motion_list[1]]["angle"])
146
+ T = np.array(CAMERA[motion_list[1]]["T"])
147
+ RT_1 = get_camera_motion(angle, T, speed, num_frames-half_num_frames)
148
+
149
+ RT = combine_camera_motion(RT_0, RT_1)
150
+
151
+ elif mode == "Customized Mode 2: Both A and B":
152
+ angle = np.array(CAMERA[motion_list[0]]["angle"]) + np.array(CAMERA[motion_list[1]]["angle"])
153
+ T = np.array(CAMERA[motion_list[0]]["T"]) + np.array(CAMERA[motion_list[1]]["T"])
154
+ RT = get_camera_motion(angle, T, speed, num_frames)
155
+
156
+ return RT
157
+
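A minimal usage sketch for process_camera above. The dict fields mirror what the Gradio app is expected to pass (this example dict is an assumption, not taken from app.py), and gradio_utils must be importable from the repository root:

    import numpy as np
    from gradio_utils.camera_utils import process_camera

    camera_dict = {
        "motion": ["Zoom In"],            # one of the CAMERA presets defined above
        "mode": "Basic Camera Poses",
        "speed": 1.0,
        "complex": None,
    }

    RT = process_camera(camera_dict, camera_args="", num_frames=16)
    print(RT.shape)                       # (16, 3, 4): one [R|T] pose per frame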
gradio_utils/flow_utils.py ADDED
@@ -0,0 +1,69 @@
1
+ import numpy as np
2
+
3
+
4
+ def sigma_matrix2(sig_x, sig_y, theta):
5
+ """Calculate the rotated sigma matrix (two dimensional matrix).
6
+ Args:
7
+ sig_x (float):
8
+ sig_y (float):
9
+ theta (float): Radian measurement.
10
+ Returns:
11
+ ndarray: Rotated sigma matrix.
12
+ """
13
+ d_matrix = np.array([[sig_x**2, 0], [0, sig_y**2]])
14
+ u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
15
+ return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))
16
+
17
+
18
+ def mesh_grid(kernel_size):
19
+ """Generate the mesh grid, centering at zero.
20
+ Args:
21
+ kernel_size (int):
22
+ Returns:
23
+ xy (ndarray): with the shape (kernel_size, kernel_size, 2)
24
+ xx (ndarray): with the shape (kernel_size, kernel_size)
25
+ yy (ndarray): with the shape (kernel_size, kernel_size)
26
+ """
27
+ ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
28
+ xx, yy = np.meshgrid(ax, ax)
29
+ xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)), yy.reshape(kernel_size * kernel_size,
30
+ 1))).reshape(kernel_size, kernel_size, 2)
31
+ return xy, xx, yy
32
+
33
+
34
+ def pdf2(sigma_matrix, grid):
35
+ """Calculate PDF of the bivariate Gaussian distribution.
36
+ Args:
37
+ sigma_matrix (ndarray): with the shape (2, 2)
38
+ grid (ndarray): generated by :func:`mesh_grid`,
39
+ with the shape (K, K, 2), K is the kernel size.
40
+ Returns:
41
+ kernel (ndarray): un-normalized kernel.
42
+ """
43
+ inverse_sigma = np.linalg.inv(sigma_matrix)
44
+ kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
45
+ return kernel
46
+
47
+ def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
48
+ """Generate a bivariate isotropic or anisotropic Gaussian kernel.
49
+ In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` are ignored.
50
+ Args:
51
+ kernel_size (int):
52
+ sig_x (float):
53
+ sig_y (float):
54
+ theta (float): Radian measurement.
55
+ grid (ndarray, optional): generated by :func:`mesh_grid`,
56
+ with the shape (K, K, 2), K is the kernel size. Default: None
57
+ isotropic (bool):
58
+ Returns:
59
+ kernel (ndarray): normalized kernel.
60
+ """
61
+ if grid is None:
62
+ grid, _, _ = mesh_grid(kernel_size)
63
+ if isotropic:
64
+ sigma_matrix = np.array([[sig_x**2, 0], [0, sig_x**2]])
65
+ else:
66
+ sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
67
+ kernel = pdf2(sigma_matrix, grid)
68
+ kernel = kernel / np.sum(kernel)
69
+ return kernel
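A quick sanity check for bivariate_Gaussian, using the same parameters as the blur_kernel built in gradio_utils/traj_utils.py later in this commit:

    import numpy as np
    from gradio_utils.flow_utils import bivariate_Gaussian

    kernel = bivariate_Gaussian(kernel_size=99, sig_x=10, sig_y=10, theta=0, isotropic=True)
    print(kernel.shape)                    # (99, 99)
    print(np.isclose(kernel.sum(), 1.0))   # True: the kernel is normalized to sum to 1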
gradio_utils/motionctrl_cmcm_gradio.py ADDED
@@ -0,0 +1,276 @@
1
+ import argparse
2
+ import datetime
3
+ import json
4
+ import math
5
+ import os
6
+ import sys
7
+ import time
8
+ from glob import glob
9
+ from pathlib import Path
10
+ from typing import Optional
11
+
12
+ import cv2
13
+ import numpy as np
14
+ import torch
15
+ import torchvision
16
+ from einops import rearrange, repeat
17
+ from fire import Fire
18
+ from omegaconf import OmegaConf
19
+ from PIL import Image
20
+ from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor
21
+ import tempfile
22
+
23
+ sys.path.insert(1, os.path.join(sys.path[0], '..'))
24
+ from sgm.util import default, instantiate_from_config
25
+
26
+
27
+
28
+ def to_relative_RT2(org_pose, keyframe_idx=0, keyframe_zero=False):
29
+ org_pose = org_pose.reshape(-1, 3, 4) # [t, 3, 4]
30
+ R_dst = org_pose[:, :, :3]
31
+ T_dst = org_pose[:, :, 3:]
32
+
33
+ R_src = R_dst[keyframe_idx: keyframe_idx+1].repeat(org_pose.shape[0], axis=0) # [t, 3, 3]
34
+ T_src = T_dst[keyframe_idx: keyframe_idx+1].repeat(org_pose.shape[0], axis=0)
35
+
36
+ R_src_inv = R_src.transpose(0, 2, 1) # [t, 3, 3]
37
+
38
+ R_rel = R_dst @ R_src_inv # [t, 3, 3]
39
+ T_rel = T_dst - R_rel@T_src
40
+
41
+ RT_rel = np.concatenate([R_rel, T_rel], axis=-1) # [t, 3, 4]
42
+ RT_rel = RT_rel.reshape(-1, 12) # [t, 12]
43
+
44
+ if keyframe_zero:
45
+ RT_rel[keyframe_idx] = np.zeros_like(RT_rel[keyframe_idx])
46
+
47
+ return RT_rel
48
+
49
+ def build_model(config, ckpt, device, num_frames, num_steps):
50
+ num_frames = default(num_frames, 14)
51
+ num_steps = default(num_steps, 25)
52
+ model_config = default(config, "configs/inference/config_motionctrl_cmcm.yaml")
53
+
54
+ print(f"Loading model from {ckpt}")
55
+ model, filter = load_model(
56
+ model_config,
57
+ ckpt,
58
+ device,
59
+ num_frames,
60
+ num_steps,
61
+ )
62
+
63
+ model.eval()
64
+
65
+ return model
66
+
67
+ def motionctrl_sample(
68
+ model,
69
+ image: Image = None, # Can either be image file or folder with image files
70
+ RT: np.ndarray = None,
71
+ num_frames: Optional[int] = None,
72
+ fps_id: int = 6,
73
+ motion_bucket_id: int = 127,
74
+ cond_aug: float = 0.02,
75
+ seed: int = 23,
76
+ decoding_t: int = 1, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
77
+ save_fps: int = 10,
78
+ sample_num: int = 1,
79
+ device: str = "cuda",
80
+ ):
81
+ """
82
+ Generate a single video sample conditioned on the input `image` and the per-frame camera poses `RT`.
83
+ If you run out of VRAM, try decreasing `decoding_t`.
84
+ """
85
+
86
+ torch.manual_seed(seed)
87
+
88
+ w, h = image.size
89
+
90
+ # RT: [t, 3, 4]
91
+ # RT = RT.reshape(-1, 3, 4) # [t, 3, 4]
92
+ # adaptive to different spatial ratio
93
+ # base_len = min(w, h) * 0.5
94
+ # K = np.array([[w/base_len, 0, w/base_len],
95
+ # [0, h/base_len, h/base_len],
96
+ # [0, 0, 1]])
97
+ # for i in range(RT.shape[0]):
98
+ # RT[i,:,:] = np.dot(K, RT[i,:,:])
99
+
100
+ RT = to_relative_RT2(RT) # [t, 12]
101
+ RT = torch.tensor(RT).float().to(device) # [t, 12]
102
+ RT = RT.unsqueeze(0).repeat(2,1,1)
103
+
104
+ if h % 64 != 0 or w % 64 != 0:
105
+ width, height = map(lambda x: x - x % 64, (w, h))
106
+ image = image.resize((width, height))
107
+ print(
108
+ f"WARNING: Your image is of size {h}x{w} which is not divisible by 64. We are resizing to {height}x{width}!"
109
+ )
110
+
111
+ image = ToTensor()(image)
112
+ image = image * 2.0 - 1.0
113
+
114
+ image = image.unsqueeze(0).to(device)
115
+ H, W = image.shape[2:]
116
+ assert image.shape[1] == 3
117
+ F = 8
118
+ C = 4
119
+ shape = (num_frames, C, H // F, W // F)
120
+
121
+ if motion_bucket_id > 255:
122
+ print(
123
+ "WARNING: High motion bucket! This may lead to suboptimal performance."
124
+ )
125
+
126
+ if fps_id < 5:
127
+ print("WARNING: Small fps value! This may lead to suboptimal performance.")
128
+
129
+ if fps_id > 30:
130
+ print("WARNING: Large fps value! This may lead to suboptimal performance.")
131
+
132
+ value_dict = {}
133
+ value_dict["motion_bucket_id"] = motion_bucket_id
134
+ value_dict["fps_id"] = fps_id
135
+ value_dict["cond_aug"] = cond_aug
136
+ value_dict["cond_frames_without_noise"] = image
137
+ value_dict["cond_frames"] = image + cond_aug * torch.randn_like(image)
138
+
139
+ with torch.no_grad():
140
+ with torch.autocast(device):
141
+ batch, batch_uc = get_batch(
142
+ get_unique_embedder_keys_from_conditioner(model.conditioner),
143
+ value_dict,
144
+ [1, num_frames],
145
+ T=num_frames,
146
+ device=device,
147
+ )
148
+ c, uc = model.conditioner.get_unconditional_conditioning(
149
+ batch,
150
+ batch_uc=batch_uc,
151
+ force_uc_zero_embeddings=[
152
+ "cond_frames",
153
+ "cond_frames_without_noise",
154
+ ],
155
+ )
156
+
157
+ for k in ["crossattn", "concat"]:
158
+ uc[k] = repeat(uc[k], "b ... -> b t ...", t=num_frames)
159
+ uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=num_frames)
160
+ c[k] = repeat(c[k], "b ... -> b t ...", t=num_frames)
161
+ c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=num_frames)
162
+
163
+
164
+
165
+ additional_model_inputs = {}
166
+ additional_model_inputs["image_only_indicator"] = torch.zeros(
167
+ 2, num_frames
168
+ ).to(device)
169
+ #additional_model_inputs["image_only_indicator"][:,0] = 1
170
+ additional_model_inputs["num_video_frames"] = batch["num_video_frames"]
171
+
172
+
173
+ additional_model_inputs["RT"] = RT.clone()
174
+
175
+ def denoiser(input, sigma, c):
176
+ return model.denoiser(
177
+ model.model, input, sigma, c, **additional_model_inputs
178
+ )
179
+
180
+ results = []
181
+ for j in range(sample_num):
182
+ randn = torch.randn(shape, device=device)
183
+ samples_z = model.sampler(denoiser, randn, cond=c, uc=uc)
184
+ model.en_and_decode_n_samples_a_time = decoding_t
185
+ samples_x = model.decode_first_stage(samples_z)
186
+ samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0) # [1*t, c, h, w]
187
+ results.append(samples)
188
+
189
+ samples = torch.stack(results, dim=0) # [sample_num, t, c, h, w]
190
+ samples = samples.data.cpu()
191
+
192
+ video_path = tempfile.NamedTemporaryFile(suffix='.mp4').name
193
+ save_results(samples, video_path, fps=save_fps)
194
+
195
+ return video_path
196
+
197
+ def save_results(results, filename, fps=10):
198
+ video = results.permute(1, 0, 2, 3, 4) # [t, sample_num, c, h, w]
199
+ frame_grids = [torchvision.utils.make_grid(framesheet, nrow=int(video.shape[1])) for framesheet in video] #[3, 1*h, n*w]
200
+ grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [t, 3, n*h, w]
201
+ # already in [0,1]
202
+ grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1)
203
+ torchvision.io.write_video(filename, grid, fps=fps, video_codec='h264', options={'crf': '10'})
204
+
205
+ def get_unique_embedder_keys_from_conditioner(conditioner):
206
+ return list(set([x.input_key for x in conditioner.embedders]))
207
+
208
+
209
+ def get_batch(keys, value_dict, N, T, device):
210
+ batch = {}
211
+ batch_uc = {}
212
+
213
+ for key in keys:
214
+ if key == "fps_id":
215
+ batch[key] = (
216
+ torch.tensor([value_dict["fps_id"]])
217
+ .to(device)
218
+ .repeat(int(math.prod(N)))
219
+ )
220
+ elif key == "motion_bucket_id":
221
+ batch[key] = (
222
+ torch.tensor([value_dict["motion_bucket_id"]])
223
+ .to(device)
224
+ .repeat(int(math.prod(N)))
225
+ )
226
+ elif key == "cond_aug":
227
+ batch[key] = repeat(
228
+ torch.tensor([value_dict["cond_aug"]]).to(device),
229
+ "1 -> b",
230
+ b=math.prod(N),
231
+ )
232
+ elif key == "cond_frames":
233
+ batch[key] = repeat(value_dict["cond_frames"], "1 ... -> b ...", b=N[0])
234
+ elif key == "cond_frames_without_noise":
235
+ batch[key] = repeat(
236
+ value_dict["cond_frames_without_noise"], "1 ... -> b ...", b=N[0]
237
+ )
238
+ else:
239
+ batch[key] = value_dict[key]
240
+
241
+ if T is not None:
242
+ batch["num_video_frames"] = T
243
+
244
+ for key in batch.keys():
245
+ if key not in batch_uc and isinstance(batch[key], torch.Tensor):
246
+ batch_uc[key] = torch.clone(batch[key])
247
+ return batch, batch_uc
248
+
249
+
250
+ def load_model(
251
+ config: str,
252
+ ckpt: str,
253
+ device: str,
254
+ num_frames: int,
255
+ num_steps: int,
256
+ ):
257
+
258
+ config = OmegaConf.load(config)
259
+ config.model.params.ckpt_path = ckpt
260
+ if device == "cuda":
261
+ config.model.params.conditioner_config.params.emb_models[
262
+ 0
263
+ ].params.open_clip_embedding_config.params.init_device = device
264
+
265
+ config.model.params.sampler_config.params.num_steps = num_steps
266
+ config.model.params.sampler_config.params.guider_config.params.num_frames = (
267
+ num_frames
268
+ )
269
+
270
+ model = instantiate_from_config(config.model)
271
+
272
+ model = model.to(device).eval()
273
+
274
+ filter = None #DeepFloydDataFiltering(verbose=False, device=device)
275
+ return model, filter
276
+
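to_relative_RT2 above re-expresses every pose relative to the keyframe, so the keyframe itself becomes the identity rotation with zero translation. A small numeric sketch that restates the same algebra with NumPy only (so it runs without the sgm dependency); the two toy poses are hypothetical values chosen just to exercise the formula:

    import numpy as np

    R0, T0 = np.eye(3), np.array([[0.1], [0.0], [0.5]])
    theta = np.deg2rad(10.0)
    R1 = np.array([[np.cos(theta), 0.0, np.sin(theta)],
                   [0.0, 1.0, 0.0],
                   [-np.sin(theta), 0.0, np.cos(theta)]])
    T1 = np.array([[0.2], [0.0], [0.9]])

    # Same rule as to_relative_RT2 with keyframe_idx=0:
    #   R_rel = R_dst @ R_src^T,  T_rel = T_dst - R_rel @ T_src
    R_rel0, T_rel0 = R0 @ R0.T, T0 - (R0 @ R0.T) @ T0
    R_rel1, T_rel1 = R1 @ R0.T, T1 - (R1 @ R0.T) @ T0

    assert np.allclose(R_rel0, np.eye(3)) and np.allclose(T_rel0, 0.0)
    print(np.hstack([R_rel1, T_rel1]))     # frame 1 expressed relative to frame 0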
gradio_utils/traj_utils.py ADDED
@@ -0,0 +1,104 @@
1
+ import cv2
2
+ import numpy as np
3
+
4
+ from gradio_utils.flow_utils import bivariate_Gaussian
5
+
6
+ OBJECT_MOTION_MODE = ["Provided Trajectory", "Custom Trajectory"]
7
+
8
+ PROVIDED_TRAJS = {
9
+ "horizon_1": "examples/trajectories/horizon_2.txt",
10
+ "swaying_1": "examples/trajectories/shake_1.txt",
11
+ "swaying_2": "examples/trajectories/shake_2.txt",
12
+ "swaying_3": "examples/trajectories/shaking_10.txt",
13
+ "curve_1": "examples/trajectories/curve_1.txt",
14
+ "curve_2": "examples/trajectories/curve_2.txt",
15
+ "curve_3": "examples/trajectories/curve_3.txt",
16
+ "curve_4": "examples/trajectories/curve_4.txt",
17
+ }
18
+
19
+
20
+ def read_points(file, video_len=16, reverse=False):
21
+ with open(file, 'r') as f:
22
+ lines = f.readlines()
23
+ points = []
24
+ for line in lines:
25
+ x, y = line.strip().split(',')
26
+ points.append((int(x), int(y)))
27
+ if reverse:
28
+ points = points[::-1]
29
+
30
+ if len(points) > video_len:
31
+ skip = len(points) // video_len
32
+ points = points[::skip]
33
+ points = points[:video_len]
34
+
35
+ return points
36
+
37
+ def get_provided_traj(traj_name):
38
+ traj = read_points(PROVIDED_TRAJS[traj_name])
39
+ # xrange from 256 to 1024
40
+ traj = [[int(1024*x/256), int(1024*y/256)] for x,y in traj]
41
+ return traj
42
+
43
+ blur_kernel = bivariate_Gaussian(99, 10, 10, 0, grid=None, isotropic=True)
44
+
45
+ def process_points(points):
46
+ frames = 16
47
+ default_points = [[512,512]]*16
48
+
49
+ if len(points) < 2:
50
+ return default_points
51
+ elif len(points) >= frames:
52
+ skip = len(points)//frames
53
+ return points[::skip][:15] + points[-1:]
54
+ else:
55
+ insert_num = frames - len(points)
56
+ insert_num_dict = {}
57
+ interval = len(points) - 1
58
+ n = insert_num // interval
59
+ m = insert_num % interval
60
+ for i in range(interval):
61
+ insert_num_dict[i] = n
62
+ for i in range(m):
63
+ insert_num_dict[i] += 1
64
+
65
+ res = []
66
+ for i in range(interval):
67
+ insert_points = []
68
+ x0,y0 = points[i]
69
+ x1,y1 = points[i+1]
70
+
71
+ delta_x = x1 - x0
72
+ delta_y = y1 - y0
73
+ for j in range(insert_num_dict[i]):
74
+ x = x0 + (j+1)/(insert_num_dict[i]+1)*delta_x
75
+ y = y0 + (j+1)/(insert_num_dict[i]+1)*delta_y
76
+ insert_points.append([int(x), int(y)])
77
+
78
+ res += points[i:i+1] + insert_points
79
+ res += points[-1:]
80
+ return res
81
+
82
+ def get_flow(points, video_len=16):
83
+ optical_flow = np.zeros((video_len, 256, 256, 2), dtype=np.float32)
84
+ for i in range(video_len-1):
85
+ p = points[i]
86
+ p1 = points[i+1]
87
+ optical_flow[i+1, p[1], p[0], 0] = p1[0] - p[0]
88
+ optical_flow[i+1, p[1], p[0], 1] = p1[1] - p[1]
89
+ for i in range(1, video_len):
90
+ optical_flow[i] = cv2.filter2D(optical_flow[i], -1, blur_kernel)
91
+
92
+
93
+ return optical_flow
94
+
95
+
96
+ def process_traj(points, device='cpu'):
97
+ xy_range = 1024
98
+ points = process_points(points)
99
+ points = [[int(256*x/xy_range), int(256*y/xy_range)] for x,y in points]
100
+
101
+ optical_flow = get_flow(points)
102
+ # optical_flow = torch.tensor(optical_flow).to(device)
103
+
104
+ return optical_flow
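process_traj above turns a hand-drawn trajectory (points on a 1024x1024 canvas) into a dense 16-frame optical-flow volume on a 256x256 grid. A usage sketch, assuming opencv-python is installed and the repository root is on the path; the two points are hypothetical clicks:

    from gradio_utils.traj_utils import process_traj

    points = [[100, 100], [900, 900]]      # top-left towards bottom-right on the 1024 canvas
    flow = process_traj(points)            # numpy array of shape (16, 256, 256, 2)
    print(flow.shape, float(abs(flow).max()))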
gradio_utils/utils.py ADDED
@@ -0,0 +1,175 @@
1
+ import numpy as np
2
+ import plotly.express as px
3
+ import plotly.graph_objects as go
4
+
5
+ def vis_camera(RT_list, rescale_T=1):
6
+ fig = go.Figure()
7
+ showticklabels = True
8
+ visible = True
9
+ scene_bounds = 2
10
+ base_radius = 2.5
11
+ zoom_scale = 1.5
12
+ fov_deg = 50.0
13
+
14
+ edges = [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3), (3, 1), (3, 4)]
15
+
16
+ colors = px.colors.qualitative.Plotly
17
+
18
+ cone_list = []
19
+ n = len(RT_list)
20
+ for i, RT in enumerate(RT_list):
21
+ R = RT[:,:3]
22
+ T = RT[:,-1]/rescale_T
23
+ cone = calc_cam_cone_pts_3d(R, T, fov_deg)
24
+ cone_list.append((cone, (i*1/n, "green"), f"view_{i}"))
25
+
26
+
27
+ for (cone, clr, legend) in cone_list:
28
+ for (i, edge) in enumerate(edges):
29
+ (x1, x2) = (cone[edge[0], 0], cone[edge[1], 0])
30
+ (y1, y2) = (cone[edge[0], 1], cone[edge[1], 1])
31
+ (z1, z2) = (cone[edge[0], 2], cone[edge[1], 2])
32
+ fig.add_trace(go.Scatter3d(
33
+ x=[x1, x2], y=[y1, y2], z=[z1, z2], mode='lines',
34
+ line=dict(color=clr, width=3),
35
+ name=legend, showlegend=(i == 0)))
36
+ fig.update_layout(
37
+ height=500,
38
+ autosize=True,
39
+ # hovermode=False,
40
+ margin=go.layout.Margin(l=0, r=0, b=0, t=0),
41
+
42
+ showlegend=True,
43
+ legend=dict(
44
+ yanchor='bottom',
45
+ y=0.01,
46
+ xanchor='right',
47
+ x=0.99,
48
+ ),
49
+ scene=dict(
50
+ aspectmode='manual',
51
+ aspectratio=dict(x=1, y=1, z=1.0),
52
+ camera=dict(
53
+ center=dict(x=0.0, y=0.0, z=0.0),
54
+ up=dict(x=0.0, y=-1.0, z=0.0),
55
+ eye=dict(x=scene_bounds/2, y=-scene_bounds/2, z=-scene_bounds/2),
56
+ ),
57
+
58
+ xaxis=dict(
59
+ range=[-scene_bounds, scene_bounds],
60
+ showticklabels=showticklabels,
61
+ visible=visible,
62
+ ),
63
+
64
+
65
+ yaxis=dict(
66
+ range=[-scene_bounds, scene_bounds],
67
+ showticklabels=showticklabels,
68
+ visible=visible,
69
+ ),
70
+
71
+
72
+ zaxis=dict(
73
+ range=[-scene_bounds, scene_bounds],
74
+ showticklabels=showticklabels,
75
+ visible=visible,
76
+ )
77
+ ))
78
+ return fig
79
+
80
+
81
+ def calc_cam_cone_pts_3d(R_W2C, T_W2C, fov_deg, scale=0.1, set_canonical=False, first_frame_RT=None):
82
+ fov_rad = np.deg2rad(fov_deg)
83
+ R_W2C_inv = np.linalg.inv(R_W2C)
84
+
85
+ # Camera pose center:
86
+ T = np.zeros_like(T_W2C) - T_W2C
87
+ T = np.dot(R_W2C_inv, T)
88
+ cam_x = T[0]
89
+ cam_y = T[1]
90
+ cam_z = T[2]
91
+ if set_canonical:
92
+ T = np.zeros_like(T_W2C)
93
+ T = np.dot(first_frame_RT[:,:3], T) + first_frame_RT[:,-1]
94
+ T = T - T_W2C
95
+ T = np.dot(R_W2C_inv, T)
96
+ cam_x = T[0]
97
+ cam_y = T[1]
98
+ cam_z = T[2]
99
+
100
+ # vertex
101
+ corn1 = np.array([np.tan(fov_rad / 2.0), 0.5*np.tan(fov_rad / 2.0), 1.0]) *scale
102
+ corn2 = np.array([-np.tan(fov_rad / 2.0), 0.5*np.tan(fov_rad / 2.0), 1.0]) *scale
103
+ corn3 = np.array([0, -0.25*np.tan(fov_rad / 2.0), 1.0]) *scale
104
+ corn4 = np.array([0, -0.5*np.tan(fov_rad / 2.0), 1.0]) *scale
105
+
106
+ corn1 = corn1 - T_W2C
107
+ corn2 = corn2 - T_W2C
108
+ corn3 = corn3 - T_W2C
109
+ corn4 = corn4 - T_W2C
110
+
111
+ corn1 = np.dot(R_W2C_inv, corn1)
112
+ corn2 = np.dot(R_W2C_inv, corn2)
113
+ corn3 = np.dot(R_W2C_inv, corn3)
114
+ corn4 = np.dot(R_W2C_inv, corn4)
115
+
116
+ # Now attach as offset to actual 3D camera position:
117
+ corn_x1 = corn1[0]
118
+ corn_y1 = corn1[1]
119
+ corn_z1 = corn1[2]
120
+
121
+ corn_x2 = corn2[0]
122
+ corn_y2 = corn2[1]
123
+ corn_z2 = corn2[2]
124
+
125
+ corn_x3 = corn3[0]
126
+ corn_y3 = corn3[1]
127
+ corn_z3 = corn3[2]
128
+
129
+ corn_x4 = corn4[0]
130
+ corn_y4 = corn4[1]
131
+ corn_z4 = corn4[2]
132
+
133
+
134
+ xs = [cam_x, corn_x1, corn_x2, corn_x3, corn_x4, ]
135
+ ys = [cam_y, corn_y1, corn_y2, corn_y3, corn_y4, ]
136
+ zs = [cam_z, corn_z1, corn_z2, corn_z3, corn_z4, ]
137
+
138
+ return np.array([xs, ys, zs]).T
139
+
140
+
141
+
142
+ # T_base = [
143
+ # [1.,0.,0.], ## positive x direction of W2C: camera moves left
144
+ # [-1.,0.,0.], ## negative x direction of W2C: camera moves right
145
+ # [0., 1., 0.], ## positive y direction of W2C: camera moves up
146
+ # [0.,-1.,0.], ## negative y direction of W2C: camera moves down
147
+ # [0.,0.,1.], ## positive z direction of W2C: camera moves backward (zoom out)
148
+ # [0.,0.,-1.], ## negative z direction of W2C: camera moves forward (zoom in)
149
+ # ]
150
+ # radius = 1
151
+ # n = 16
152
+ # # step =
153
+ # look_at = np.array([0, 0, 0.8]).reshape(3,1)
154
+ # # look_at = np.array([0, 0, 0.2]).reshape(3,1)
155
+
156
+ # T_list = []
157
+ # base_R = np.array([[1., 0., 0.],
158
+ # [0., 1., 0.],
159
+ # [0., 0., 1.]])
160
+ # res = []
161
+ # res_forsave = []
162
+ # T_range = 1.8
163
+
164
+
165
+
166
+ # for i in range(0, 16):
167
+ # # theta = (1)*np.pi*i/n
168
+
169
+ # R = base_R[:,:3]
170
+ # T = np.array([0.,0.,1.]).reshape(3,1) * (i/n)*2
171
+ # RT = np.concatenate([R,T], axis=1)
172
+ # res.append(RT)
173
+
174
+ # fig = vis_camera(res)
175
+
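vis_camera above draws one small frustum per frame; the frustum points come from calc_cam_cone_pts_3d, which maps a world-to-camera pose to five 3D points (the camera center plus four frustum corners). A minimal sketch of that helper, assuming plotly (imported at the top of this module) is installed; the pose values are illustrative:

    import numpy as np
    from gradio_utils.utils import calc_cam_cone_pts_3d

    R_W2C = np.eye(3)                       # identity rotation
    T_W2C = np.array([0.0, 0.0, 0.5])       # camera shifted along +z

    cone = calc_cam_cone_pts_3d(R_W2C, T_W2C, fov_deg=50.0)
    print(cone.shape)                       # (5, 3): camera center plus four frustum corners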
main.py ADDED
@@ -0,0 +1,943 @@
1
+ import argparse
2
+ import datetime
3
+ import glob
4
+ import inspect
5
+ import os
6
+ import sys
7
+ from inspect import Parameter
8
+ from typing import Union
9
+
10
+ import numpy as np
11
+ import pytorch_lightning as pl
12
+ import torch
13
+ import torchvision
14
+ import wandb
15
+ from matplotlib import pyplot as plt
16
+ from natsort import natsorted
17
+ from omegaconf import OmegaConf
18
+ from packaging import version
19
+ from PIL import Image
20
+ from pytorch_lightning import seed_everything
21
+ from pytorch_lightning.callbacks import Callback
22
+ from pytorch_lightning.loggers import WandbLogger
23
+ from pytorch_lightning.trainer import Trainer
24
+ from pytorch_lightning.utilities import rank_zero_only
25
+
26
+ from sgm.util import exists, instantiate_from_config, isheatmap
27
+
28
+ MULTINODE_HACKS = True
29
+
30
+
31
+ def default_trainer_args():
32
+ argspec = dict(inspect.signature(Trainer.__init__).parameters)
33
+ argspec.pop("self")
34
+ default_args = {
35
+ param: argspec[param].default
36
+ for param in argspec
37
+ if argspec[param] != Parameter.empty
38
+ }
39
+ return default_args
40
+
41
+
42
+ def get_parser(**parser_kwargs):
43
+ def str2bool(v):
44
+ if isinstance(v, bool):
45
+ return v
46
+ if v.lower() in ("yes", "true", "t", "y", "1"):
47
+ return True
48
+ elif v.lower() in ("no", "false", "f", "n", "0"):
49
+ return False
50
+ else:
51
+ raise argparse.ArgumentTypeError("Boolean value expected.")
52
+
53
+ parser = argparse.ArgumentParser(**parser_kwargs)
54
+ parser.add_argument(
55
+ "-n",
56
+ "--name",
57
+ type=str,
58
+ const=True,
59
+ default="",
60
+ nargs="?",
61
+ help="postfix for logdir",
62
+ )
63
+ parser.add_argument(
64
+ "--no_date",
65
+ type=str2bool,
66
+ nargs="?",
67
+ const=True,
68
+ default=False,
69
+ help="if True, skip date generation for logdir and only use naming via opt.base or opt.name (+ opt.postfix, optionally)",
70
+ )
71
+ parser.add_argument(
72
+ "-r",
73
+ "--resume",
74
+ type=str,
75
+ const=True,
76
+ default="",
77
+ nargs="?",
78
+ help="resume from logdir or checkpoint in logdir",
79
+ )
80
+ parser.add_argument(
81
+ "-b",
82
+ "--base",
83
+ nargs="*",
84
+ metavar="base_config.yaml",
85
+ help="paths to base configs. Loaded from left-to-right. "
86
+ "Parameters can be overwritten or added with command-line options of the form `--key value`.",
87
+ default=list(),
88
+ )
89
+ parser.add_argument(
90
+ "-t",
91
+ "--train",
92
+ type=str2bool,
93
+ const=True,
94
+ default=True,
95
+ nargs="?",
96
+ help="train",
97
+ )
98
+ parser.add_argument(
99
+ "--no-test",
100
+ type=str2bool,
101
+ const=True,
102
+ default=False,
103
+ nargs="?",
104
+ help="disable test",
105
+ )
106
+ parser.add_argument(
107
+ "-p", "--project", help="name of new or path to existing project"
108
+ )
109
+ parser.add_argument(
110
+ "-d",
111
+ "--debug",
112
+ type=str2bool,
113
+ nargs="?",
114
+ const=True,
115
+ default=False,
116
+ help="enable post-mortem debugging",
117
+ )
118
+ parser.add_argument(
119
+ "-s",
120
+ "--seed",
121
+ type=int,
122
+ default=23,
123
+ help="seed for seed_everything",
124
+ )
125
+ parser.add_argument(
126
+ "-f",
127
+ "--postfix",
128
+ type=str,
129
+ default="",
130
+ help="post-postfix for default name",
131
+ )
132
+ parser.add_argument(
133
+ "--projectname",
134
+ type=str,
135
+ default="stablediffusion",
136
+ )
137
+ parser.add_argument(
138
+ "-l",
139
+ "--logdir",
140
+ type=str,
141
+ default="logs",
142
+ help="directory for logging dat shit",
143
+ )
144
+ parser.add_argument(
145
+ "--scale_lr",
146
+ type=str2bool,
147
+ nargs="?",
148
+ const=True,
149
+ default=False,
150
+ help="scale base-lr by ngpu * batch_size * n_accumulate",
151
+ )
152
+ parser.add_argument(
153
+ "--legacy_naming",
154
+ type=str2bool,
155
+ nargs="?",
156
+ const=True,
157
+ default=False,
158
+ help="name run based on config file name if true, else by whole path",
159
+ )
160
+ parser.add_argument(
161
+ "--enable_tf32",
162
+ type=str2bool,
163
+ nargs="?",
164
+ const=True,
165
+ default=False,
166
+ help="enables the TensorFloat32 format both for matmuls and cuDNN for pytorch 1.12",
167
+ )
168
+ parser.add_argument(
169
+ "--startup",
170
+ type=str,
171
+ default=None,
172
+ help="Startuptime from distributed script",
173
+ )
174
+ parser.add_argument(
175
+ "--wandb",
176
+ type=str2bool,
177
+ nargs="?",
178
+ const=True,
179
+ default=False, # TODO: later default to True
180
+ help="log to wandb",
181
+ )
182
+ parser.add_argument(
183
+ "--no_base_name",
184
+ type=str2bool,
185
+ nargs="?",
186
+ const=True,
187
+ default=False, # TODO: later default to True
188
+ help="log to wandb",
189
+ )
190
+ if version.parse(torch.__version__) >= version.parse("2.0.0"):
191
+ parser.add_argument(
192
+ "--resume_from_checkpoint",
193
+ type=str,
194
+ default=None,
195
+ help="single checkpoint file to resume from",
196
+ )
197
+ default_args = default_trainer_args()
198
+ for key in default_args:
199
+ parser.add_argument("--" + key, default=default_args[key])
200
+ return parser
201
+
202
+
203
+ def get_checkpoint_name(logdir):
204
+ ckpt = os.path.join(logdir, "checkpoints", "last**.ckpt")
205
+ ckpt = natsorted(glob.glob(ckpt))
206
+ print('available "last" checkpoints:')
207
+ print(ckpt)
208
+ if len(ckpt) > 1:
209
+ print("got most recent checkpoint")
210
+ ckpt = sorted(ckpt, key=lambda x: os.path.getmtime(x))[-1]
211
+ print(f"Most recent ckpt is {ckpt}")
212
+ with open(os.path.join(logdir, "most_recent_ckpt.txt"), "w") as f:
213
+ f.write(ckpt + "\n")
214
+ try:
215
+ version = int(ckpt.split("/")[-1].split("-v")[-1].split(".")[0])
216
+ except Exception as e:
217
+ print("version confusion but not bad")
218
+ print(e)
219
+ version = 1
220
+ # version = last_version + 1
221
+ else:
222
+ # in this case, we only have one "last.ckpt"
223
+ ckpt = ckpt[0]
224
+ version = 1
225
+ melk_ckpt_name = f"last-v{version}.ckpt"
226
+ print(f"Current melk ckpt name: {melk_ckpt_name}")
227
+ return ckpt, melk_ckpt_name
228
+
229
+
230
+ class SetupCallback(Callback):
231
+ def __init__(
232
+ self,
233
+ resume,
234
+ now,
235
+ logdir,
236
+ ckptdir,
237
+ cfgdir,
238
+ config,
239
+ lightning_config,
240
+ debug,
241
+ ckpt_name=None,
242
+ ):
243
+ super().__init__()
244
+ self.resume = resume
245
+ self.now = now
246
+ self.logdir = logdir
247
+ self.ckptdir = ckptdir
248
+ self.cfgdir = cfgdir
249
+ self.config = config
250
+ self.lightning_config = lightning_config
251
+ self.debug = debug
252
+ self.ckpt_name = ckpt_name
253
+
254
+ def on_exception(self, trainer: pl.Trainer, pl_module, exception):
255
+ if not self.debug and trainer.global_rank == 0:
256
+ print("Summoning checkpoint.")
257
+ if self.ckpt_name is None:
258
+ ckpt_path = os.path.join(self.ckptdir, "last.ckpt")
259
+ else:
260
+ ckpt_path = os.path.join(self.ckptdir, self.ckpt_name)
261
+ trainer.save_checkpoint(ckpt_path)
262
+
263
+ def on_fit_start(self, trainer, pl_module):
264
+ if trainer.global_rank == 0:
265
+ # Create logdirs and save configs
266
+ os.makedirs(self.logdir, exist_ok=True)
267
+ os.makedirs(self.ckptdir, exist_ok=True)
268
+ os.makedirs(self.cfgdir, exist_ok=True)
269
+
270
+ if "callbacks" in self.lightning_config:
271
+ if (
272
+ "metrics_over_trainsteps_checkpoint"
273
+ in self.lightning_config["callbacks"]
274
+ ):
275
+ os.makedirs(
276
+ os.path.join(self.ckptdir, "trainstep_checkpoints"),
277
+ exist_ok=True,
278
+ )
279
+ print("Project config")
280
+ print(OmegaConf.to_yaml(self.config))
281
+ if MULTINODE_HACKS:
282
+ import time
283
+
284
+ time.sleep(5)
285
+ OmegaConf.save(
286
+ self.config,
287
+ os.path.join(self.cfgdir, "{}-project.yaml".format(self.now)),
288
+ )
289
+
290
+ print("Lightning config")
291
+ print(OmegaConf.to_yaml(self.lightning_config))
292
+ OmegaConf.save(
293
+ OmegaConf.create({"lightning": self.lightning_config}),
294
+ os.path.join(self.cfgdir, "{}-lightning.yaml".format(self.now)),
295
+ )
296
+
297
+ else:
298
+ # ModelCheckpoint callback created log directory --- remove it
299
+ if not MULTINODE_HACKS and not self.resume and os.path.exists(self.logdir):
300
+ dst, name = os.path.split(self.logdir)
301
+ dst = os.path.join(dst, "child_runs", name)
302
+ os.makedirs(os.path.split(dst)[0], exist_ok=True)
303
+ try:
304
+ os.rename(self.logdir, dst)
305
+ except FileNotFoundError:
306
+ pass
307
+
308
+
309
+ class ImageLogger(Callback):
310
+ def __init__(
311
+ self,
312
+ batch_frequency,
313
+ max_images,
314
+ clamp=True,
315
+ increase_log_steps=True,
316
+ rescale=True,
317
+ disabled=False,
318
+ log_on_batch_idx=False,
319
+ log_first_step=False,
320
+ log_images_kwargs=None,
321
+ log_before_first_step=False,
322
+ enable_autocast=True,
323
+ ):
324
+ super().__init__()
325
+ self.enable_autocast = enable_autocast
326
+ self.rescale = rescale
327
+ self.batch_freq = batch_frequency
328
+ self.max_images = max_images
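+ # log at exponentially spaced steps (1, 2, 4, ..., batch_freq) early in
+ # training, in addition to every batch_freq-th step (see check_frequency)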
329
+ self.log_steps = [2**n for n in range(int(np.log2(self.batch_freq)) + 1)]
330
+ if not increase_log_steps:
331
+ self.log_steps = [self.batch_freq]
332
+ self.clamp = clamp
333
+ self.disabled = disabled
334
+ self.log_on_batch_idx = log_on_batch_idx
335
+ self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
336
+ self.log_first_step = log_first_step
337
+ self.log_before_first_step = log_before_first_step
338
+
339
+ @rank_zero_only
340
+ def log_local(
341
+ self,
342
+ save_dir,
343
+ split,
344
+ images,
345
+ global_step,
346
+ current_epoch,
347
+ batch_idx,
348
+ pl_module: Union[None, pl.LightningModule] = None,
349
+ ):
350
+ root = os.path.join(save_dir, "images", split)
351
+ for k in images:
352
+ if isheatmap(images[k]):
353
+ fig, ax = plt.subplots()
354
+ ax = ax.matshow(
355
+ images[k].cpu().numpy(), cmap="hot", interpolation="lanczos"
356
+ )
357
+ plt.colorbar(ax)
358
+ plt.axis("off")
359
+
360
+ filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
361
+ k, global_step, current_epoch, batch_idx
362
+ )
363
+ os.makedirs(root, exist_ok=True)
364
+ path = os.path.join(root, filename)
365
+ plt.savefig(path)
366
+ plt.close()
367
+ # TODO: support wandb
368
+ else:
369
+ grid = torchvision.utils.make_grid(images[k], nrow=4)
370
+ if self.rescale:
371
+ grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
372
+ grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
373
+ grid = grid.numpy()
374
+ grid = (grid * 255).astype(np.uint8)
375
+ filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(
376
+ k, global_step, current_epoch, batch_idx
377
+ )
378
+ path = os.path.join(root, filename)
379
+ os.makedirs(os.path.split(path)[0], exist_ok=True)
380
+ img = Image.fromarray(grid)
381
+ img.save(path)
382
+ if exists(pl_module):
383
+ assert isinstance(
384
+ pl_module.logger, WandbLogger
385
+ ), "logger_log_image only supports WandbLogger currently"
386
+ pl_module.logger.log_image(
387
+ key=f"{split}/{k}",
388
+ images=[
389
+ img,
390
+ ],
391
+ step=pl_module.global_step,
392
+ )
393
+
394
+ @rank_zero_only
395
+ def log_img(self, pl_module, batch, batch_idx, split="train"):
396
+ check_idx = batch_idx if self.log_on_batch_idx else pl_module.global_step
397
+ if (
398
+ self.check_frequency(check_idx)
399
+ and hasattr(pl_module, "log_images") # batch_idx % self.batch_freq == 0
400
+ and callable(pl_module.log_images)
401
+ and
402
+ # batch_idx > 5 and
403
+ self.max_images > 0
404
+ ):
405
+ logger = type(pl_module.logger)
406
+ is_train = pl_module.training
407
+ if is_train:
408
+ pl_module.eval()
409
+
410
+ gpu_autocast_kwargs = {
411
+ "enabled": self.enable_autocast, # torch.is_autocast_enabled(),
412
+ "dtype": torch.get_autocast_gpu_dtype(),
413
+ "cache_enabled": torch.is_autocast_cache_enabled(),
414
+ }
415
+ with torch.no_grad(), torch.cuda.amp.autocast(**gpu_autocast_kwargs):
416
+ images = pl_module.log_images(
417
+ batch, split=split, **self.log_images_kwargs
418
+ )
419
+
420
+ for k in images:
421
+ N = min(images[k].shape[0], self.max_images)
422
+ if not isheatmap(images[k]):
423
+ images[k] = images[k][:N]
424
+ if isinstance(images[k], torch.Tensor):
425
+ images[k] = images[k].detach().float().cpu()
426
+ if self.clamp and not isheatmap(images[k]):
427
+ images[k] = torch.clamp(images[k], -1.0, 1.0)
428
+
429
+ self.log_local(
430
+ pl_module.logger.save_dir,
431
+ split,
432
+ images,
433
+ pl_module.global_step,
434
+ pl_module.current_epoch,
435
+ batch_idx,
436
+ pl_module=pl_module
437
+ if isinstance(pl_module.logger, WandbLogger)
438
+ else None,
439
+ )
440
+
441
+ if is_train:
442
+ pl_module.train()
443
+
444
+ def check_frequency(self, check_idx):
445
+ if ((check_idx % self.batch_freq) == 0 or (check_idx in self.log_steps)) and (
446
+ check_idx > 0 or self.log_first_step
447
+ ):
448
+ try:
449
+ self.log_steps.pop(0)
450
+ except IndexError as e:
451
+ print(e)
452
+ pass
453
+ return True
454
+ return False
455
+
456
+ @rank_zero_only
457
+ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
458
+ if not self.disabled and (pl_module.global_step > 0 or self.log_first_step):
459
+ self.log_img(pl_module, batch, batch_idx, split="train")
460
+
461
+ @rank_zero_only
462
+ def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
463
+ if self.log_before_first_step and pl_module.global_step == 0:
464
+ print(f"{self.__class__.__name__}: logging before training")
465
+ self.log_img(pl_module, batch, batch_idx, split="train")
466
+
467
+ @rank_zero_only
468
+ def on_validation_batch_end(
469
+ self, trainer, pl_module, outputs, batch, batch_idx, *args, **kwargs
470
+ ):
471
+ if not self.disabled and pl_module.global_step > 0:
472
+ self.log_img(pl_module, batch, batch_idx, split="val")
473
+ if hasattr(pl_module, "calibrate_grad_norm"):
474
+ if (
475
+ pl_module.calibrate_grad_norm and batch_idx % 25 == 0
476
+ ) and batch_idx > 0:
477
+ self.log_gradients(trainer, pl_module, batch_idx=batch_idx)
478
+
479
+
480
+ @rank_zero_only
481
+ def init_wandb(save_dir, opt, config, group_name, name_str):
482
+ print(f"setting WANDB_DIR to {save_dir}")
483
+ os.makedirs(save_dir, exist_ok=True)
484
+
485
+ os.environ["WANDB_DIR"] = save_dir
486
+ if opt.debug:
487
+ wandb.init(project=opt.projectname, mode="offline", group=group_name)
488
+ else:
489
+ wandb.init(
490
+ project=opt.projectname,
491
+ config=config,
492
+ settings=wandb.Settings(code_dir="./sgm"),
493
+ group=group_name,
494
+ name=name_str,
495
+ )
496
+
497
+
498
+ if __name__ == "__main__":
499
+ # custom parser to specify config files, train, test and debug mode,
500
+ # postfix, resume.
501
+ # `--key value` arguments are interpreted as arguments to the trainer.
502
+ # `nested.key=value` arguments are interpreted as config parameters.
503
+ # configs are merged from left-to-right followed by command line parameters.
504
+
505
+ # model:
506
+ # base_learning_rate: float
507
+ # target: path to lightning module
508
+ # params:
509
+ # key: value
510
+ # data:
511
+ # target: main.DataModuleFromConfig
512
+ # params:
513
+ # batch_size: int
514
+ # wrap: bool
515
+ # train:
516
+ # target: path to train dataset
517
+ # params:
518
+ # key: value
519
+ # validation:
520
+ # target: path to validation dataset
521
+ # params:
522
+ # key: value
523
+ # test:
524
+ # target: path to test dataset
525
+ # params:
526
+ # key: value
527
+ # lightning: (optional, has sane defaults and can be specified on cmdline)
528
+ # trainer:
529
+ # additional arguments to trainer
530
+ # logger:
531
+ # logger to instantiate
532
+ # modelcheckpoint:
533
+ # modelcheckpoint to instantiate
534
+ # callbacks:
535
+ # callback1:
536
+ # target: importpath
537
+ # params:
538
+ # key: value
539
+
540
+ now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
541
+
542
+ # add cwd for convenience and to make classes in this file available when
543
+ # running as `python main.py`
544
+ # (in particular `main.DataModuleFromConfig`)
545
+ sys.path.append(os.getcwd())
546
+
547
+ parser = get_parser()
548
+
549
+ opt, unknown = parser.parse_known_args()
550
+
551
+ if opt.name and opt.resume:
552
+ raise ValueError(
553
+ "-n/--name and -r/--resume cannot be specified both."
554
+ "If you want to resume training in a new log folder, "
555
+ "use -n/--name in combination with --resume_from_checkpoint"
556
+ )
557
+ melk_ckpt_name = None
558
+ name = None
559
+ if opt.resume:
560
+ if not os.path.exists(opt.resume):
561
+ raise ValueError("Cannot find {}".format(opt.resume))
562
+ if os.path.isfile(opt.resume):
563
+ paths = opt.resume.split("/")
564
+ # idx = len(paths)-paths[::-1].index("logs")+1
565
+ # logdir = "/".join(paths[:idx])
566
+ logdir = "/".join(paths[:-2])
567
+ ckpt = opt.resume
568
+ _, melk_ckpt_name = get_checkpoint_name(logdir)
569
+ else:
570
+ assert os.path.isdir(opt.resume), opt.resume
571
+ logdir = opt.resume.rstrip("/")
572
+ ckpt, melk_ckpt_name = get_checkpoint_name(logdir)
573
+
574
+ print("#" * 100)
575
+ print(f'Resuming from checkpoint "{ckpt}"')
576
+ print("#" * 100)
577
+
578
+ opt.resume_from_checkpoint = ckpt
579
+ base_configs = sorted(glob.glob(os.path.join(logdir, "configs/*.yaml")))
580
+ opt.base = base_configs + opt.base
581
+ _tmp = logdir.split("/")
582
+ nowname = _tmp[-1]
583
+ else:
584
+ if opt.name:
585
+ name = "_" + opt.name
586
+ elif opt.base:
587
+ if opt.no_base_name:
588
+ name = ""
589
+ else:
590
+ if opt.legacy_naming:
591
+ cfg_fname = os.path.split(opt.base[0])[-1]
592
+ cfg_name = os.path.splitext(cfg_fname)[0]
593
+ else:
594
+ assert "configs" in os.path.split(opt.base[0])[0], os.path.split(
595
+ opt.base[0]
596
+ )[0]
597
+ cfg_path = os.path.split(opt.base[0])[0].split(os.sep)[
598
+ os.path.split(opt.base[0])[0].split(os.sep).index("configs")
599
+ + 1 :
600
+ ] # cut away the first one (we assert all configs are in "configs")
601
+ cfg_name = os.path.splitext(os.path.split(opt.base[0])[-1])[0]
602
+ cfg_name = "-".join(cfg_path) + f"-{cfg_name}"
603
+ name = "_" + cfg_name
604
+ else:
605
+ name = ""
606
+ if not opt.no_date:
607
+ nowname = now + name + opt.postfix
608
+ else:
609
+ nowname = name + opt.postfix
610
+ if nowname.startswith("_"):
611
+ nowname = nowname[1:]
612
+ logdir = os.path.join(opt.logdir, nowname)
613
+ print(f"LOGDIR: {logdir}")
614
+
615
+ ckptdir = os.path.join(logdir, "checkpoints")
616
+ cfgdir = os.path.join(logdir, "configs")
617
+ seed_everything(opt.seed, workers=True)
618
+
619
+ # move before model init, in case a torch.compile(...) is called somewhere
620
+ if opt.enable_tf32:
621
+ # pt_version = version.parse(torch.__version__)
622
+ torch.backends.cuda.matmul.allow_tf32 = True
623
+ torch.backends.cudnn.allow_tf32 = True
624
+ print(f"Enabling TF32 for PyTorch {torch.__version__}")
625
+ else:
626
+ print(f"Using default TF32 settings for PyTorch {torch.__version__}:")
627
+ print(
628
+ f"torch.backends.cuda.matmul.allow_tf32={torch.backends.cuda.matmul.allow_tf32}"
629
+ )
630
+ print(f"torch.backends.cudnn.allow_tf32={torch.backends.cudnn.allow_tf32}")
631
+
632
+ try:
633
+ # init and save configs
634
+ configs = [OmegaConf.load(cfg) for cfg in opt.base]
635
+ cli = OmegaConf.from_dotlist(unknown)
636
+ config = OmegaConf.merge(*configs, cli)
637
+ lightning_config = config.pop("lightning", OmegaConf.create())
638
+ # merge trainer cli with config
639
+ trainer_config = lightning_config.get("trainer", OmegaConf.create())
640
+
641
+ # default to gpu
642
+ trainer_config["accelerator"] = "gpu"
643
+ #
644
+ standard_args = default_trainer_args()
645
+ for k in standard_args:
646
+ if getattr(opt, k) != standard_args[k]:
647
+ trainer_config[k] = getattr(opt, k)
648
+
649
+ ckpt_resume_path = opt.resume_from_checkpoint
650
+
651
+ if not "devices" in trainer_config and trainer_config["accelerator"] != "gpu":
652
+ del trainer_config["accelerator"]
653
+ cpu = True
654
+ else:
655
+ gpuinfo = trainer_config["devices"]
656
+ print(f"Running on GPUs {gpuinfo}")
657
+ cpu = False
658
+ trainer_opt = argparse.Namespace(**trainer_config)
659
+ lightning_config.trainer = trainer_config
660
+
661
+ # model
662
+ model = instantiate_from_config(config.model)
663
+
664
+ # trainer and callbacks
665
+ trainer_kwargs = dict()
666
+
667
+ # default logger configs
668
+ default_logger_cfgs = {
669
+ "wandb": {
670
+ "target": "pytorch_lightning.loggers.WandbLogger",
671
+ "params": {
672
+ "name": nowname,
673
+ # "save_dir": logdir,
674
+ "offline": opt.debug,
675
+ "id": nowname,
676
+ "project": opt.projectname,
677
+ "log_model": False,
678
+ # "dir": logdir,
679
+ },
680
+ },
681
+ "csv": {
682
+ "target": "pytorch_lightning.loggers.CSVLogger",
683
+ "params": {
684
+ "name": "testtube", # hack for sbord fanatics
685
+ "save_dir": logdir,
686
+ },
687
+ },
688
+ }
689
+ default_logger_cfg = default_logger_cfgs["wandb" if opt.wandb else "csv"]
690
+ if opt.wandb:
691
+ # TODO change once leaving "swiffer" config directory
692
+ try:
693
+ group_name = nowname.split(now)[-1].split("-")[1]
694
+ except:
695
+ group_name = nowname
696
+ default_logger_cfg["params"]["group"] = group_name
697
+ init_wandb(
698
+ os.path.join(os.getcwd(), logdir),
699
+ opt=opt,
700
+ group_name=group_name,
701
+ config=config,
702
+ name_str=nowname,
703
+ )
704
+ if "logger" in lightning_config:
705
+ logger_cfg = lightning_config.logger
706
+ else:
707
+ logger_cfg = OmegaConf.create()
708
+ logger_cfg = OmegaConf.merge(default_logger_cfg, logger_cfg)
709
+ trainer_kwargs["logger"] = instantiate_from_config(logger_cfg)
710
+
711
+ # modelcheckpoint - use TrainResult/EvalResult(checkpoint_on=metric) to
712
+ # specify which metric is used to determine best models
713
+ default_modelckpt_cfg = {
714
+ "target": "pytorch_lightning.callbacks.ModelCheckpoint",
715
+ "params": {
716
+ "dirpath": ckptdir,
717
+ "filename": "{epoch:06}",
718
+ "verbose": True,
719
+ "save_last": True,
720
+ },
721
+ }
722
+ if hasattr(model, "monitor"):
723
+ print(f"Monitoring {model.monitor} as checkpoint metric.")
724
+ default_modelckpt_cfg["params"]["monitor"] = model.monitor
725
+ default_modelckpt_cfg["params"]["save_top_k"] = 3
726
+
727
+ if "modelcheckpoint" in lightning_config:
728
+ modelckpt_cfg = lightning_config.modelcheckpoint
729
+ else:
730
+ modelckpt_cfg = OmegaConf.create()
731
+ modelckpt_cfg = OmegaConf.merge(default_modelckpt_cfg, modelckpt_cfg)
732
+ print(f"Merged modelckpt-cfg: \n{modelckpt_cfg}")
733
+
734
+ # https://pytorch-lightning.readthedocs.io/en/stable/extensions/strategy.html
735
+ # default to ddp if not further specified
736
+ default_strategy_config = {"target": "pytorch_lightning.strategies.DDPStrategy"}
737
+
738
+ if "strategy" in lightning_config:
739
+ strategy_cfg = lightning_config.strategy
740
+ else:
741
+ strategy_cfg = OmegaConf.create()
742
+ default_strategy_config["params"] = {
743
+ "find_unused_parameters": False,
744
+ # "static_graph": True,
745
+ # "ddp_comm_hook": default.fp16_compress_hook # TODO: experiment with this, also for DDPSharded
746
+ }
747
+ strategy_cfg = OmegaConf.merge(default_strategy_config, strategy_cfg)
748
+ print(
749
+ f"strategy config: \n ++++++++++++++ \n {strategy_cfg} \n ++++++++++++++ "
750
+ )
751
+ trainer_kwargs["strategy"] = instantiate_from_config(strategy_cfg)
752
+
753
+ # add callback which sets up log directory
754
+ default_callbacks_cfg = {
755
+ "setup_callback": {
756
+ "target": "main.SetupCallback",
757
+ "params": {
758
+ "resume": opt.resume,
759
+ "now": now,
760
+ "logdir": logdir,
761
+ "ckptdir": ckptdir,
762
+ "cfgdir": cfgdir,
763
+ "config": config,
764
+ "lightning_config": lightning_config,
765
+ "debug": opt.debug,
766
+ "ckpt_name": melk_ckpt_name,
767
+ },
768
+ },
769
+ "image_logger": {
770
+ "target": "main.ImageLogger",
771
+ "params": {"batch_frequency": 1000, "max_images": 4, "clamp": True},
772
+ },
773
+ "learning_rate_logger": {
774
+ "target": "pytorch_lightning.callbacks.LearningRateMonitor",
775
+ "params": {
776
+ "logging_interval": "step",
777
+ # "log_momentum": True
778
+ },
779
+ },
780
+ }
781
+ if version.parse(pl.__version__) >= version.parse("1.4.0"):
782
+ default_callbacks_cfg.update({"checkpoint_callback": modelckpt_cfg})
783
+
784
+ if "callbacks" in lightning_config:
785
+ callbacks_cfg = lightning_config.callbacks
786
+ else:
787
+ callbacks_cfg = OmegaConf.create()
788
+
789
+ if "metrics_over_trainsteps_checkpoint" in callbacks_cfg:
790
+ print(
791
+ "Caution: Saving checkpoints every n train steps without deleting. This might require some free space."
792
+ )
793
+ default_metrics_over_trainsteps_ckpt_dict = {
794
+ "metrics_over_trainsteps_checkpoint": {
795
+ "target": "pytorch_lightning.callbacks.ModelCheckpoint",
796
+ "params": {
797
+ "dirpath": os.path.join(ckptdir, "trainstep_checkpoints"),
798
+ "filename": "{epoch:06}-{step:09}",
799
+ "verbose": True,
800
+ "save_top_k": -1,
801
+ "every_n_train_steps": 10000,
802
+ "save_weights_only": True,
803
+ },
804
+ }
805
+ }
806
+ default_callbacks_cfg.update(default_metrics_over_trainsteps_ckpt_dict)
807
+
808
+ callbacks_cfg = OmegaConf.merge(default_callbacks_cfg, callbacks_cfg)
809
+ if "ignore_keys_callback" in callbacks_cfg and ckpt_resume_path is not None:
810
+ callbacks_cfg.ignore_keys_callback.params["ckpt_path"] = ckpt_resume_path
811
+ elif "ignore_keys_callback" in callbacks_cfg:
812
+ del callbacks_cfg["ignore_keys_callback"]
813
+
814
+ trainer_kwargs["callbacks"] = [
815
+ instantiate_from_config(callbacks_cfg[k]) for k in callbacks_cfg
816
+ ]
817
+ if not "plugins" in trainer_kwargs:
818
+ trainer_kwargs["plugins"] = list()
819
+
820
+ # cmd line trainer args (which are in trainer_opt) have always priority over config-trainer-args (which are in trainer_kwargs)
821
+ trainer_opt = vars(trainer_opt)
822
+ trainer_kwargs = {
823
+ key: val for key, val in trainer_kwargs.items() if key not in trainer_opt
824
+ }
825
+ trainer = Trainer(**trainer_opt, **trainer_kwargs)
826
+
827
+ trainer.logdir = logdir ###
828
+
829
+ # data
830
+ data = instantiate_from_config(config.data)
831
+ # NOTE according to https://pytorch-lightning.readthedocs.io/en/latest/datamodules.html
832
+ # calling these ourselves should not be necessary but it is.
833
+ # lightning still takes care of proper multiprocessing though
834
+ data.prepare_data()
835
+ # data.setup()
836
+ print("#### Data #####")
837
+ try:
838
+ for k in data.datasets:
839
+ print(
840
+ f"{k}, {data.datasets[k].__class__.__name__}, {len(data.datasets[k])}"
841
+ )
842
+ except:
843
+ print("datasets not yet initialized.")
844
+
845
+ # configure learning rate
846
+ if "batch_size" in config.data.params:
847
+ bs, base_lr = config.data.params.batch_size, config.model.base_learning_rate
848
+ else:
849
+ bs, base_lr = (
850
+ config.data.params.train.loader.batch_size,
851
+ config.model.base_learning_rate,
852
+ )
853
+ if not cpu:
854
+ ngpu = len(lightning_config.trainer.devices.strip(",").split(","))
855
+ else:
856
+ ngpu = 1
857
+ if "accumulate_grad_batches" in lightning_config.trainer:
858
+ accumulate_grad_batches = lightning_config.trainer.accumulate_grad_batches
859
+ else:
860
+ accumulate_grad_batches = 1
861
+ print(f"accumulate_grad_batches = {accumulate_grad_batches}")
862
+ lightning_config.trainer.accumulate_grad_batches = accumulate_grad_batches
863
+ if opt.scale_lr:
864
+ model.learning_rate = accumulate_grad_batches * ngpu * bs * base_lr
865
+ print(
866
+ "Setting learning rate to {:.2e} = {} (accumulate_grad_batches) * {} (num_gpus) * {} (batchsize) * {:.2e} (base_lr)".format(
867
+ model.learning_rate, accumulate_grad_batches, ngpu, bs, base_lr
868
+ )
869
+ )
870
+ else:
871
+ model.learning_rate = base_lr
872
+ print("++++ NOT USING LR SCALING ++++")
873
+ print(f"Setting learning rate to {model.learning_rate:.2e}")
874
+
875
+ # allow checkpointing via USR1
876
+ def melk(*args, **kwargs):
877
+ # run all checkpoint hooks
878
+ if trainer.global_rank == 0:
879
+ print("Summoning checkpoint.")
880
+ if melk_ckpt_name is None:
881
+ ckpt_path = os.path.join(ckptdir, "last.ckpt")
882
+ else:
883
+ ckpt_path = os.path.join(ckptdir, melk_ckpt_name)
884
+ trainer.save_checkpoint(ckpt_path)
885
+
886
+ def divein(*args, **kwargs):
887
+ if trainer.global_rank == 0:
888
+ import pudb
889
+
890
+ pudb.set_trace()
891
+
892
+ import signal
893
+
894
+ signal.signal(signal.SIGUSR1, melk)
895
+ signal.signal(signal.SIGUSR2, divein)
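+ # With the handlers above, sending SIGUSR1 to the training process
+ # (e.g. `kill -USR1 <pid>`) saves a checkpoint via melk(), and SIGUSR2
+ # drops rank 0 into the pudb debugger.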
896
+
897
+ # run
898
+ if opt.train:
899
+ try:
900
+ trainer.fit(model, data, ckpt_path=ckpt_resume_path)
901
+ except Exception:
902
+ if not opt.debug:
903
+ melk()
904
+ raise
905
+ if not opt.no_test and not trainer.interrupted:
906
+ trainer.test(model, data)
907
+ except RuntimeError as err:
908
+ if MULTINODE_HACKS:
909
+ import datetime
910
+ import os
911
+ import socket
912
+
913
+ import requests
914
+
915
+ device = os.environ.get("CUDA_VISIBLE_DEVICES", "?")
916
+ hostname = socket.gethostname()
917
+ ts = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
918
+ resp = requests.get("http://169.254.169.254/latest/meta-data/instance-id")
919
+ print(
920
+ f"ERROR at {ts} on {hostname}/{resp.text} (CUDA_VISIBLE_DEVICES={device}): {type(err).__name__}: {err}",
921
+ flush=True,
922
+ )
923
+ raise err
924
+ except Exception:
925
+ if opt.debug and trainer.global_rank == 0:
926
+ try:
927
+ import pudb as debugger
928
+ except ImportError:
929
+ import pdb as debugger
930
+ debugger.post_mortem()
931
+ raise
932
+ finally:
933
+ # move newly created debug project to debug_runs
934
+ if opt.debug and not opt.resume and trainer.global_rank == 0:
935
+ dst, name = os.path.split(logdir)
936
+ dst = os.path.join(dst, "debug_runs", name)
937
+ os.makedirs(os.path.split(dst)[0], exist_ok=True)
938
+ os.rename(logdir, dst)
939
+
940
+ if opt.wandb:
941
+ wandb.finish()
942
+ # if trainer.global_rank == 0:
943
+ # print(trainer.profiler.summary())
main/inference/motionctrl_cmcm.py ADDED
@@ -0,0 +1,416 @@
1
+ import argparse
2
+ import datetime
3
+ import json
4
+ import math
5
+ import os
6
+ import sys
7
+ import time
8
+ from glob import glob
9
+ from pathlib import Path
10
+ from typing import Optional
11
+
12
+ import cv2
13
+ import numpy as np
14
+ import torch
15
+ import torchvision
16
+ from einops import rearrange, repeat
17
+ from fire import Fire
18
+ from omegaconf import OmegaConf
19
+ from PIL import Image
20
+ from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor
21
+
22
+ sys.path.insert(1, os.path.join(sys.path[0], '..', '..'))
23
+ from sgm.util import default, instantiate_from_config
24
+
25
+ camera_poses = [
26
+ 'test_camera_L',
27
+ 'test_camera_D',
28
+ 'test_camera_I',
29
+ 'test_camera_O',
30
+ 'test_camera_R',
31
+ 'test_camera_U',
32
+ 'test_camera_Round-ZoomIn',
33
+ 'test_camera_Round-RI_90',
34
+ ]
35
+
36
+ def to_relative_RT2(org_pose, keyframe_idx=0, keyframe_zero=False):
37
+ org_pose = org_pose.reshape(-1, 3, 4) # [t, 3, 4]
38
+ R_dst = org_pose[:, :, :3]
39
+ T_dst = org_pose[:, :, 3:]
40
+
41
+ R_src = R_dst[keyframe_idx: keyframe_idx+1].repeat(org_pose.shape[0], axis=0) # [t, 3, 3]
42
+ T_src = T_dst[keyframe_idx: keyframe_idx+1].repeat(org_pose.shape[0], axis=0)
43
+
44
+ R_src_inv = R_src.transpose(0, 2, 1) # [t, 3, 3]
45
+
46
+ R_rel = R_dst @ R_src_inv # [t, 3, 3]
47
+ T_rel = T_dst - R_rel@T_src
48
+
49
+ RT_rel = np.concatenate([R_rel, T_rel], axis=-1) # [t, 3, 4]
50
+ RT_rel = RT_rel.reshape(-1, 12) # [t, 12]
51
+
52
+ if keyframe_zero:
53
+ RT_rel[keyframe_idx] = np.zeros_like(RT_rel[keyframe_idx])
54
+
55
+ return RT_rel
56
+
57
+ def get_RT(pose_dir='', video_frames=14, frame_stride=1, speed=1.0, **kwargs):
58
+ pose_file = [f'{pose_dir}/{pose}.json' for pose in camera_poses]
59
+ pose_sample_num = len(pose_file)
60
+
61
+ pose_sample_num = len(pose_file)
62
+
63
+ data_list = []
64
+ pose_name = []
65
+
66
+
67
+ for idx in range(pose_sample_num):
68
+ cur_pose_name = camera_poses[idx].replace('test_camera_', '')
69
+ pose_name.append(cur_pose_name)
70
+
71
+ with open(pose_file[idx], 'r') as f:
72
+ pose = json.load(f)
73
+ pose = np.array(pose) # [t, 12]
74
+
75
+ while frame_stride * video_frames > pose.shape[0]:
76
+ frame_stride -= 1
77
+
78
+ pose = pose[::frame_stride]
79
+ if video_frames < 16:
80
+ half = (pose.shape[0] - video_frames) // 2
81
+ pose = pose[half:half+video_frames]
82
+ # pose = pose[:video_frames]
83
+ pose = pose.reshape(-1, 3, 4) # [t, 3, 4]
84
+ # rescale
85
+ pose[:, :, -1] = pose[:, :, -1] * np.array([3, 1, 4]) * speed
86
+ pose = to_relative_RT2(pose)
87
+
88
+
89
+ pose = torch.tensor(pose).float() # [t, 12]
90
+ data_list.append(pose)
91
+
92
+ # data_list = torch.stack(data_list, dim=0) # [pose_sample_num, t, 12]
93
+ return data_list, pose_name
94
+
95
+ def sample(
96
+ input_path: str = "examples/camera_poses", # Can either be image file or folder with image files
97
+ ckpt: str = "checkpoints/motionctrl_svd.ckpt",
98
+ config: str = None,
99
+ num_frames: Optional[int] = None,
100
+ num_steps: Optional[int] = None,
101
+ version: str = "svd",
102
+ fps_id: int = 6,
103
+ motion_bucket_id: int = 127,
104
+ cond_aug: float = 0.02,
105
+ seed: int = 23,
106
+ decoding_t: int = 1, # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
107
+ device: str = "cuda",
108
+ output_folder: Optional[str] = None,
109
+ save_fps: int = 10,
110
+ resize: Optional[bool] = False,
111
+ pose_dir: str = '',
112
+ sample_num: int = 1,
113
+ height: int = 576,
114
+ width: int = 1024,
115
+ transform: Optional[bool] = False,
116
+ save_images: Optional[bool] = False,
117
+ speed: float = 1.0,
118
+ ):
119
+ """
120
+ Simple script to generate a single sample conditioned on an image `input_path` or multiple images, one for each
121
+ image file in folder `input_path`. If you run out of VRAM, try decreasing `decoding_t`.
122
+ """
123
+
124
+ assert (version == "svd"), "Only SVD is supported for now."
125
+ num_frames = default(num_frames, 14)
126
+ num_steps = default(num_steps, 25)
127
+ output_folder = default(output_folder, "outputs/motionctrl_svd/")
128
+ model_config = default(config, "configs/inference/config_motionctrl_cmcm.yaml")
129
+
130
+ model, filter = load_model(
131
+ model_config,
132
+ ckpt,
133
+ device,
134
+ num_frames,
135
+ num_steps,
136
+ )
137
+ torch.manual_seed(seed)
138
+
139
+ path = Path(input_path)
140
+ all_img_paths = []
141
+ if path.is_file():
142
+ if any([input_path.endswith(x) for x in ["jpg", "jpeg", "png"]]):
143
+ all_img_paths = [input_path]
144
+ else:
145
+ raise ValueError("Path is not valid image file.")
146
+ elif path.is_dir():
147
+ all_img_paths = sorted(
148
+ [
149
+ f
150
+ for f in path.iterdir()
151
+ if f.is_file() and f.suffix.lower() in [".jpg", ".jpeg", ".png"]
152
+ ]
153
+ )
154
+ if len(all_img_paths) == 0:
155
+ raise ValueError("Folder does not contain any images.")
156
+ else:
157
+ raise ValueError
158
+
159
+ if transform:
160
+ spatial_transform = Compose([
161
+ Resize(size=width),
162
+ CenterCrop(size=(height, width)),
163
+ ])
164
+
165
+ # get camera poses
166
+ RTs, pose_name = get_RT(pose_dir=pose_dir, video_frames=num_frames, frame_stride=1, speed=speed)
167
+
168
+ print(f'loaded {len(all_img_paths)} images.')
169
+ os.makedirs(output_folder, exist_ok=True)
170
+ for no, input_img_path in enumerate(all_img_paths):
171
+
172
+ filepath, fullflname = os.path.split(input_img_path)
173
+ filename, ext = os.path.splitext(fullflname)
174
+ print(f'-sample {no+1}: {filename} ...')
175
+
176
+ # RTs = RTs[0:1]
177
+ for RT_idx in range(len(RTs)):
178
+ cur_pose_name = pose_name[RT_idx]
179
+ print(f'--pose: {cur_pose_name} ...')
180
+ RT = RTs[RT_idx]
181
+ RT = RT.unsqueeze(0).repeat(2,1,1)
182
+ RT = RT.to(device)
183
+
184
+ with Image.open(input_img_path) as image:
185
+ if image.mode == "RGBA":
186
+ image = image.convert("RGB")
187
+ if transform:
188
+ image = spatial_transform(image)
189
+ if resize:
190
+ image = image.resize((width, height))
191
+ w, h = image.size
192
+
193
+ if h % 64 != 0 or w % 64 != 0:
194
+ width, height = map(lambda x: x - x % 64, (w, h))
195
+ image = image.resize((width, height))
196
+ print(
197
+ f"WARNING: Your image is of size {h}x{w} which is not divisible by 64. We are resizing to {height}x{width}!"
198
+ )
199
+
200
+ image = ToTensor()(image)
201
+ image = image * 2.0 - 1.0
202
+
203
+ image = image.unsqueeze(0).to(device)
204
+ H, W = image.shape[2:]
205
+ assert image.shape[1] == 3
206
+ F = 8
207
+ C = 4
208
+ shape = (num_frames, C, H // F, W // F)
209
+ if (H, W) != (576, 1024):
210
+ print(
211
+ "WARNING: The conditioning frame you provided is not 576x1024. This leads to suboptimal performance as model was only trained on 576x1024. Consider increasing `cond_aug`."
212
+ )
213
+ if motion_bucket_id > 255:
214
+ print(
215
+ "WARNING: High motion bucket! This may lead to suboptimal performance."
216
+ )
217
+
218
+ if fps_id < 5:
219
+ print("WARNING: Small fps value! This may lead to suboptimal performance.")
220
+
221
+ if fps_id > 30:
222
+ print("WARNING: Large fps value! This may lead to suboptimal performance.")
223
+
224
+ value_dict = {}
225
+ value_dict["motion_bucket_id"] = motion_bucket_id
226
+ value_dict["fps_id"] = fps_id
227
+ value_dict["cond_aug"] = cond_aug
228
+ value_dict["cond_frames_without_noise"] = image
229
+ value_dict["cond_frames"] = image + cond_aug * torch.randn_like(image)
230
+
231
+ with torch.no_grad():
232
+ with torch.autocast(device):
233
+ batch, batch_uc = get_batch(
234
+ get_unique_embedder_keys_from_conditioner(model.conditioner),
235
+ value_dict,
236
+ [1, num_frames],
237
+ T=num_frames,
238
+ device=device,
239
+ )
240
+ c, uc = model.conditioner.get_unconditional_conditioning(
241
+ batch,
242
+ batch_uc=batch_uc,
243
+ force_uc_zero_embeddings=[
244
+ "cond_frames",
245
+ "cond_frames_without_noise",
246
+ ],
247
+ )
248
+
249
+ for k in ["crossattn", "concat"]:
250
+ uc[k] = repeat(uc[k], "b ... -> b t ...", t=num_frames)
251
+ uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=num_frames)
252
+ c[k] = repeat(c[k], "b ... -> b t ...", t=num_frames)
253
+ c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=num_frames)
254
+
255
+
256
+
257
+ additional_model_inputs = {}
258
+ additional_model_inputs["image_only_indicator"] = torch.zeros(
259
+ 2, num_frames
260
+ ).to(device)
261
+ #additional_model_inputs["image_only_indicator"][:,0] = 1
262
+ additional_model_inputs["num_video_frames"] = batch["num_video_frames"]
263
+
264
+
265
+ additional_model_inputs["RT"] = RT
266
+
267
+ def denoiser(input, sigma, c):
268
+ return model.denoiser(
269
+ model.model, input, sigma, c, **additional_model_inputs
270
+ )
271
+
272
+ results = []
273
+ for j in range(sample_num):
274
+ randn = torch.randn(shape, device=device)
275
+ samples_z = model.sampler(denoiser, randn, cond=c, uc=uc)
276
+ model.en_and_decode_n_samples_a_time = decoding_t
277
+ samples_x = model.decode_first_stage(samples_z)
278
+ samples = torch.clamp((samples_x + 1.0) / 2.0, min=0.0, max=1.0) # [1*t, c, h, w]
279
+ results.append(samples)
280
+
281
+ samples = torch.stack(results, dim=0) # [sample_num, t, c, h, w]
282
+ samples = samples.data.cpu()
283
+
284
+ video_path = os.path.join(output_folder, f"{filename}_{cur_pose_name}.mp4")
285
+ save_results(samples, video_path, fps=save_fps)
286
+
287
+ if save_images:
288
+ for i in range(sample_num):
289
+ cur_output_folder = os.path.join(output_folder, f"{filename}", f"{cur_pose_name}", f"{i}")
290
+ os.makedirs(cur_output_folder, exist_ok=True)
291
+ for j in range(num_frames):
292
+ cur_img_path = os.path.join(cur_output_folder, f"{j:06d}.png")
293
+ torchvision.utils.save_image(samples[i,j], cur_img_path)
294
+
295
+ print(f'Done! results saved in {output_folder}.')
296
+
297
+ def save_results(resutls, filename, fps=10):
298
+ video = resutls.permute(1, 0, 2, 3, 4) # [t, sample_num, c, h, w]
299
+ frame_grids = [torchvision.utils.make_grid(framesheet, nrow=int(video.shape[1])) for framesheet in video] #[3, 1*h, n*w]
300
+ grid = torch.stack(frame_grids, dim=0) # stack in temporal dim [t, 3, n*h, w]
301
+ # already in [0,1]
302
+ grid = (grid * 255).to(torch.uint8).permute(0, 2, 3, 1)
303
+ torchvision.io.write_video(filename, grid, fps=fps, video_codec='h264', options={'crf': '10'})
304
+
305
+ def get_unique_embedder_keys_from_conditioner(conditioner):
306
+ return list(set([x.input_key for x in conditioner.embedders]))
307
+
308
+
309
+ def get_batch(keys, value_dict, N, T, device):
310
+ batch = {}
311
+ batch_uc = {}
312
+
313
+ for key in keys:
314
+ if key == "fps_id":
315
+ batch[key] = (
316
+ torch.tensor([value_dict["fps_id"]])
317
+ .to(device)
318
+ .repeat(int(math.prod(N)))
319
+ )
320
+ elif key == "motion_bucket_id":
321
+ batch[key] = (
322
+ torch.tensor([value_dict["motion_bucket_id"]])
323
+ .to(device)
324
+ .repeat(int(math.prod(N)))
325
+ )
326
+ elif key == "cond_aug":
327
+ batch[key] = repeat(
328
+ torch.tensor([value_dict["cond_aug"]]).to(device),
329
+ "1 -> b",
330
+ b=math.prod(N),
331
+ )
332
+ elif key == "cond_frames":
333
+ batch[key] = repeat(value_dict["cond_frames"], "1 ... -> b ...", b=N[0])
334
+ elif key == "cond_frames_without_noise":
335
+ batch[key] = repeat(
336
+ value_dict["cond_frames_without_noise"], "1 ... -> b ...", b=N[0]
337
+ )
338
+ else:
339
+ batch[key] = value_dict[key]
340
+
341
+ if T is not None:
342
+ batch["num_video_frames"] = T
343
+
344
+ for key in batch.keys():
345
+ if key not in batch_uc and isinstance(batch[key], torch.Tensor):
346
+ batch_uc[key] = torch.clone(batch[key])
347
+ return batch, batch_uc
348
+
349
+
350
+ def load_model(
351
+ config: str,
352
+ ckpt: str,
353
+ device: str,
354
+ num_frames: int,
355
+ num_steps: int,
356
+ ):
357
+
358
+ config = OmegaConf.load(config)
359
+ config.model.params.ckpt_path = ckpt
360
+ if device == "cuda":
361
+ config.model.params.conditioner_config.params.emb_models[
362
+ 0
363
+ ].params.open_clip_embedding_config.params.init_device = device
364
+
365
+ config.model.params.sampler_config.params.num_steps = num_steps
366
+ config.model.params.sampler_config.params.guider_config.params.num_frames = (
367
+ num_frames
368
+ )
369
+
370
+ model = instantiate_from_config(config.model)
371
+
372
+ model = model.to(device).eval()
373
+
374
+ filter = None #DeepFloydDataFiltering(verbose=False, device=device)
375
+ return model, filter
376
+
377
+
378
+ def get_parser():
379
+ parser = argparse.ArgumentParser()
380
+ parser.add_argument("--seed", type=int, default=23, help="seed for seed_everything")
381
+ parser.add_argument("--ckpt", type=str, default=None, help="checkpoint path")
382
+ parser.add_argument("--config", type=str, help="config (yaml) path")
383
+ parser.add_argument("--input", type=str, default=None, help="image path or folder")
384
+ parser.add_argument("--savedir", type=str, default=None, help="results saving path")
385
+ parser.add_argument("--savefps", type=int, default=10, help="video fps to generate")
386
+ parser.add_argument("--n_samples", type=int, default=1, help="num of samples per prompt",)
387
+ parser.add_argument("--ddim_steps", type=int, default=50, help="steps of ddim if positive, otherwise use DDPM",)
388
+ parser.add_argument("--ddim_eta", type=float, default=1.0, help="eta for ddim sampling (0.0 yields deterministic sampling)",)
389
+ parser.add_argument("--frames", type=int, default=-1, help="frames num to inference")
390
+ parser.add_argument("--fps", type=int, default=6, help="control the fps")
391
+ parser.add_argument("--motion", type=int, default=127, help="control the motion magnitude")
392
+ parser.add_argument("--cond_aug", type=float, default=0.02, help="adding noise to input image")
393
+ parser.add_argument("--decoding_t", type=int, default=1, help="frames num to decoding per time")
394
+ parser.add_argument("--resize", action='store_true', default=False, help="resize all input to default resolution")
395
+ parser.add_argument("--sample_num", type=int, default=1, help="frames num to decoding per time")
396
+ parser.add_argument("--pose_dir", type=str, default='', help="checkpoint path")
397
+ parser.add_argument("--height", type=int, default=576, help="frames num to decoding per time")
398
+ parser.add_argument("--width", type=int, default=1024, help="frames num to decoding per time")
399
+ parser.add_argument("--transform", action='store_true', default=False, help="resize all input to specific resolution")
400
+ parser.add_argument("--save_images", action='store_true', default=False, help="save images")
401
+ parser.add_argument("--speed", type=float, default=1.0, help="speed of camera motion")
402
+ return parser
403
+
404
+
405
+ if __name__ == "__main__":
406
+ now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
407
+ print("@MotionCrl+SVD Inference: %s"%now)
408
+ #Fire(sample)
409
+ parser = get_parser()
410
+ args = parser.parse_args()
411
+ sample(input_path=args.input, ckpt=args.ckpt, config=args.config, num_frames=args.frames, num_steps=args.ddim_steps, \
412
+ fps_id=args.fps, motion_bucket_id=args.motion, cond_aug=args.cond_aug, seed=args.seed, \
413
+ decoding_t=args.decoding_t, output_folder=args.savedir, save_fps=args.savefps, resize=args.resize,
414
+ pose_dir=args.pose_dir, sample_num=args.sample_num, height=args.height, width=args.width,
415
+ transform=args.transform, save_images=args.save_images, speed=args.speed)
416
+
pytest.ini ADDED
@@ -0,0 +1,3 @@
1
+ [pytest]
2
+ markers =
3
+ inference: mark as inference test (deselect with '-m "not inference"')
requirements.txt ADDED
@@ -0,0 +1,32 @@
1
+ Pytorch-Lightning==1.9.0
2
+ decord
3
+ kornia
4
+ timm
5
+ open_clip_torch
6
+ av
7
+ omegaconf
8
+ transformers
9
+ einops
10
+ scikit-learn
11
+ taming-transformers-rom1504
12
+ pandas
13
+ triton
14
+ xformers==0.0.16
15
+ torch==1.13.1
16
+ torchvision
17
+ fairscale
18
+ psutil==5.9.5
19
+ annotated-types==0.5.0
20
+ plotly
21
+ imageio==2.14.1
22
+ imageio-ffmpeg==0.4.7
23
+ opencv-python==4.8.0.74
24
+ moviepy
25
+ Pillow
26
+ tqdm
27
+ gradio==3.37.0
28
+ webdataset
29
+ Fire
30
+ natsort
31
+ wandb
32
+ clip @ git+https://github.com/openai/CLIP.git
scripts/__init__.py ADDED
File without changes
scripts/demo/__init__.py ADDED
File without changes
scripts/demo/detect.py ADDED
@@ -0,0 +1,156 @@
1
+ import argparse
2
+
3
+ import cv2
4
+ import numpy as np
5
+
6
+ try:
7
+ from imwatermark import WatermarkDecoder
8
+ except ImportError as e:
9
+ try:
10
+ # Assume some of the other dependencies such as torch are not fulfilled
11
+ # import file without loading unnecessary libraries.
12
+ import importlib.util
13
+ import sys
14
+
15
+ spec = importlib.util.find_spec("imwatermark.maxDct")
16
+ assert spec is not None
17
+ maxDct = importlib.util.module_from_spec(spec)
18
+ sys.modules["maxDct"] = maxDct
19
+ spec.loader.exec_module(maxDct)
20
+
21
+ class WatermarkDecoder(object):
22
+ """A minimal version of
23
+ https://github.com/ShieldMnt/invisible-watermark/blob/main/imwatermark/watermark.py
24
+ to only reconstruct bits using dwtDct"""
25
+
26
+ def __init__(self, wm_type="bytes", length=0):
27
+ assert wm_type == "bits", "Only bits defined in minimal import"
28
+ self._wmType = wm_type
29
+ self._wmLen = length
30
+
31
+ def reconstruct(self, bits):
32
+ if len(bits) != self._wmLen:
33
+ raise RuntimeError("bits are not matched with watermark length")
34
+
35
+ return bits
36
+
37
+ def decode(self, cv2Image, method="dwtDct", **configs):
38
+ (r, c, channels) = cv2Image.shape
39
+ if r * c < 256 * 256:
40
+ raise RuntimeError("image too small, should be larger than 256x256")
41
+
42
+ bits = []
43
+ assert method == "dwtDct"
44
+ embed = maxDct.EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
45
+ bits = embed.decode(cv2Image)
46
+ return self.reconstruct(bits)
47
+
48
+ except:
49
+ raise e
50
+
51
+
52
+ # A fixed 48-bit message that was chosen at random
53
+ # WATERMARK_MESSAGE = 0xB3EC907BB19E
54
+ WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
55
+ # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
56
+ WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
57
+ MATCH_VALUES = [
58
+ [27, "No watermark detected"],
59
+ [33, "Partial watermark match. Cannot determine with certainty."],
60
+ [
61
+ 35,
62
+ (
63
+ "Likely watermarked. In our test 0.02% of real images were "
64
+ 'falsely detected as "Likely watermarked"'
65
+ ),
66
+ ],
67
+ [
68
+ 49,
69
+ (
70
+ "Very likely watermarked. In our test no real images were "
71
+ 'falsely detected as "Very likely watermarked"'
72
+ ),
73
+ ],
74
+ ]
75
+
76
+
77
+ class GetWatermarkMatch:
78
+ def __init__(self, watermark):
79
+ self.watermark = watermark
80
+ self.num_bits = len(self.watermark)
81
+ self.decoder = WatermarkDecoder("bits", self.num_bits)
82
+
83
+ def __call__(self, x: np.ndarray) -> np.ndarray:
84
+ """
85
+ Detects the number of matching bits the predefined watermark with one
86
+ or multiple images. Images should be in cv2 format, e.g. h x w x c BGR.
87
+
88
+ Args:
89
+ x: ([B], h w, c) in range [0, 255]
90
+
91
+ Returns:
92
+ number of matched bits ([B],)
93
+ """
94
+ squeeze = len(x.shape) == 3
95
+ if squeeze:
96
+ x = x[None, ...]
97
+
98
+ bs = x.shape[0]
99
+ detected = np.empty((bs, self.num_bits), dtype=bool)
100
+ for k in range(bs):
101
+ detected[k] = self.decoder.decode(x[k], "dwtDct")
102
+ result = np.sum(detected == self.watermark, axis=-1)
103
+ if squeeze:
104
+ return result[0]
105
+ else:
106
+ return result
107
+
108
+
109
+ get_watermark_match = GetWatermarkMatch(WATERMARK_BITS)
110
+
111
+
112
+ if __name__ == "__main__":
113
+ parser = argparse.ArgumentParser()
114
+ parser.add_argument(
115
+ "filename",
116
+ nargs="+",
117
+ type=str,
118
+ help="Image files to check for watermarks",
119
+ )
120
+ opts = parser.parse_args()
121
+
122
+ print(
123
+ """
124
+ This script tries to detect watermarked images. Please be aware of
125
+ the following:
126
+ - As the watermark is supposed to be invisible, there is the risk that
127
+ watermarked images may not be detected.
128
+ - To maximize the chance of detection make sure that the image has the same
129
+ dimensions as when the watermark was applied (most likely 1024x1024
130
+ or 512x512).
131
+ - Specific image manipulation may drastically decrease the chance that
132
+ watermarks can be detected.
133
+ - There is also the chance that an image has the characteristics of the
134
+ watermark by chance.
135
+ - The watermark script is public, anybody may watermark any images, and
136
+ could therefore claim it to be generated.
137
+ - All numbers below are based on a test using 10,000 images without any
138
+ modifications after applying the watermark.
139
+ """
140
+ )
141
+
142
+ for fn in opts.filename:
143
+ image = cv2.imread(fn)
144
+ if image is None:
145
+ print(f"Couldn't read {fn}. Skipping")
146
+ continue
147
+
148
+ num_bits = get_watermark_match(image)
149
+ k = 0
150
+ while num_bits > MATCH_VALUES[k][0]:
151
+ k += 1
152
+ print(
153
+ f"{fn}: {MATCH_VALUES[k][1]}",
154
+ f"Bits that matched the watermark {num_bits} from {len(WATERMARK_BITS)}\n",
155
+ sep="\n\t",
156
+ )