blanchon committed on
Commit 76c1d81
1 Parent(s): cb2de0f

Update code

Files changed (3):
  1. server.py +514 -317
  2. services/gaussian_splatting_cuda.py +19 -3
  3. services/rerun.py +92 -74
server.py CHANGED
@@ -1,303 +1,255 @@
 
1
  from pathlib import Path
2
  import shutil
3
  import tempfile
 
4
  import gradio as gr
5
  import uuid
6
  from typing_extensions import TypedDict, Tuple
7
 
8
  from fastapi import FastAPI
9
- from fastapi.staticfiles import StaticFiles
10
- import uvicorn
11
 
12
  app = FastAPI()
13
 
14
  # create a static directory to store the static files
15
- gs_dir = Path(str(tempfile.gettempdir())) / "gaussian_splatting_gradio"
16
- gs_dir.mkdir(parents=True, exist_ok=True)
17
-
18
- # mount FastAPI StaticFiles server
19
- app.mount("/static", StaticFiles(directory=gs_dir), name="static")
20
 
21
  StateDict = TypedDict("StateDict", {
22
  "uuid": str,
23
  })
24
 
25
- def getHTML():
26
- html_body = """
27
- <body>
28
- <div id="progress"></div>
29
- <div id="message"></div>
30
- <div class="scene" id="spinner">
31
- <div class="cube-wrapper">
32
- <div class="cube">
33
- <div class="cube-faces">
34
- <div class="cube-face bottom"></div>
35
- <div class="cube-face top"></div>
36
- <div class="cube-face left"></div>
37
- <div class="cube-face right"></div>
38
- <div class="cube-face back"></div>
39
- <div class="cube-face front"></div>
40
- </div>
41
- </div>
42
- </div>
43
- </div>
44
- <canvas id="canvas"></canvas>
45
-
46
- <div id="quality">
47
- <span id="fps"></span>
48
- </div>
49
-
50
- <style>
51
- .cube-wrapper {
52
- transform-style: preserve-3d;
53
- }
54
-
55
- .cube {
56
- transform-style: preserve-3d;
57
- transform: rotateX(45deg) rotateZ(45deg);
58
- animation: rotation 2s infinite;
59
- }
60
-
61
- .cube-faces {
62
- transform-style: preserve-3d;
63
- height: 80px;
64
- width: 80px;
65
- position: relative;
66
- transform-origin: 0 0;
67
- transform: translateX(0) translateY(0) translateZ(-40px);
68
- }
69
-
70
- .cube-face {
71
- position: absolute;
72
- inset: 0;
73
- background: #0017ff;
74
- border: solid 1px #ffffff;
75
- }
76
- .cube-face.top {
77
- transform: translateZ(80px);
78
- }
79
- .cube-face.front {
80
- transform-origin: 0 50%;
81
- transform: rotateY(-90deg);
82
- }
83
- .cube-face.back {
84
- transform-origin: 0 50%;
85
- transform: rotateY(-90deg) translateZ(-80px);
86
- }
87
- .cube-face.right {
88
- transform-origin: 50% 0;
89
- transform: rotateX(-90deg) translateY(-80px);
90
- }
91
- .cube-face.left {
92
- transform-origin: 50% 0;
93
- transform: rotateX(-90deg) translateY(-80px) translateZ(80px);
94
- }
95
-
96
- @keyframes rotation {
97
- 0% {
98
- transform: rotateX(45deg) rotateY(0) rotateZ(45deg);
99
- animation-timing-function: cubic-bezier(
100
- 0.17,
101
- 0.84,
102
- 0.44,
103
- 1
104
- );
105
- }
106
- 50% {
107
- transform: rotateX(45deg) rotateY(0) rotateZ(225deg);
108
- animation-timing-function: cubic-bezier(
109
- 0.76,
110
- 0.05,
111
- 0.86,
112
- 0.06
113
- );
114
- }
115
- 100% {
116
- transform: rotateX(45deg) rotateY(0) rotateZ(405deg);
117
- animation-timing-function: cubic-bezier(
118
- 0.17,
119
- 0.84,
120
- 0.44,
121
- 1
122
- );
123
- }
124
- }
125
-
126
- .scene,
127
- #message {
128
- position: absolute;
129
- display: flex;
130
- top: 0;
131
- right: 0;
132
- left: 0;
133
- bottom: 0;
134
- z-index: 2;
135
- height: 100%;
136
- width: 100%;
137
- align-items: center;
138
- justify-content: center;
139
- }
140
- #message {
141
- font-weight: bold;
142
- font-size: large;
143
- color: red;
144
- pointer-events: none;
145
- }
146
-
147
- #progress {
148
- position: absolute;
149
- top: 0;
150
- height: 5px;
151
- background: blue;
152
- z-index: 99;
153
- transition: width 0.1s ease-in-out;
154
- }
155
-
156
- #quality {
157
- position: absolute;
158
- bottom: 10px;
159
- z-index: 999;
160
- right: 10px;
161
- }
162
-
163
- #canvas {
164
- display: block;
165
- position: absolute;
166
- top: 0;
167
- left: 0;
168
- width: 100%;
169
- height: 100%;
170
- touch-action: none;
171
- }
172
-
173
- #instructions {
174
- background: rgba(0,0,0,0.6);
175
- white-space: pre-wrap;
176
- padding: 10px;
177
- border-radius: 10px;
178
- font-size: x-small;
179
- }
180
- </style>
181
- </body>
182
  """
183
 
184
- html = f"""
185
- <head>
186
- <title>3D Gaussian Splatting Viewer</title>
187
- <script src="http://zeus.blanchon.cc/dropshare/main.js"></script>
188
- </head>
189
 
190
- {html_body}
 
 
 
 
191
  """
192
- return f"""<iframe style="width: 100%; height: 900px" srcdoc='{html}'></iframe>"""
193
 
194
- def createStateSession() -> StateDict:
195
- # Create new session
196
- session_uuid = str(uuid.uuid4())
197
- print("createStateSession")
198
- print(session_uuid)
199
- return StateDict(
200
- uuid=session_uuid,
201
- )
 
 
 
202
 
203
  def removeStateSession(session_state_value: StateDict):
204
  # Clean up previous session
 
 
 
 
205
  return StateDict(
206
  uuid=None,
207
  )
208
 
209
- def makeButtonVisible() -> Tuple[gr.Button, gr.Button]:
210
- process_button = gr.Button(visible=True)
211
- reset_button = gr.Button(visible=False) #TODO: I will bring this back when I figure out how to stop the process
212
- return process_button, reset_button
213
-
214
- def resetSession(state: StateDict) -> Tuple[StateDict, gr.Button, gr.Button]:
215
- print("resetSession")
216
- new_state = removeStateSession(state)
217
- process_button = gr.Button(visible=False)
218
- reset_button = gr.Button(visible=False)
219
- return new_state, process_button, reset_button
220
-
221
- def process(
222
- # *args, **kwargs
223
  session_state_value: StateDict,
224
- filepath: str,
225
  ffmpeg_fps: int,
226
  ffmpeg_qscale: int,
227
- colmap_camera: str,
228
- ):
229
  if session_state_value["uuid"] is None:
230
  return
231
- print("process")
232
- # print(args)
233
- # print(kwargs)
234
- # return
235
- print(session_state_value)
236
- print(f"Processing {filepath}")
237
 
238
- try:
239
- session_tmpdirname = gs_dir / str(session_state_value['uuid'])
240
- session_tmpdirname.mkdir(parents=True, exist_ok=True)
241
- print('Created temporary directory', session_tmpdirname)
242
 
243
- gs_dir_path = Path(session_tmpdirname)
244
- logfile_path = Path(session_tmpdirname) / "log.txt"
245
- logfile_path.touch()
246
  with logfile_path.open("w") as log_file:
247
- # Create log file
248
- logfile_path.touch()
249
-
250
- from services.ffmpeg import ffmpeg_run
251
  ffmpeg_run(
252
- video_path = Path(filepath),
253
- output_path = gs_dir_path,
254
  fps = int(ffmpeg_fps),
255
  qscale = int(ffmpeg_qscale),
256
  stream_file=log_file
257
  )
 
 
 
258
 
259
- from services.colmap import colmap
 
 
260
  colmap(
261
- source_path=gs_dir_path,
262
  camera=str(colmap_camera),
263
  stream_file=log_file
264
  )
 
 
265
 
266
- print("Done with colmap")
267
-
268
- # Create a zip of the gs_dir_path folder
269
- print(gs_dir, gs_dir_path)
270
- print(gs_dir_path.name)
271
- archive = shutil.make_archive("result", 'zip', gs_dir, gs_dir_path)
272
- print('Created zip file', archive)
273
 
274
- # Move the zip file to the gs_dir_path folder
275
- shutil.move(archive, gs_dir_path)
276
 
277
- from services.gaussian_splatting_cuda import gaussian_splatting_cuda
 
 
278
  gaussian_splatting_cuda(
279
- data_path = gs_dir_path,
280
- output_path = gs_dir_path / "output",
281
  gs_command = str(Path(__file__).parent.absolute() / "build" / 'gaussian_splatting_cuda'),
282
- iterations = 100,
283
- convergence_rate = 0.01,
284
- resolution = 512,
285
  enable_cr_monitoring = False,
286
  force = False,
287
  empty_gpu_cache = False,
288
  stream_file = log_file
289
  )
 
 
290
 
291
- except Exception:
292
- pass
293
- # print('Error - Removing temporary directory', session_tmpdirname)
294
- # shutil.rmtree(session_tmpdirname)
295
-
296
- def updateLog(session_state_value: StateDict) -> str:
297
  if session_state_value["uuid"] is None:
298
  return ""
299
 
300
- log_file = gs_dir / str(session_state_value['uuid']) / "log.txt"
301
  if not log_file.exists():
302
  return ""
303
 
@@ -306,117 +258,362 @@ def updateLog(session_state_value: StateDict) -> str:
306
 
307
  return logs
308
 
 
309
  with gr.Blocks() as demo:
 
 
 
 
310
  session_state = gr.State({
311
  "uuid": None,
312
  })
313
 
314
- with gr.Row():
315
-
316
- with gr.Column():
317
- video_input = gr.PlayableVideo(
318
- format="mp4",
319
- source="upload",
320
- label="Upload a video",
321
- include_audio=False
322
- )
323
- with gr.Row(variant="panel"):
324
- ffmpeg_fps = gr.Number(
325
- label="FFMPEG FPE",
326
- value=1,
327
- minimum=1,
328
- maximum=5,
329
- step=0.10,
 
 
330
  )
331
- ffmpeg_qscale = gr.Number(
332
- label="FFMPEG QSCALE",
333
- value=1,
334
- minimum=1,
335
- maximum=5,
336
- step=1,
 
 
337
  )
338
- colmap_camera = gr.Dropdown(
339
- label="COLMAP Camera",
340
- value="OPENCV",
341
- choices=["OPENCV", "SIMPLE_PINHOLE", "PINHOLE", "SIMPLE_RADIAL", "RADIAL"],
342
- )
343
-
344
- text_log = gr.Textbox(
345
- label="Logs",
346
- info="Logs",
 
 
347
  interactive=False,
348
- show_copy_button=True
349
  )
350
- # text_log = gr.Code(
351
- # label="Logs",
352
- # language=None,
353
- # interactive=False,
354
- # )
355
 
356
-
357
- process_button = gr.Button("Process", visible=False)
358
- reset_button = gr.ClearButton(
359
- components=[video_input, text_log, ffmpeg_fps, ffmpeg_qscale, colmap_camera],
360
- label="Reset",
361
- visible=False,
 
 
362
  )
363
-
364
- process_event = process_button.click(
365
- fn=process,
366
- inputs=[session_state, video_input, ffmpeg_fps, ffmpeg_qscale, colmap_camera],
367
- outputs=[],
 
 
368
  )
369
 
370
- upload_event = video_input.upload(
371
- fn=makeButtonVisible,
372
- inputs=[],
373
- outputs=[process_button, reset_button]
374
- ).then(
 
 
375
  fn=createStateSession,
376
- inputs=[],
377
- outputs=[session_state],
 
 
378
  ).then(
379
- fn=updateLog,
 
 
380
  inputs=[session_state],
381
- outputs=[text_log],
382
  every=2,
383
  )
384
 
385
- reset_button.click(
386
- fn=resetSession,
 
 
387
  inputs=[session_state],
388
- outputs=[session_state, process_button, reset_button],
389
- cancels=[process_event]
 
 
 
390
  )
391
-
392
- video_input.clear(
393
- fn=resetSession,
 
 
394
  inputs=[session_state],
395
- outputs=[session_state, process_button, reset_button],
396
- cancels=[process_event]
397
  )
398
 
399
- demo.close
400
-
 
 
401
 
402
  # gr.LoginButton, gr.LogoutButton
403
  # gr.HuggingFaceDatasetSaver
404
  # gr.OAuthProfile
405
-
406
-
407
-
408
-
409
-
410
 
411
  # with gr.Tab("jsdn"):
412
- # input_mic = gr.HTML(getHTML())
 
 
 
413
 
414
  demo.queue()
415
- # demo.launch()
416
 
417
  # mount Gradio app to FastAPI app
418
- app = gr.mount_gradio_app(app, demo, path="/")
419
 
420
 
421
- if __name__ == "__main__":
422
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
1
+ import os
2
  from pathlib import Path
3
  import shutil
4
  import tempfile
5
+ from typing import List
6
  import gradio as gr
7
  import uuid
8
  from typing_extensions import TypedDict, Tuple
9
 
10
  from fastapi import FastAPI
 
 
11
 
12
  app = FastAPI()
13
 
14
  # create a static directory to store the static files
15
+ GS_DIR = Path(str(tempfile.gettempdir())) / "gaussian_splatting_gradio"
16
+ GS_DIR.mkdir(parents=True, exist_ok=True)
 
 
 
17
 
18
  StateDict = TypedDict("StateDict", {
19
  "uuid": str,
20
  })
21
 
22
+ # http://localhost:7860/file=/tmp/gradio/c2110a7de804b39754d229de426dc9307bc03aea/page.svelte
23
+
24
+ HOST = "localhost"
25
+ PORT = 7860
26
+
27
+ home_markdown = """
28
+ ...
 
 
29
  """
30
 
31
+ step1_markdown = """
32
+ # Step 1 - Split Video into Frames
 
 
 
33
 
34
+ In the journey of transforming a video into a 3D Gaussian Splatting scene, the first step is converting the video into individual frames. Here you can provide a **video file** and specify how many images you want to extract per second (*fps*). The application will then automatically extract the frames from the video and prepare them for the next step in the process.
35
+
36
+ However, you can also do this step manually and upload the frames directory yourself in the next step. In that case, skip this step and go directly to the next one.
37
+
38
+ Please note that blurry frames will most likely result in a bad 3D model, so make sure the video is sharp enough.
39
  """
 
40
 
41
+ step2_markdown = """
42
+ # Step 2 - SfM using Colmap
43
+
44
+ In this step we use COLMAP (https://github.com/colmap/colmap). It takes the frames extracted from the uploaded video and generates camera parameters and a sparse point cloud, which are essential inputs for the 3D Gaussian Splatting process.
45
+
46
+ This step can take a while depending on the number of frames and their resolution, so please be patient.
47
+ You can also run this step manually and upload the resulting COLMAP workspace (together with the frames) yourself in the next step; in that case, skip this step and go directly to the next one.
48
+ """
49
+
50
+ step3_markdown = """
51
+ # Step 3 - 3D Gaussian Splatting
52
+
53
+ In this final step we use the 3D Gaussian Splatting Cuda implementation by MrNeRF (https://twitter.com/janusch_patas): https://github.com/MrNeRF/gaussian-splatting-cuda.
54
+ As training is quite fast, you can easily use a high number of iterations.
55
+ """
56
+
57
+ def getPlyFile(session_state_value: StateDict) -> str:
58
+ return f"/tmp/gaussian_splatting_gradio/{session_state_value['uuid']}/output/final_point_cloud.ply"
59
+
60
+ def getCamerasFile(session_state_value: StateDict) -> str:
61
+ return f"/tmp/gaussian_splatting_gradio/{session_state_value['uuid']}/output/cameras.json"
62
+
63
+ def getZipFile(session_state_value: StateDict) -> str:
64
+ return f"/tmp/gaussian_splatting_gradio/{session_state_value['uuid']}/result.zip"
65
+
66
+ def makeResult(session_state_value: StateDict) -> tuple[str, str, str]:
67
+ ply_file = getPlyFile(session_state_value)
68
+ cameras_file = getCamerasFile(session_state_value)
69
+ zip_file = getZipFile(session_state_value)
70
+ return [ply_file, cameras_file, zip_file]
71
+
72
+
73
+ # Utility functions
74
+ def createStateSession(previous_session: StateDict) -> StateDict:
75
+ if previous_session["uuid"] is None:
76
+ # Create new session
77
+ session_uuid = str(uuid.uuid4())
78
+ print("Creating new session: ", session_uuid)
79
+ session_tmpdirname = GS_DIR / str(session_uuid)
80
+ session_tmpdirname.mkdir(parents=True, exist_ok=True)
81
+ print('Created temporary directory: ', session_tmpdirname)
82
+ session = StateDict(
83
+ uuid=session_uuid,
84
+ )
85
+ else:
86
+ # Use previous session
87
+ session = previous_session
88
+ return session
89
 
90
  def removeStateSession(session_state_value: StateDict):
91
  # Clean up previous session
92
+ session_uuid = session_state_value["uuid"]
93
+ session_tmpdirname = GS_DIR / str(session_uuid)
94
+ print('Removing temporary directory: ', session_tmpdirname)
95
+ shutil.rmtree(session_tmpdirname)
96
  return StateDict(
97
  uuid=None,
98
  )
99
 
100
+ def makeButtonVisible(btn_value: str) -> gr.Button:
101
+ return gr.Button(btn_value, visible=True)
102
+
103
+
104
+ # Process functions
105
+ def process_ffmpeg(
 
 
106
  session_state_value: StateDict,
107
+ ffmpeg_input: str,
108
  ffmpeg_fps: int,
109
  ffmpeg_qscale: int,
110
+ ) -> list[str]:
111
+ # Ensure that a session is active
112
  if session_state_value["uuid"] is None:
113
  return
 
 
114
 
115
+ # Set up session directory
116
+ session_path = GS_DIR / str(session_state_value['uuid'])
117
+ logfile_path = Path(session_path) / "ffmpeg_log.txt"
118
+ logfile_path.touch()
119
 
120
+ try:
121
+ from services.ffmpeg import ffmpeg_run
 
122
  with logfile_path.open("w") as log_file:
 
 
 
 
123
  ffmpeg_run(
124
+ video_path = Path(ffmpeg_input),
125
+ output_path = session_path,
126
  fps = int(ffmpeg_fps),
127
  qscale = int(ffmpeg_qscale),
128
  stream_file=log_file
129
  )
130
+ print("Done with ffmpeg")
131
+ except Exception as e:
132
+ print(f"Error - {e}")
133
+ # print('Error - Removing temporary directory', session_path)
134
+ # shutil.rmtree(session_path)
135
+ # Get the list of all the file of (session_path / "input")
136
+ list_of_jpgs = [str(f) for f in (session_path / "input").glob("*.jpg")]
137
+ return list_of_jpgs
138
+
139
+ def processColmap(
140
+ session_state_value: StateDict,
141
+ colmap_inputs: List[tempfile.NamedTemporaryFile],
142
+ colmap_camera: str,
143
+ enable_rerun: bool
144
+ ) -> Tuple[str, str]:
145
+ # Ensure that a session is active
146
+ if session_state_value["uuid"] is None:
147
+ return "", ""
148
+
149
+ # Set up session directory
150
+ session_path = GS_DIR / str(session_state_value['uuid'])
151
+ logfile_path = Path(session_path) / "colmap_log.txt"
152
+ logfile_path.touch()
153
+
154
+ rerunfile_path = Path(session_path) / "rerun_page.html"
155
+ rerunfile_path.touch()
156
 
157
+ (session_path / "input").mkdir(parents=True, exist_ok=True)
158
+ for file in colmap_inputs:
159
+ print("copying", file.name, "to", session_path / "input")
160
+ shutil.copy(file.name, session_path / "input")
161
+
162
+ try:
163
+ from services.colmap import colmap
164
+ with logfile_path.open("w") as log_file:
165
  colmap(
166
+ source_path=session_path,
167
  camera=str(colmap_camera),
168
  stream_file=log_file
169
  )
170
+ print("Done with colmap")
171
+
172
+ if enable_rerun:
173
+ from services.rerun import read_and_log_sparse_reconstruction
174
+ html = read_and_log_sparse_reconstruction(
175
+ exp_name = str(session_state_value['uuid']),
176
+ dataset_path = session_path,
177
+ )
178
+ print("Done with rerun")
179
+ else:
180
+ html = "Rerun was disable !"
181
+ with rerunfile_path.open("w") as rerunfile:
182
+ rerunfile.write(html)
183
+ except Exception as e:
184
+ print(f"Error - {e}")
185
+ # print('Error - Removing temporary directory', session_path)
186
+ # shutil.rmtree(session_path)
187
+
188
+ # zip the session_path folder
189
+ archive = shutil.make_archive("result", 'zip', GS_DIR, session_path)
190
+ print('Created zip file', archive)
191
+ return archive, rerunfile_path
192
+
193
+ def processGaussianSplattingCuda(
194
+ session_state_value: StateDict,
195
+ gs_input: tempfile.NamedTemporaryFile,
196
+ gs_iterations: int,
197
+ gs_convergence_rate: float,
198
+ gs_resolution: int,
199
+ ) -> Tuple[str, str]:
200
+ # Ensure that a session is active
201
+ if session_state_value["uuid"] is None:
202
+ return
203
+
204
+ # Set up session directory
205
+ session_path = GS_DIR / str(session_state_value['uuid'])
206
+ logfile_path = Path(session_path) / "gaussian_splatting_cuda_log.txt"
207
+ logfile_path.touch()
208
 
209
+ # Unzip the gs_input file to the session_path
210
+ shutil.unpack_archive(gs_input.name, session_path)
 
 
 
 
 
211
 
212
+ # Copy the gs_input directory to the session_path
213
+ # shutil.copytree(gs_input, session_path)
214
 
215
+ try:
216
+ from services.gaussian_splatting_cuda import gaussian_splatting_cuda
217
+ with logfile_path.open("w") as log_file:
218
  gaussian_splatting_cuda(
219
+ data_path = session_path,
220
+ output_path = session_path / "output",
221
  gs_command = str(Path(__file__).parent.absolute() / "build" / 'gaussian_splatting_cuda'),
222
+ iterations = int(gs_iterations),
223
+ convergence_rate = float(gs_convergence_rate),
224
+ resolution = int(gs_resolution),
225
  enable_cr_monitoring = False,
226
  force = False,
227
  empty_gpu_cache = False,
228
  stream_file = log_file
229
  )
230
+ print("Done with gaussian_splatting_cuda")
231
+
232
+ # Create a zip of the session_path folder
233
+ archive = shutil.make_archive("result", 'zip', GS_DIR, session_path)
234
+ print('Created zip file', archive)
235
+
236
+ # Move the zip file to the session_path folder
237
+ shutil.move(archive, session_path)
238
+ except Exception as e:
239
+ print(f"Error - {e}")
240
+ # print('Error - Removing temporary directory', session_path)
241
+ # shutil.rmtree(session_path)
242
+
243
+ return (
244
+ session_path / "output" / "final_point_cloud.ply",
245
+ session_path / "output" / "cameras.json",
246
+ )
247
 
248
+ def updateLog(logname:str, session_state_value: StateDict) -> str:
 
 
 
 
 
249
  if session_state_value["uuid"] is None:
250
  return ""
251
 
252
+ log_file = GS_DIR / str(session_state_value['uuid']) / f"{logname}.txt"
253
  if not log_file.exists():
254
  return ""
255
 
 
258
 
259
  return logs
260
 
261
+ def bindStep1Step2(step1_output: list[tempfile.NamedTemporaryFile]) -> list[str]:
262
+ return [file.name for file in step1_output]
263
+
264
+ def bindStep2Step3(step2_output: tempfile.NamedTemporaryFile) -> str:
265
+ return step2_output.name
266
+
267
+ def makeRerunIframe(rerun_html : tempfile.NamedTemporaryFile) -> str:
268
+ # If rerun_html is bigger than 100 MB, don't show it
269
+ print(f"Rerun file size: {os.stat(rerun_html.name).st_size}")
270
+ if os.stat(rerun_html.name).st_size > 100_000_000:
271
+ print("Rerun file is too big, not showing it")
272
+ return ""
273
+ filepath = rerun_html.name
274
+ print("filepath", filepath)
275
+ return f"""<iframe src="/file={filepath}" width="100%"; height="1080px"></iframe>"""
276
+
277
  with gr.Blocks() as demo:
278
+ #############################
279
+ ########## State ############
280
+ #############################
281
+
282
  session_state = gr.State({
283
  "uuid": None,
284
  })
285
 
286
+ #############################
287
+ ###### UI Components ########
288
+ #############################
289
+
290
+ gr.Markdown("# Gaussian Splatting Kit")
291
+ gr.Markdown("Click on the **Duplicate** button to create a new instance of this app.")
292
+ duplicate_button = gr.DuplicateButton()
293
+ gr.Markdown(value=home_markdown)
294
+
295
+ with gr.Tab("Slit Video into Frames"):
296
+ step1_description = gr.Markdown(step1_markdown)
297
+ # Video Frames
298
+ with gr.Row():
299
+ # Video Frames - Inputs
300
+ with gr.Column():
301
+ # Video Frames - Inputs - Video File
302
+ step1_input = gr.PlayableVideo(
303
+ format="mp4",
304
+ source="upload",
305
+ label="Upload a video",
306
+ include_audio=False
307
+ )
308
+ # Video Frames - Inputs - Parameters
309
+ with gr.Row(variant="panel"):
310
+ # Video Frames - Inputs - Parameters - FFMPEG FPS
311
+ step1_fps = gr.Number(
312
+ label="FFMPEG Fps",
313
+ value=1,
314
+ minimum=1,
315
+ maximum=5,
316
+ step=0.10,
317
+ )
318
+ # Video Frames - Inputs - Parameters - FFMPEG Qscale
319
+ step1_qscale = gr.Number(
320
+ label="FFMPEG Qscale",
321
+ value=1,
322
+ minimum=1,
323
+ maximum=5,
324
+ step=1,
325
+ )
326
+ # Video Frames - Outputs
327
+ with gr.Column():
328
+ # Video Frames - Outputs - Video File
329
+ step1_output = gr.File(
330
+ label="Frames",
331
+ file_count="directory",
332
+ type="file",
333
+ interactive=False,
334
+ )
335
+ # Video Frames - Outputs - Logs
336
+ step1_logs = gr.Textbox(
337
+ label="Videos Logs",
338
+ interactive=False,
339
+ show_copy_button=True
340
  )
341
+ # Video Frames - Process Button
342
+ step1_processbtn = gr.Button("Process", visible=True)
343
+ # Video Frames - Visualize
344
+ # Video Frames - Visualize -
345
+ # step1_visualize_gallery = gr.Gallery()
346
+
347
+ with gr.Tab("Colmap"):
348
+ step2_description = gr.Markdown(step2_markdown)
349
+ # Colmap
350
+ with gr.Row():
351
+ # Colmap - Inputs
352
+ with gr.Column():
353
+ # Colmap - Inputs - Frames Directory
354
+ step2_input = gr.File(
355
+ label="Upload a frames directory",
356
+ file_count="directory",
357
+ type="file",
358
+ interactive=True,
359
  )
360
+ # Colmap - Inputs - Parameters
361
+ with gr.Row(variant="panel"):
362
+ # Colmap - Inputs - Parameters - Colmap Camera
363
+ step2_camera = gr.Dropdown(
364
+ label="COLMAP Camera",
365
+ value="OPENCV",
366
+ choices=["OPENCV", "SIMPLE_PINHOLE", "PINHOLE", "SIMPLE_RADIAL", "RADIAL"],
367
+ )
368
+ # Colmap - Inputs - Parameters - Enable Rerun
369
+ step2_rerun = gr.Checkbox(
370
+ value=True,
371
+ label="Enable Rerun",
372
+ )
373
+ # Colmap - Outputs
374
+ with gr.Column():
375
+ # Colmap - Outputs - Video File
376
+ step2_output = gr.File(
377
+ label="Colmap",
378
+ file_count="single",
379
+ file_types=[".zip"],
380
+ type="file",
381
+ interactive=False,
382
+ )
383
+ # Colmap - Outputs - Logs
384
+ step2_logs = gr.Textbox(
385
+ label="Colmap Logs",
386
+ interactive=False,
387
+ show_copy_button=True
388
+ )
389
+
390
+ # Colmap - Process Button
391
+ step2_processbtn = gr.Button("Process", visible=True)
392
+
393
+ # Colmap - Visualize
394
+ # Colmap - Visualize - Rerun HTML File
395
+ step_2_visualize_html = gr.File(
396
+ label="Rerun HTML",
397
+ file_count="single",
398
+ file_types=[".html"],
399
+ type="file",
400
  interactive=False,
401
+ visible=False
402
  )
403
+ # Colmap - Visualize - Rerun HTML
404
+ step_2_visualize = gr.HTML("Rerun", visible=True)
 
 
 
405
 
406
+ with gr.Tab("Gaussian Splatting"):
407
+ step3_description = gr.Markdown(step3_markdown)
408
+ # Gaussian Splatting
409
+ with gr.Row():
410
+ # Gaussian Splatting - Inputs
411
+ with gr.Column():
412
+ # Gaussian Splatting - Inputs - Colmap + Frames
413
+ step3_input = gr.File(
414
+ label="Upload a colmap + frames directory",
415
+ file_count="single",
416
+ file_types=[".zip"],
417
+ type="file",
418
+ interactive=True,
419
+ )
420
+ # Gaussian Splatting - Inputs - Parameters
421
+ with gr.Row(variant="panel"):
422
+ # Gaussian Splatting - Inputs - Parameters - GS Iterations
423
+ step3_iterations = gr.Number(
424
+ label="GS Iterations",
425
+ value=10_000,
426
+ minimum=1_000,
427
+ maximum=50_000,
428
+ step=1_000,
429
+ )
430
+ # Gaussian Splatting - Inputs - Parameters - GS Convergence Rate
431
+ step3_convergence_rate = gr.Number(
432
+ label="GS Convergence Rate",
433
+ value=0.01,
434
+ minimum=0.01,
435
+ maximum=1,
436
+ step=0.01,
437
+ )
438
+ # Gaussian Splatting - Inputs - Parameters - GS Resolution
439
+ step3_resolution = gr.Number(
440
+ label="GS Resolution",
441
+ value=512,
442
+ minimum=128,
443
+ maximum=1024,
444
+ step=128,
445
+ )
446
+ # Gaussian Splatting - Outputs
447
+ with gr.Column():
448
+ with gr.Row():
449
+ # Gaussian Splatting - Outputs - PLY File
450
+ step3_output1 = gr.File(
451
+ label="PLY File",
452
+ file_count="single",
453
+ type="file",
454
+ interactive=False,
455
+ )
456
+
457
+ # Gaussian Splatting - Outputs - Cameras File
458
+ step3_output2 = gr.File(
459
+ label="Cameras File",
460
+ file_count="single",
461
+ type="file",
462
+ interactive=False,
463
+ )
464
+ # Gaussian Splatting - Outputs - Logs
465
+ step3_logs = gr.Textbox(
466
+ label="Gaussian Splatting Logs",
467
+ interactive=False,
468
+ show_copy_button=True
469
+ )
470
+ # Gaussian Splatting - Process Button
471
+ step3_processbtn = gr.Button("Process", visible=True)
472
+ # Gaussian Splatting - Visualize
473
+ # Gaussian Splatting - Visualize - Antimatter15 HTML
474
+ # step_3_visualize = gr.HTML(getAntimatter15HTML(), visible=True)
475
+ step_3_visualize = gr.Button("Visualize", visible=True, link="https://antimatter15.com/splat/")
476
+
477
+ #############################
478
+ ########## Events ###########
479
+ #############################
480
+ ### Step 1
481
+ # Make the process button visible when a video is uploaded
482
+ step1_upload_event = step1_input.upload(
483
+ fn=createStateSession,
484
+ inputs=[session_state],
485
+ outputs=[session_state]
486
+ ).success(
487
+ fn=makeButtonVisible,
488
+ inputs=[step1_processbtn],
489
+ outputs=[step1_processbtn],
490
  )
491
+ # Do the processing when the process button is clicked
492
+ step1_processevent = step1_processbtn.click(
493
+ fn=process_ffmpeg,
494
+ inputs=[session_state, step1_input, step1_fps, step1_qscale],
495
+ outputs=[step1_output],
496
+ ).success(
497
+ fn=bindStep1Step2,
498
+ inputs=[step1_output],
499
+ outputs=[step2_input],
500
+ ).success(
501
+ fn=makeButtonVisible,
502
+ inputs=[step2_processbtn],
503
+ outputs=[step2_processbtn],
504
  )
505
 
506
+ # Update the logs every 2 seconds
507
+ step1_logsevent = step1_processbtn.click(
508
+ fn=lambda session: updateLog("ffmpeg_log", session),
509
+ inputs=[session_state],
510
+ outputs=[step1_logs],
511
+ every=2,
512
+ )
513
+
514
+ ## Step 2
515
+ # Make the process button visible when a video is uploaded
516
+ step2_upload_event = step2_input.upload(
517
  fn=createStateSession,
518
+ inputs=[session_state],
519
+ outputs=[session_state]
520
+ ).success(
521
+ fn=makeButtonVisible,
522
+ inputs=[step2_processbtn],
523
+ outputs=[step2_processbtn],
524
+ )
525
+ # Do the processing when the process button is clicked
526
+ step2_processevent = step2_processbtn.click(
527
+ fn=processColmap,
528
+ inputs=[session_state, step2_input, step2_camera, step2_rerun],
529
+ outputs=[step2_output, step_2_visualize_html]
530
+ ).success(
531
+ fn=bindStep2Step3,
532
+ inputs=[step2_output],
533
+ outputs=[step3_input],
534
+ ).success(
535
+ fn=makeButtonVisible,
536
+ inputs=[step3_processbtn],
537
+ outputs=[step3_processbtn],
538
  ).then(
539
+ fn=makeRerunIframe,
540
+ inputs=[step_2_visualize_html],
541
+ outputs=[step_2_visualize],
542
+ )
543
+
544
+ # Update the logs every 2 seconds
545
+ step2_logsevent = step2_processbtn.click(
546
+ fn=lambda session: updateLog("colmap_log", session),
547
  inputs=[session_state],
548
+ outputs=[step2_logs],
549
  every=2,
550
  )
551
 
552
+ ## Step 3
553
+ # Make the process button visible when a video is uploaded
554
+ step3_upload_event = step3_input.upload(
555
+ fn=createStateSession,
556
  inputs=[session_state],
557
+ outputs=[session_state]
558
+ ).success(
559
+ fn=makeButtonVisible,
560
+ inputs=[step3_processbtn],
561
+ outputs=[step3_processbtn],
562
  )
563
+ # Do the processing when the process button is clicked
564
+ step3_processevent = step3_processbtn.click(
565
+ fn=processGaussianSplattingCuda,
566
+ inputs=[session_state, step3_input, step3_iterations, step3_convergence_rate, step3_resolution],
567
+ outputs=[step3_output1, step3_output2]
568
+ )
569
+ # .success(
570
+ # fn=lambda x: x,
571
+ # inputs=[step3_output1, step3_output2],
572
+ # outputs=[],
573
+ # )
574
+ # Update the logs every 2 seconds
575
+ step3_logsevent = step3_processbtn.click(
576
+ fn=lambda session: updateLog("gaussian_splatting_cuda_log", session),
577
  inputs=[session_state],
578
+ outputs=[step3_logs],
579
+ every=2,
580
  )
581
 
582
+ # reset_button = gr.ClearButton(
583
+ # components=[video_input, text_log, ffmpeg_fps, ffmpeg_qscale, colmap_camera],
584
+ # label="Reset",
585
+ # visible=False,
586
+ # )
587
+ # print(f"async (x) => {{ {getJS(url='http://0.0.0.0:7860/output/37c7ae54-7752-4e7b-8ba9-bab32c86b316/output/point_cloud/iteration_100/point_cloud.ply')} }}")
588
+
589
+ # show_button.click(
590
+ # fn=None,
591
+ # inputs=[],
592
+ # outputs=[],
593
+ # _js=f"async (x) => {{ {getJS(url='http://0.0.0.0:7860/output/37c7ae54-7752-4e7b-8ba9-bab32c86b316/output/point_cloud/iteration_100/point_cloud.ply')} }}"
594
+ # ).then(
595
+ # fn=None,
596
+ # inputs=[],
597
+ # outputs=[],
598
+ # _js=f"async (x) => {{ {getJS(url='http://0.0.0.0:7860/output/37c7ae54-7752-4e7b-8ba9-bab32c86b316/output/point_cloud/iteration_100/point_cloud.ply')} }}"
599
+ # )
600
 
601
  # gr.LoginButton, gr.LogoutButton
602
  # gr.HuggingFaceDatasetSaver
603
  # gr.OAuthProfile
 
 
 
 
 
604
 
605
  # with gr.Tab("jsdn"):
606
+ # input_mic = gr.HTML(getRerunHTML())
607
+
608
+
609
+
610
 
611
  demo.queue()
612
+ demo.launch()
613
 
614
  # mount Gradio app to FastAPI app
615
+ # app = gr.mount_gradio_app(app, demo, path="/")
616
 
617
 
618
+ # if __name__ == "__main__":
619
+ # uvicorn.run(app, host="0.0.0.0", port=7860, ws_max_size=16777216*1000)
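The new server.py drives the pipeline through services/ffmpeg.py and services/colmap.py, neither of which is part of this commit, so only the parameters exposed in the UI (fps, qscale, camera model) are known here. A minimal, hypothetical sketch of what those two wrappers presumably execute, assuming a stock ffmpeg install and COLMAP's automatic_reconstructor (the services' actual flags and directory layout may differ):

```python
# Hypothetical sketch only: services/ffmpeg.py and services/colmap.py are not in
# this diff. Flags and paths below are assumptions based on the UI parameters
# (fps, qscale, camera model) and on the <session>/input layout used by
# process_ffmpeg and processColmap in server.py.
import subprocess
from pathlib import Path

def extract_frames(video: Path, session_path: Path, fps: int = 1, qscale: int = 1) -> None:
    """Step 1: split the uploaded video into JPEG frames under <session>/input."""
    frames_dir = session_path / "input"
    frames_dir.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        ["ffmpeg", "-i", str(video),
         "-qscale:v", str(qscale),      # JPEG quality, 1 = best
         "-vf", f"fps={fps}",           # frames extracted per second of video
         str(frames_dir / "%04d.jpg")],
        check=True,
    )

def run_colmap(session_path: Path, camera: str = "OPENCV") -> None:
    """Step 2: sparse SfM reconstruction over the extracted frames."""
    subprocess.run(
        ["colmap", "automatic_reconstructor",
         "--workspace_path", str(session_path),
         "--image_path", str(session_path / "input"),
         "--camera_model", camera],
        check=True,
    )
```

Step 3 then consumes the resulting COLMAP workspace through services/gaussian_splatting_cuda.py, whose changes are shown in the next file.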
services/gaussian_splatting_cuda.py CHANGED
@@ -3,6 +3,8 @@ from pathlib import Path
3
  import subprocess
4
  from typing import Optional
5
  from rich.console import Console
 
 
6
 
7
  console = Console()
8
 
@@ -45,10 +47,16 @@ def gaussian_splatting_cuda_training(
45
Set custom average convergence rate for the training process. Requires the flag --enable-cr-monitoring to be set.
46
  """
47
 
 
 
48
  cmd = [
49
  gs_command,
50
- f"--data-path={data_path.as_posix()}"
51
- f"--output-path={output_path.as_posix()}"
52
  f"--iter={iterations}",
53
  # TODO: Enable these options and put the right defaults in the function signature
54
  # f"--convergence-rate={convergence_rate}",
@@ -105,4 +113,12 @@ def gaussian_splatting_cuda(
105
  force,
106
  empty_gpu_cache,
107
  stream_file
108
- )
 
 
3
  import subprocess
4
  from typing import Optional
5
  from rich.console import Console
6
+ import os
7
+ import shutil
8
 
9
  console = Console()
10
 
 
47
Set custom average convergence rate for the training process. Requires the flag --enable-cr-monitoring to be set.
48
  """
49
 
50
+ # export LC_ALL=C
51
+ # export LANG=C
52
+ os.environ["LC_ALL"] = "C"
53
+ os.environ["LANG"] = "C"
54
+
55
+
56
  cmd = [
57
  gs_command,
58
+ f"--data-path={data_path.as_posix()}",
59
+ f"--output-path={output_path.as_posix()}",
60
  f"--iter={iterations}",
61
  # TODO: Enable these options and put the right defaults in the function signature
62
  # f"--convergence-rate={convergence_rate}",
 
113
  force,
114
  empty_gpu_cache,
115
  stream_file
116
+ )
117
+
118
+ # Copy the /output/point_cloud/iteration_{iteration}/point_cloud.ply to the output_path
119
+ shutil.copyfile(
120
+ src=output_path / "point_cloud" / f"iteration_{iterations}" / "point_cloud.ply",
121
+ dst=output_path / "final_point_cloud.ply"
122
+ )
123
+
124
+ console.log(f"📄 Final point cloud saved to {output_path / 'final_point_cloud.ply'}")
services/rerun.py CHANGED
@@ -1,85 +1,103 @@
1
  import os
2
  import re
3
  from pathlib import Path
 
4
 
5
  import numpy as np
6
  import rerun as rr # pip install rerun-sdk
7
- from utils.read_write_model import read_model
8
 
9
  # From https://github.com/rerun-io/rerun/tree/main/examples/python/structure_from_motion
10
  def read_and_log_sparse_reconstruction(
11
  exp_name: str,
12
  dataset_path: Path,
13
- output_path: Path,
14
- filter_output: bool = False,
15
- filter_min_visible: int = 2_000
16
- ) -> None:
17
- rr.init(exp_name)
18
-
19
- cameras, images, points3D = read_model(dataset_path / "sparse", ext=".bin")
20
-
21
- if filter_output:
22
- # Filter out noisy points
23
- points3D = {id: point for id, point in points3D.items() if point.rgb.any() and len(point.image_ids) > 4}
24
-
25
- rr.log_view_coordinates("/", up="-Y", timeless=True)
26
-
27
- # Iterate through images (video frames) logging data related to each frame.
28
- for image in sorted(images.values(), key=lambda im: im.name): # type: ignore[no-any-return]
29
- image_file = dataset_path / "images" / image.name
30
-
31
- if not os.path.exists(image_file):
32
- continue
33
-
34
- # COLMAP sets image ids that don't match the original video frame
35
- idx_match = re.search(r"\d+", image.name)
36
- assert idx_match is not None
37
- frame_idx = int(idx_match.group(0))
38
-
39
- quat_xyzw = image.qvec[[1, 2, 3, 0]] # COLMAP uses wxyz quaternions
40
- camera = cameras[image.camera_id]
41
- np.array([1.0, 1.0])
42
-
43
- visible = [id != -1 and points3D.get(id) is not None for id in image.point3D_ids]
44
- visible_ids = image.point3D_ids[visible]
45
-
46
- if filter_output and len(visible_ids) < filter_min_visible:
47
- continue
48
-
49
- visible_xyzs = [points3D[id] for id in visible_ids]
50
- visible_xys = image.xys[visible]
51
-
52
- rr.set_time_sequence("frame", frame_idx)
53
-
54
- points = [point.xyz for point in visible_xyzs]
55
- point_colors = [point.rgb for point in visible_xyzs]
56
- point_errors = [point.error for point in visible_xyzs]
57
-
58
- rr.log_scalar("plot/avg_reproj_err", np.mean(point_errors), color=[240, 45, 58])
59
-
60
- rr.log_points("points", points, colors=point_colors, ext={"error": point_errors})
61
-
62
- # COLMAP's camera transform is "camera from world"
63
- rr.log_transform3d(
64
- "camera", rr.TranslationRotationScale3D(image.tvec, rr.Quaternion(xyzw=quat_xyzw)), from_parent=True
65
- )
66
- rr.log_view_coordinates("camera", xyz="RDF") # X=Right, Y=Down, Z=Forward
67
-
68
- # Log camera intrinsics
69
- assert camera.model == "PINHOLE"
70
- rr.log_pinhole(
71
- "camera/image",
72
- width=camera.width,
73
- height=camera.height,
74
- focal_length_px=camera.params[:2],
75
- principal_point_px=camera.params[2:],
76
- )
77
-
78
- rr.log_image_file("camera/image", img_path=dataset_path / "images" / image.name)
79
- rr.log_points("camera/image/keypoints", visible_xys, colors=[34, 138, 167])
80
-
81
- rerun_output_directory = output_path / "rerun"
82
- rerun_output_directory.mkdir(parents=True, exist_ok=True)
83
- rerun_output_file = rerun_output_directory / "recording.rrd"
84
- rr.save(rerun_output_file.as_posix())
85
-
 
 
1
  import os
2
  import re
3
  from pathlib import Path
4
+ from typing import Optional
5
 
6
  import numpy as np
7
  import rerun as rr # pip install rerun-sdk
8
+ from services.utils.read_write_model import read_model
9
 
10
  # From https://github.com/rerun-io/rerun/tree/main/examples/python/structure_from_motion
11
  def read_and_log_sparse_reconstruction(
12
  exp_name: str,
13
  dataset_path: Path,
14
+ max_image_number: Optional[int] = 15,
15
+ filter_output: bool = True,
16
+ filter_min_visible: int = 50,
17
+ filter_max_visible: int = 500
18
+ ) -> str:
19
+ try:
20
+ rr.init(exp_name)
21
+ rec = rr.memory_recording()
22
+
23
+ cameras, images, points3D = read_model(dataset_path / "sparse", ext=".bin")
24
+ print(f"Loaded {len(cameras)} cameras, {len(images)} images, and {len(points3D)} points3D")
25
+ if filter_output:
26
+ # Filter out noisy points
27
+ points3D = {id: point for id, point in points3D.items() if point.rgb.any() and len(point.image_ids) > 4}
28
+
29
+ rr.log_view_coordinates("/", up="-Y", timeless=True)
30
+ print(f"Number of image frames: {len(images)}")
31
+ if max_image_number is not None:
32
+ # Sample the image sequence to reduce output size
33
+ image_ids = sorted(images.keys())
34
+ image_ids = np.random.permutation(image_ids)[:max_image_number]
35
+ images = {id: images[id] for id in image_ids}
36
+ print(f"Number of image frames: {len(images)}")
37
+
38
+ # Iterate through images (video frames) logging data related to each frame.
39
+ for image in sorted(images.values(), key=lambda im: im.name): # type: ignore[no-any-return]
40
+ image_file = dataset_path / "images" / image.name
41
+
42
+ if not os.path.exists(image_file):
43
+ continue
44
+
45
+ # COLMAP sets image ids that don't match the original video frame
46
+ idx_match = re.search(r"\d+", image.name)
47
+ assert idx_match is not None
48
+ frame_idx = int(idx_match.group(0))
49
+
50
+ quat_xyzw = image.qvec[[1, 2, 3, 0]] # COLMAP uses wxyz quaternions
51
+ camera = cameras[image.camera_id]
52
+ np.array([1.0, 1.0])
53
+
54
+ visible = [id != -1 and points3D.get(id) is not None for id in image.point3D_ids]
55
+
56
+ print(f"Frame {frame_idx} has {np.sum(visible)} visible points")
57
+ # Randomly sample points to reduce output size
58
+ if filter_output and np.sum(visible) > filter_max_visible:
59
+ visible_indices = np.random.permutation(np.where(visible)[0])[:filter_max_visible]
60
+ visible = np.zeros_like(visible, dtype=bool)
61
+ visible[visible_indices] = True
62
+ print(f"Frame {frame_idx} has {np.sum(visible)} visible points after sampling")
63
+
64
+ visible_ids = image.point3D_ids[visible]
65
+
66
+ if filter_output and len(visible_ids) < filter_min_visible:
67
+ continue
68
+
69
+ visible_xyzs = [points3D[id] for id in visible_ids]
70
+ visible_xys = image.xys[visible]
71
+
72
+ rr.set_time_sequence("frame", frame_idx)
73
+
74
+ points = [point.xyz for point in visible_xyzs]
75
+ point_colors = [point.rgb for point in visible_xyzs]
76
+ point_errors = [point.error for point in visible_xyzs]
77
+
78
+ rr.log_scalar("plot/avg_reproj_err", np.mean(point_errors), color=[240, 45, 58])
79
+
80
+ rr.log_points("points", points, colors=point_colors, ext={"error": point_errors})
81
+
82
+ # COLMAP's camera transform is "camera from world"
83
+ rr.log_transform3d(
84
+ "camera", rr.TranslationRotationScale3D(image.tvec, rr.Quaternion(xyzw=quat_xyzw)), from_parent=True
85
+ )
86
+ rr.log_view_coordinates("camera", xyz="RDF") # X=Right, Y=Down, Z=Forward
87
+
88
+ # Log camera intrinsics
89
+ assert camera.model == "PINHOLE"
90
+ rr.log_pinhole(
91
+ "camera/image",
92
+ width=camera.width,
93
+ height=camera.height,
94
+ focal_length_px=camera.params[:2],
95
+ principal_point_px=camera.params[2:],
96
+ )
97
+
98
+ rr.log_image_file("camera/image", img_path=dataset_path / "images" / image.name)
99
+ rr.log_points("camera/image/keypoints", visible_xys, colors=[34, 138, 167])
100
+
101
+ return rec.as_html()
102
+ except Exception as e:
103
+ print(e)
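The net effect of this rewrite is that the sparse reconstruction is no longer saved to a .rrd file on disk: the whole recording is kept in memory and serialized to a self-contained HTML viewer, which server.py writes to rerun_page.html and embeds in an iframe. To keep that HTML small enough to embed, the new code also subsamples frames (max_image_number) and caps the visible points per frame (filter_max_visible). A minimal sketch of the in-memory recording pattern, assuming the same rerun-sdk API generation as this commit (rr.memory_recording, rr.log_points, as_html; newer SDK releases have reworked this API):

```python
# Minimal sketch of the memory-recording pattern used above. Assumes the
# rerun-sdk API generation used by this commit; names may differ in newer SDKs.
import numpy as np
import rerun as rr

def points_to_html(exp_name: str, points: np.ndarray, colors: np.ndarray) -> str:
    rr.init(exp_name)
    rec = rr.memory_recording()           # keep the recording in RAM, no .rrd file
    rr.log_points("points", points, colors=colors)
    return rec.as_html()                  # self-contained HTML viewer for an <iframe>

if __name__ == "__main__":
    pts = np.random.rand(1_000, 3)
    cols = (np.random.rand(1_000, 3) * 255).astype(np.uint8)
    with open("rerun_page.html", "w") as f:
        f.write(points_to_html("demo", pts, cols))
```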