jiaweir committed
Commit • b73b3dd
1 Parent(s): cdc7dcc

optimize

Files changed:
- app.py +5 -5
- configs/4d_demo.yaml +1 -1
- lgm/core/models.py +2 -1
app.py CHANGED

@@ -224,8 +224,8 @@ def optimize_stage_2(image_block: Image.Image, seed_slider: int):
     process_dg4d(os.path.join("configs", "4d_demo.yaml"), os.path.join("tmp_data", f"{img_hash}_rgba.png"), guidance_zero123)
     # os.rename(os.path.join('logs', f'{img_hash}_rgba_frames'), os.path.join('logs', f'{img_hash}_{seed_slider:03d}_rgba_frames'))
     image_dir = os.path.join('logs', f'{img_hash}_rgba_frames')
-
-    return [image_dir+f'/{t:03d}.ply' for t in range(28)]
+    return os.path.join('vis_data', f'{img_hash}_rgba.mp4'), [image_dir+f'/{t:03d}.ply' for t in range(28)]
+    # return [image_dir+f'/{t:03d}.ply' for t in range(28)]


 if __name__ == "__main__":

@@ -287,8 +287,8 @@ if __name__ == "__main__":
         dirving_video = gr.Video(label="video",height=290)
         with gr.Column(scale=5):
             obj3d = gr.Video(label="3D Model",height=290)
-
-            obj4d = Model4DGS(label="4D Model", height=500, fps=
+            video4d = gr.Video(label="4D video",height=290)
+            obj4d = Model4DGS(label="4D Model", height=500, fps=28)


     img_run_btn.click(check_img_input, inputs=[image_block], queue=False).success(optimize_stage_0,

@@ -304,7 +304,7 @@ if __name__ == "__main__":
                             seed_slider2],
                     outputs=[
                             obj3d])
-    fourd_run_btn.click(check_video_3d_input, inputs=[image_block], queue=False).success(optimize_stage_2, inputs=[image_block, seed_slider], outputs=[obj4d])
+    fourd_run_btn.click(check_video_3d_input, inputs=[image_block], queue=False).success(optimize_stage_2, inputs=[image_block, seed_slider], outputs=[video4d, obj4d])

     # demo.queue().launch(share=True)
     demo.queue(max_size=10) # <-- Sets up a queue with default parameters
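For context, the app.py changes can be read together: optimize_stage_2 now returns a (video path, list of .ply frames) pair, and the chained .success() event maps that pair onto the new video4d component and the obj4d viewer. The sketch below is a minimal, self-contained approximation of that wiring, not the Space's actual app.py; the stub bodies, the gr.Files stand-in for the Model4DGS component, and the example paths are assumptions.

import gradio as gr

def check_video_3d_input(image):
    # Placeholder validation step; the real check lives in app.py.
    if image is None:
        raise gr.Error("Run the earlier stages first")

def optimize_stage_2(image, seed):
    # Placeholder: the real function runs process_dg4d and returns
    # (rendered 4D video path, list of 28 per-frame .ply files).
    return "vis_data/example_rgba.mp4", [f"logs/example_rgba_frames/{t:03d}.ply" for t in range(28)]

with gr.Blocks() as demo:
    image_block = gr.Image(type="pil")
    seed_slider = gr.Slider(0, 100, value=0, step=1, label="seed")
    video4d = gr.Video(label="4D video", height=290)
    obj4d = gr.Files(label="4D Model frames")  # stand-in for Model4DGS in this sketch
    fourd_run_btn = gr.Button("Generate 4D")
    # .success() fires only if the preceding check did not raise, and the
    # two-element return value fills outputs=[video4d, obj4d] in order.
    fourd_run_btn.click(check_video_3d_input, inputs=[image_block], queue=False).success(
        optimize_stage_2, inputs=[image_block, seed_slider], outputs=[video4d, obj4d])

demo.queue(max_size=10)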
configs/4d_demo.yaml CHANGED

@@ -30,7 +30,7 @@ lambda_svd: 0
 # training batch size per iter
 batch_size: 7
 # training iterations for stage 1
-iters:
+iters: 300
 # training iterations for stage 2
 iters_refine: 50
 # training camera radius
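The config change sets the stage-1 optimization budget to 300 iterations. Below is a minimal sketch of reading such a flat YAML config with PyYAML; the Space's actual loader may differ (e.g., an OmegaConf-style parser), so treat it as illustrative only.

import yaml

with open("configs/4d_demo.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["batch_size"])    # 7  (training batch size per iter)
print(cfg["iters"])         # 300 after this commit (stage-1 iterations)
print(cfg["iters_refine"])  # 50 (stage-2 refinement iterations)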
lgm/core/models.py CHANGED

@@ -155,7 +155,8 @@ class LGM(nn.Module):
         gaussians_orig_res = torch.cat([pos, opacity, scale, rotation, rgbs], dim=-1) # [B, N, 14]


-        return gaussians, gaussians_orig_res
+        # return gaussians, gaussians_orig_res
+        return gaussians_orig_res, gaussians_orig_res


     def forward(self, data, step_ratio=1):
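The models.py change keeps the method's two-value return signature but hands back the original-resolution Gaussian tensor in both positions, so existing call sites that unpack two values keep working. The sketch below illustrates the 14-channel per-Gaussian layout implied by the torch.cat in the diff; the batch size, point count, and the variable names low_res/orig_res are illustrative assumptions, not names from the repo.

import torch

B, N = 1, 4096                      # example batch size and Gaussian count (assumed)
gaussians = torch.randn(B, N, 14)   # [pos(3) | opacity(1) | scale(3) | rotation(4) | rgb(3)]

pos      = gaussians[..., 0:3]
opacity  = gaussians[..., 3:4]
scale    = gaussians[..., 4:7]
rotation = gaussians[..., 7:11]
rgbs     = gaussians[..., 11:14]

# After this commit the method returns the original-resolution tensor twice,
# so callers unpacking two values both receive the same [B, N, 14] tensor:
low_res, orig_res = gaussians, gaussians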