Chao Xu committed
Commit 1d24bdc
1 Parent(s): 22945de

support .glb export and update README

Files changed (2):
  1. README.md +20 -4
  2. app.py +11 -5
README.md CHANGED
@@ -1,13 +1,29 @@
 ---
-title: One 2 3 45
-emoji: 📈
+title: One-2-3-45
+emoji: 📸🚀🌟
 colorFrom: red
 colorTo: yellow
 sdk: gradio
 sdk_version: 3.36.1
 app_file: app.py
-pinned: false
+pinned: true
 license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# One-2-3-45: Any Single Image to 3D Mesh in 45 Seconds without Per-Shape Optimization
+
+Paper: https://arxiv.org/abs/2306.16928
+Code: https://github.com/One-2-3-45/One-2-3-45
+
+## BibTeX
+
+```bibtex
+@misc{liu2023one2345,
+      title={One-2-3-45: Any Single Image to 3D Mesh in 45 Seconds without Per-Shape Optimization},
+      author={Minghua Liu and Chao Xu and Haian Jin and Linghao Chen and Mukund Varma T and Zexiang Xu and Hao Su},
+      year={2023},
+      eprint={2306.16928},
+      archivePrefix={arXiv},
+      primaryClass={cs.CV}
+}
+```
app.py CHANGED
@@ -348,7 +348,7 @@ def stage1_run(models, device, cam_vis, tmp_dir,
     return (rerun_all, *reset, *outputs)
 
 def stage2_run(models, device, tmp_dir,
-               elev, scale, rerun_all=[], stage2_steps=50):
+               elev, scale, is_glb=False, rerun_all=[], stage2_steps=50):
     flag_lower_cam = 90-int(elev["label"]) <= 75
     is_rerun = True if rerun_all else False
     model = models['turncam'].half()
@@ -373,7 +373,8 @@ def stage2_run(models, device, tmp_dir,
     os.chdir(main_dir_path)
 
     ply_path = os.path.join(tmp_dir, f"meshes_val_bg/lod0/mesh_00215000_gradio_lod0.ply")
-    mesh_path = os.path.join(tmp_dir, "mesh.obj")
+    mesh_ext = ".glb" if is_glb else ".obj"
+    mesh_path = os.path.join(tmp_dir, f"mesh{mesh_ext}")
     # Read the textured mesh from .ply file
     mesh = trimesh.load_mesh(ply_path)
     axis = [1, 0, 0]
@@ -388,7 +389,10 @@ def stage2_run(models, device, tmp_dir,
     mesh.vertices[:, 0] = -mesh.vertices[:, 0]
     mesh.faces = np.fliplr(mesh.faces)
     # Export the mesh as .obj file with colors
-    mesh.export(mesh_path, file_type='obj', include_color=True)
+    if not is_glb:
+        mesh.export(mesh_path, file_type='obj', include_color=True)
+    else:
+        mesh.export(mesh_path, file_type='glb')
     torch.cuda.empty_cache()
 
     if not is_rerun:
@@ -523,6 +527,8 @@ def run_demo(
                              label='Diffusion guidance scale')
     steps_slider = gr.Slider(5, 200, value=75, step=5,
                              label='Number of diffusion inference steps')
+    glb_chk = gr.Checkbox(
+        False, label='Export the mesh in .glb format')
 
     run_btn = gr.Button('Run Generation', variant='primary', interactive=False)
     guide_text = gr.Markdown(_USER_GUIDE, visible=True)
@@ -642,7 +648,7 @@ def run_demo(
                  outputs=[elev_output, vis_output, *views]
         ).success(fn=partial(update_guide, _GEN_2), outputs=[guide_text], queue=False
         ).success(fn=partial(stage2_run, models, device),
-                  inputs=[tmp_dir, elev_output, scale_slider],
+                  inputs=[tmp_dir, elev_output, scale_slider, glb_chk],
                   outputs=[mesh_output]
         ).success(fn=partial(update_guide, _DONE), outputs=[guide_text], queue=False)
 
@@ -652,7 +658,7 @@ def run_demo(
                  outputs=[rerun_idx, *btn_retrys, *views]
         ).success(fn=partial(update_guide, _REGEN_1), outputs=[guide_text], queue=False)
         regen_mesh_btn.click(fn=partial(stage2_run, models, device),
-                             inputs=[tmp_dir, elev_output, scale_slider, rerun_idx],
+                             inputs=[tmp_dir, elev_output, scale_slider, glb_chk, rerun_idx],
                              outputs=[mesh_output, rerun_idx, regen_view_btn, regen_mesh_btn]
         ).success(fn=partial(update_guide, _REGEN_2), outputs=[guide_text], queue=False)
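
For reference, a minimal sketch of the export path added in this commit, written as a standalone helper. The function name and paths are hypothetical; it only assumes `trimesh` is installed and uses the same `load_mesh`/`export` calls that appear in the diff above:

```python
# Hypothetical standalone helper mirroring the export logic added in this commit.
# Assumes `trimesh` is installed; paths are illustrative only.
import os
import trimesh

def export_mesh(ply_path, out_dir, is_glb=False):
    """Load a textured .ply mesh and export it as .obj (with colors) or .glb."""
    mesh = trimesh.load_mesh(ply_path)
    mesh_ext = ".glb" if is_glb else ".obj"
    mesh_path = os.path.join(out_dir, f"mesh{mesh_ext}")
    if not is_glb:
        # OBJ export, keeping vertex colors as in the original code path
        mesh.export(mesh_path, file_type='obj', include_color=True)
    else:
        # GLB packs geometry and appearance into a single binary file
        mesh.export(mesh_path, file_type='glb')
    return mesh_path
```

In the app itself, the `glb_chk` checkbox value is simply passed through as the `is_glb` argument of `stage2_run`, which selects between the two export branches.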