perler committed on
Commit
8fee2d3
1 Parent(s): 4bc9faf

towards PPSurf model, checking UI

Browse files
Files changed (2) hide show
  1. app.py +96 -51
  2. model.py +0 -93
app.py CHANGED
@@ -4,91 +4,136 @@ from __future__ import annotations
4
 
5
  import os
6
  import datetime
 
7
 
8
  import gradio as gr
9
  import spaces
10
 
11
 
12
  @spaces.GPU(duration=60 * 3)
13
- def run_on_gpu(input_shape, text, seed, guidance_scale):
14
- print('Starting inference at {}'.format(datetime.datetime.now()))
15
- from model import Model
16
- model = Model()
17
- res_generator = model.run(shape_path=input_shape, text=text, seed=seed, guidance_scale=guidance_scale)
 
 
 
 
18
  print('Finished inference at {}'.format(datetime.datetime.now()))
19
- return list(res_generator)
20
 
 
21
 
22
 
23
  def main():
24
- DESCRIPTION = '''# [TEXTure](https://github.com/TEXTurePaper/TEXTurePaper)
25
 
26
- - This demo only accepts as input `.obj` files with less than 100,000 faces.
27
- - Inference takes about 10 minutes on a T4 GPU.
 
 
 
 
 
 
28
  '''
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
30
- DESCRIPTION += (f'\n<p>For faster inference without waiting in queue, '
31
  f'you may duplicate the space and upgrade to GPU in settings. '
32
  f'<a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true">'
33
  f'<img style="display: inline; margin-top: 0em; margin-bottom: 0em" '
34
  f'src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>')
35
 
36
  with gr.Blocks(css='style.css') as demo:
37
- gr.Markdown(DESCRIPTION)
38
  with gr.Row():
39
  with gr.Column():
40
- input_shape = gr.Model3D(label='Input 3D mesh')
41
- text = gr.Text(label='Text')
42
- seed = gr.Slider(label='Seed',
43
- minimum=0,
44
- maximum=100000,
45
- value=3,
46
- step=1)
47
- guidance_scale = gr.Slider(label='Guidance scale',
48
- minimum=0,
49
- maximum=50,
50
- value=7.5,
51
- step=0.1)
52
- run_button = gr.Button('Run')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  with gr.Column():
54
  progress_text = gr.Text(label='Progress')
55
  with gr.Tabs():
56
- with gr.TabItem(label='Images from each viewpoint'):
57
- viewpoint_images = gr.Gallery(show_label=False, columns=4)
58
- with gr.TabItem(label='Result 3D model'):
59
  result_3d_model = gr.Model3D(show_label=False)
60
  with gr.TabItem(label='Output mesh file'):
61
  output_file = gr.File(show_label=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  with gr.Row():
63
- examples = [
64
- ['shapes/dragon1.obj', 'a photo of a dragon', 0, 7.5],
65
- ['shapes/dragon2.obj', 'a photo of a dragon', 0, 7.5],
66
- ['shapes/eagle.obj', 'a photo of an eagle', 0, 7.5],
67
- ['shapes/napoleon.obj', 'a photo of Napoleon Bonaparte', 3, 7.5],
68
- ['shapes/nascar.obj', 'A next gen nascar', 2, 10],
69
- ]
70
- gr.Examples(examples=examples,
71
- inputs=[
72
- input_shape,
73
- text,
74
- seed,
75
- guidance_scale,
76
- ],
77
- outputs=[
78
- result_3d_model,
79
- output_file,
80
- ],
81
- cache_examples=False)
82
 
83
  run_button.click(fn=run_on_gpu,
84
  inputs=[
85
- input_shape,
86
- text,
87
- seed,
88
- guidance_scale,
 
89
  ],
90
  outputs=[
91
- viewpoint_images,
92
  result_3d_model,
93
  output_file,
94
  progress_text,
 
4
 
5
  import os
6
  import datetime
7
+ import subprocess
8
 
9
  import gradio as gr
10
  import spaces
11
 
12
 
13
  @spaces.GPU(duration=60 * 3)
14
+ def run_on_gpu(input_shape):
15
+ print('Started inference at {}'.format(datetime.datetime.now()))
16
+ call_base = ['python', 'ppsurf/pps.py', 'rec']
17
+ call_args = ['pps.py',
18
+ 'rec',
19
+ 'data/{}'.format(input_shape),
20
+ 'results/rec/{}'.format(input_shape),
21
+ ]
22
+ res = subprocess.check_output(call_base + call_args)
23
  print('Finished inference at {}'.format(datetime.datetime.now()))
 
24
 
25
+ return res
26
 
27
 
28
  def main():
29
+ description = '''# [PPSurf](https://github.com/cg-tuwien/ppsurf)
30
 
31
+ Supported file formats: PLY, STL, OBJ and other mesh files, XYZ as whitespace-separated text file,
32
+ NPY and NPZ (key='arr_0'), LAS and LAZ (version 1.0-1.4), COPC and CRS.
33
+ Best results for 50k-250k points.
34
+
35
+ This method is meant for scans of single and few objects.
36
+ Quality for scenes and landscapes will be lower.
37
+
38
+ Inference takes about 2 minutes.
39
  '''
40
+
41
+ def convert_to_ply(input_point_cloud_upload: gr.File):
42
+ print('inputs:', input_point_cloud_upload.value)
43
+ input_shape = input_point_cloud_upload.value[0]
44
+ if not input_shape.endswith('.ply'):
45
+ # load file
46
+ from ppsurf.source.occupancy_data_module import OccupancyDataModule
47
+ pts_np = OccupancyDataModule.load_pts(input_shape)
48
+
49
+ # convert to ply
50
+ import trimesh
51
+ mesh = trimesh.Trimesh(vertices=pts_np[:, :3])
52
+ input_shape = input_shape + '.ply'
53
+ mesh.export(input_shape)
54
+
55
+ # show in viewer
56
+ input_tabs.selected = 'pc_viewer'
57
+ input_point_cloud_viewer.value = input_shape
58
+
59
  if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
60
+ description += (f'\n<p>For faster inference without waiting in queue, '
61
  f'you may duplicate the space and upgrade to GPU in settings. '
62
  f'<a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true">'
63
  f'<img style="display: inline; margin-top: 0em; margin-bottom: 0em" '
64
  f'src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>')
65
 
66
  with gr.Blocks(css='style.css') as demo:
67
+ gr.Markdown(description)
68
  with gr.Row():
69
  with gr.Column():
70
+ with gr.Tabs() as input_tabs:
71
+ with gr.TabItem(label='Input Point Cloud Upload', id='pc_upload'):
72
+ input_point_cloud_upload = gr.File(
73
+ show_label=False, file_count='single')
74
+ input_point_cloud_upload.upload(fn=convert_to_ply,
75
+ inputs=[
76
+ input_point_cloud_upload,
77
+ ],
78
+ outputs=[
79
+ # input_point_cloud_viewer, # not available here
80
+ ])
81
+ # input_point_cloud_upload.attach_load_event(convert_to_ply, every=None)
82
+ with gr.TabItem(label='Input Point Cloud Viewer', id='pc_viewer'):
83
+ input_point_cloud_viewer = gr.Model3D(show_label=False)
84
+ gen_resolution_global = gr.Slider(
85
+ label='Grid Resolution (larger for more details)',
86
+ minimum=17, maximum=513, value=129, step=2)
87
+ padding_factor = gr.Slider(
88
+ label='Padding Factor (larger if object is cut off at boundaries)',
89
+ minimum=0, maximum=1.0, value=0.05, step=1)
90
+ gen_subsample_manifold_iter = gr.Slider(
91
+ label='Subsample Manifold Iterations (larger for larger point clouds)',
92
+ minimum=3, maximum=30, value=10, step=1)
93
+ gen_refine_iter = gr.Slider(
94
+ label='Edge Refinement Iterations (larger for more details)',
95
+ minimum=3, maximum=30, value=10, step=1)
96
+ # run_button = gr.Button('Run')
97
  with gr.Column():
98
  progress_text = gr.Text(label='Progress')
99
  with gr.Tabs():
100
+ with gr.TabItem(label='Reconstructed 3D model'):
 
 
101
  result_3d_model = gr.Model3D(show_label=False)
102
  with gr.TabItem(label='Output mesh file'):
103
  output_file = gr.File(show_label=False)
104
+ # with gr.Row():
105
+ # examples = [
106
+ # ['shapes/dragon1.obj', 'a photo of a dragon', 0, 7.5],
107
+ # ['shapes/dragon2.obj', 'a photo of a dragon', 0, 7.5],
108
+ # ['shapes/eagle.obj', 'a photo of an eagle', 0, 7.5],
109
+ # ['shapes/napoleon.obj', 'a photo of Napoleon Bonaparte', 3, 7.5],
110
+ # ['shapes/nascar.obj', 'A next gen nascar', 2, 10],
111
+ # ]
112
+ # gr.Examples(examples=examples,
113
+ # inputs=[
114
+ # input_point_cloud_viewer,
115
+ # text,
116
+ # seed,
117
+ # guidance_scale,
118
+ # ],
119
+ # outputs=[
120
+ # result_3d_model,
121
+ # output_file,
122
+ # ],
123
+ # cache_examples=False)
124
+
125
  with gr.Row():
126
+ run_button = gr.Button('=> Run PPSurf =>')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
 
128
  run_button.click(fn=run_on_gpu,
129
  inputs=[
130
+ input_point_cloud_viewer,
131
+ gen_resolution_global,
132
+ padding_factor,
133
+ gen_subsample_manifold_iter,
134
+ gen_refine_iter,
135
  ],
136
  outputs=[
 
137
  result_3d_model,
138
  output_file,
139
  progress_text,
model.py DELETED
@@ -1,93 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import datetime
4
- import pathlib
5
- import shlex
6
- import subprocess
7
- import sys
8
- from typing import Generator, Optional
9
-
10
- import gradio as gr
11
- import trimesh
12
-
13
- sys.path.append('TEXTurePaper')
14
-
15
- from src.configs.train_config import GuideConfig, LogConfig, TrainConfig
16
- from src.training.trainer import TEXTure
17
-
18
-
19
- class Model:
20
- def __init__(self):
21
- self.max_num_faces = 100000
22
-
23
- def load_config(self, shape_path: str, text: str, seed: int,
24
- guidance_scale: float) -> TrainConfig:
25
- text += ', {} view'
26
-
27
- log = LogConfig(exp_name=self.gen_exp_name())
28
- guide = GuideConfig(text=text)
29
- guide.background_img = 'TEXTurePaper/textures/brick_wall.png'
30
- guide.shape_path = 'TEXTurePaper/shapes/spot_triangulated.obj'
31
- config = TrainConfig(log=log, guide=guide)
32
-
33
- config.guide.shape_path = shape_path
34
- config.optim.seed = seed
35
- config.guide.guidance_scale = guidance_scale
36
- return config
37
-
38
- def gen_exp_name(self) -> str:
39
- now = datetime.datetime.now()
40
- return now.strftime('%Y-%m-%d-%H-%M-%S')
41
-
42
- def check_num_faces(self, path: str) -> bool:
43
- with open(path) as f:
44
- lines = [line for line in f.readlines() if line.startswith('f')]
45
- return len(lines) <= self.max_num_faces
46
-
47
- def zip_results(self, exp_dir: pathlib.Path) -> str:
48
- mesh_dir = exp_dir / 'mesh'
49
- out_path = f'{exp_dir.name}.zip'
50
- subprocess.run(shlex.split(f'zip -r {out_path} {mesh_dir}'))
51
- return out_path
52
-
53
- def run(
54
- self, shape_path: str, text: str, seed: int, guidance_scale: float
55
- ) -> Generator[tuple[list[str], Optional[str], Optional[str], str], None, None]:
56
- if not shape_path.endswith('.obj'):
57
- raise gr.Error('The input file is not .obj file.')
58
- if not self.check_num_faces(shape_path):
59
- raise gr.Error('The number of faces is over 100,000.')
60
-
61
- config = self.load_config(shape_path, text, seed, guidance_scale)
62
- trainer = TEXTure(config)
63
-
64
- trainer.mesh_model.train()
65
-
66
- total_steps = len(trainer.dataloaders['train'])
67
- for step, data in enumerate(trainer.dataloaders['train'], start=1):
68
- trainer.paint_step += 1
69
- trainer.paint_viewpoint(data)
70
- trainer.evaluate(trainer.dataloaders['val'],
71
- trainer.eval_renders_path)
72
- trainer.mesh_model.train()
73
-
74
- sample_image_dir = config.log.exp_dir / 'vis' / 'eval'
75
- sample_image_paths = sorted(
76
- sample_image_dir.glob(f'step_{trainer.paint_step:05d}_*.jpg'))
77
- sample_image_paths = [
78
- path.as_posix() for path in sample_image_paths
79
- ]
80
- yield sample_image_paths, None, None, f'{step}/{total_steps}'
81
-
82
- trainer.mesh_model.change_default_to_median()
83
-
84
- save_dir = trainer.exp_path / 'mesh'
85
- save_dir.mkdir(exist_ok=True, parents=True)
86
- trainer.mesh_model.export_mesh(save_dir)
87
- model_path = save_dir / 'mesh.obj'
88
- mesh = trimesh.load(model_path)
89
- mesh_path = save_dir / 'mesh.glb'
90
- mesh.export(mesh_path, file_type='glb')
91
-
92
- zip_path = self.zip_results(config.log.exp_dir)
93
- yield sample_image_paths, mesh_path.as_posix(), zip_path, 'Done!'