#!/usr/bin/env python
from __future__ import annotations
import os
import datetime
import subprocess
import gradio as gr
import spaces
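

# run_on_gpu shells out to ppsurf's pps.py for the actual reconstruction. The @spaces.GPU
# decorator requests a ZeroGPU worker for each call; duration=60 * 3 asks for a budget of
# 3 minutes (the duration is given in seconds).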
@spaces.GPU(duration=60 * 3)
def run_on_gpu(input_point_cloud_viewer,
               gen_resolution_global,
               padding_factor,
               gen_subsample_manifold_iter,
               gen_refine_iter):
    print('Started inference at {}'.format(datetime.datetime.now()))

    # os.path.dirname('./ppsurf') resolves to '.', so this keeps the current working directory
    os.chdir(os.path.dirname('./ppsurf'))

    in_file = 'data/{}'.format(input_point_cloud_viewer)
    out_file = 'results/rec/{}'.format(input_point_cloud_viewer)
    call_base = ['python', 'pps.py', 'rec']
    call_args = [in_file,
                 out_file,
                 '--gen_resolution_global', str(gen_resolution_global),
                 '--padding_factor', str(padding_factor),
                 '--gen_subsample_manifold_iter', str(gen_subsample_manifold_iter),
                 '--gen_refine_iter', str(gen_refine_iter),
                 ]
    subprocess.run(call_base + call_args)
    print('Finished inference at {}'.format(datetime.datetime.now()))

    result_3d_model = out_file
    output_file = out_file
    progress_text = 'done'
    return result_3d_model, output_file, progress_text
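
# For reference, the subprocess call above amounts to a shell command of the form
# (the file name and parameter values shown are only illustrative UI defaults, not fixed here):
#   python pps.py rec data/<input>.ply results/rec/<input>.ply \
#       --gen_resolution_global 129 --padding_factor 0.05 \
#       --gen_subsample_manifold_iter 10 --gen_refine_iter 10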


def main():
    description = '''# [PPSurf](https://github.com/cg-tuwien/ppsurf)
Supported file formats:
- PLY, STL, OBJ and other mesh files,
- XYZ as whitespace-separated text file,
- NPY and NPZ (key='arr_0'),
- LAS and LAZ (version 1.0-1.4), COPC and CRS.

Best results are achieved with 50k-250k points.
This method is intended for scans of single objects or small groups of objects; quality for scenes and landscapes will be lower.
Inference takes about 2 minutes.
'''
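
    # Upload handler: converts non-PLY inputs (meshes, XYZ, NPY/NPZ, LAS/LAZ) to a PLY point
    # cloud on disk so that gr.Model3D can display them, using ppsurf's
    # OccupancyDataModule.load_pts for loading and trimesh for the export.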
    def convert_to_ply(input_point_cloud_upload: gr.utils.NamedString):
        # add absolute path to import dirs
        # import sys
        # import os
        # sys.path.append(os.path.abspath('ppsurf'))
        #
        # import os
        # os.chdir('ppsurf')

        print('Inputs:', input_point_cloud_upload, type(input_point_cloud_upload))

        input_shape: str = input_point_cloud_upload.name
        if not input_shape.endswith('.ply'):
            # load file
            from ppsurf.source.occupancy_data_module import OccupancyDataModule
            pts_np = OccupancyDataModule.load_pts(input_shape)

            # convert to ply
            import trimesh
            mesh = trimesh.Trimesh(vertices=pts_np[:, :3])
            input_shape = input_shape + '.ply'
            mesh.export(input_shape)

        print('ls\n', subprocess.check_output(['ls', os.path.dirname(input_shape)]))

        # show in viewer
        print(type(input_tabs))
        print(type(input_point_cloud_viewer))
        input_tabs.selected = 'pc_viewer'
        input_point_cloud_viewer.value = input_shape
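
        # Note: assigning to input_tabs.selected and input_point_cloud_viewer.value here does not
        # update the rendered UI; Gradio applies component updates only through an event's
        # outputs, and the .upload() call below registers this function with an empty outputs list.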

    if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
        description += (f'\n<p>For faster inference without waiting in the queue, '
                        f'you may duplicate the space and upgrade to a GPU in the settings. '
                        f'<a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true">'
                        f'<img style="display: inline; margin-top: 0em; margin-bottom: 0em" '
                        f'src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>')
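
    # Gradio UI: input tabs (file upload and 3D viewer) plus reconstruction parameters on the
    # left; progress text and the reconstructed result on the right.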
    with gr.Blocks(css='style.css') as demo:
        gr.Markdown(description)
        with gr.Row():
            with gr.Column():
                with gr.Tabs() as input_tabs:
                    with gr.TabItem(label='Input Point Cloud Upload', id='pc_upload'):
                        input_point_cloud_upload = gr.File(
                            show_label=False, file_count='single')
                        input_point_cloud_upload.upload(
                            fn=convert_to_ply,
                            inputs=[
                                input_point_cloud_upload,
                            ],
                            outputs=[
                                # input_point_cloud_viewer,  # not available here
                            ])
                    with gr.TabItem(label='Input Point Cloud Viewer', id='pc_viewer'):
                        input_point_cloud_viewer = gr.Model3D(show_label=False)
                gen_resolution_global = gr.Slider(
                    label='Grid Resolution (larger for more details)',
                    minimum=17, maximum=513, value=129, step=2)
                padding_factor = gr.Slider(
                    label='Padding Factor (larger if object is cut off at boundaries)',
                    minimum=0, maximum=1.0, value=0.05, step=0.05)
                gen_subsample_manifold_iter = gr.Slider(
                    label='Subsample Manifold Iterations (larger for larger point clouds)',
                    minimum=3, maximum=30, value=10, step=1)
                gen_refine_iter = gr.Slider(
                    label='Edge Refinement Iterations (larger for more details)',
                    minimum=3, maximum=30, value=10, step=1)
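
                # The four sliders above map one-to-one to the pps.py flags passed in run_on_gpu:
                # --gen_resolution_global, --padding_factor, --gen_subsample_manifold_iter
                # and --gen_refine_iter.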
            with gr.Column():
                progress_text = gr.Text(label='Progress')
                with gr.Tabs():
                    with gr.TabItem(label='Reconstructed 3D model'):
                        result_3d_model = gr.Model3D(show_label=False)
                    with gr.TabItem(label='Output mesh file'):
                        output_file = gr.File(show_label=False)

        # with gr.Row():
        #     examples = [
        #         ['shapes/dragon1.obj', 'a photo of a dragon', 0, 7.5],
        #         ['shapes/dragon2.obj', 'a photo of a dragon', 0, 7.5],
        #         ['shapes/eagle.obj', 'a photo of an eagle', 0, 7.5],
        #         ['shapes/napoleon.obj', 'a photo of Napoleon Bonaparte', 3, 7.5],
        #         ['shapes/nascar.obj', 'A next gen nascar', 2, 10],
        #     ]
        #     gr.Examples(examples=examples,
        #                 inputs=[
        #                     input_point_cloud_viewer,
        #                     text,
        #                     seed,
        #                     guidance_scale,
        #                 ],
        #                 outputs=[
        #                     result_3d_model,
        #                     output_file,
        #                 ],
        #                 cache_examples=False)
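
        # NOTE: the commented-out examples above appear to be left over from a different demo;
        # they reference components (text, seed, guidance_scale) that do not exist in this app.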

        with gr.Row():
            run_button = gr.Button('Reconstruct with PPSurf')
            run_button.click(fn=run_on_gpu,
                             inputs=[
                                 input_point_cloud_viewer,
                                 gen_resolution_global,
                                 padding_factor,
                                 gen_subsample_manifold_iter,
                                 gen_refine_iter,
                             ],
                             outputs=[
                                 result_3d_model,
                                 output_file,
                                 progress_text,
                             ])

    demo.queue(max_size=5)
    demo.launch(debug=True)


if __name__ == '__main__':
    main()