#!/usr/bin/env python

from __future__ import annotations

import os
import datetime
import subprocess

import gradio as gr
import spaces


@spaces.GPU(duration=60 * 3)
def run_on_gpu(input_shape, gen_resolution_global, padding_factor,
               gen_subsample_manifold_iter, gen_refine_iter):
    print('Started inference at {}'.format(datetime.datetime.now()))

    # pps.py already gets the 'rec' sub-command via call_base, so call_args only
    # carries the input point cloud and the output location
    # (repo layout: data/ for inputs, results/rec/ for outputs).
    call_base = ['python', 'ppsurf/pps.py', 'rec']
    call_args = ['data/{}'.format(input_shape),
                 'results/rec/{}'.format(input_shape),
                 ]
    # NOTE: the generation parameters from the UI sliders are not forwarded to pps.py here.
    res = subprocess.check_output(call_base + call_args)
    print('Finished inference at {}'.format(datetime.datetime.now()))

    # One return value per output of the run button; the reconstructed mesh is not
    # collected from results/rec/ yet, so only the subprocess log is shown as progress.
    return None, None, res.decode()


def main():
    description = '''# [PPSurf](https://github.com/cg-tuwien/ppsurf)

Supported file formats: PLY, STL, OBJ and other mesh files,
XYZ as whitespace-separated text file,
NPY and NPZ (key='arr_0'),
LAS and LAZ (version 1.0-1.4), COPC and CRS.
Best results for 50k-250k points.

This method is meant for scans of single objects or small groups of objects;
quality will be lower for scenes and landscapes.
Inference takes about 2 minutes.
'''

    def convert_to_ply(input_point_cloud_upload: str):
        # the upload handler receives the path of the uploaded file
        print('inputs:', input_point_cloud_upload)
        input_shape = input_point_cloud_upload
        if not input_shape.endswith('.ply'):
            # load the raw point cloud with the loader shipped in the ppsurf repo
            from ppsurf.source.occupancy_data_module import OccupancyDataModule
            pts_np = OccupancyDataModule.load_pts(input_shape)

            # convert to PLY so the Gradio Model3D viewer can display it
            import trimesh
            mesh = trimesh.Trimesh(vertices=pts_np[:, :3])
            input_shape = input_shape + '.ply'
            mesh.export(input_shape)

        # show in viewer: switch to the viewer tab and hand it the PLY path
        return gr.Tabs(selected='pc_viewer'), input_shape
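
    # For reference: a minimal sketch of preparing an input point cloud in some of
    # the supported formats listed above (assumes only numpy; the file names are
    # placeholders, not used by the app):
    #
    #   import numpy as np
    #   pts = np.random.rand(100_000, 3).astype(np.float32)  # N x 3 point coordinates
    #   np.savetxt('my_points.xyz', pts)                      # whitespace-separated XYZ text
    #   np.save('my_points.npy', pts)                         # NPY array
    #   np.savez('my_points.npz', arr_0=pts)                  # NPZ with key 'arr_0'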

    if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
        # link to the duplicate-space dialog of this Space
        description += (f'\n<br/>For faster inference without waiting in queue, '
                        f'you may duplicate the space and upgrade to GPU in settings. '
                        f'<a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true">'
                        f'Duplicate Space</a>')

    with gr.Blocks(css='style.css') as demo:
        gr.Markdown(description)

        with gr.Row():
            with gr.Column():
                with gr.Tabs() as input_tabs:
                    with gr.TabItem(label='Input Point Cloud Upload', id='pc_upload'):
                        input_point_cloud_upload = gr.File(
                            show_label=False, file_count='single')
                        # input_point_cloud_upload.attach_load_event(convert_to_ply, every=None)
                    with gr.TabItem(label='Input Point Cloud Viewer', id='pc_viewer'):
                        input_point_cloud_viewer = gr.Model3D(show_label=False)

                # wire the upload event here, after the tabs and the viewer exist,
                # so the converted PLY can be shown in the viewer tab
                input_point_cloud_upload.upload(
                    fn=convert_to_ply,
                    inputs=[input_point_cloud_upload],
                    outputs=[input_tabs, input_point_cloud_viewer])

                gen_resolution_global = gr.Slider(
                    label='Grid Resolution (larger for more details)',
                    minimum=17, maximum=513, value=129, step=2)
                padding_factor = gr.Slider(
                    label='Padding Factor (larger if object is cut off at boundaries)',
                    minimum=0.0, maximum=1.0, value=0.05, step=0.05)
                gen_subsample_manifold_iter = gr.Slider(
                    label='Subsample Manifold Iterations (larger for larger point clouds)',
                    minimum=3, maximum=30, value=10, step=1)
                gen_refine_iter = gr.Slider(
                    label='Edge Refinement Iterations (larger for more details)',
                    minimum=3, maximum=30, value=10, step=1)

                # run_button = gr.Button('Run')

            with gr.Column():
                progress_text = gr.Text(label='Progress')
                with gr.Tabs():
                    with gr.TabItem(label='Reconstructed 3D model'):
                        result_3d_model = gr.Model3D(show_label=False)
                    with gr.TabItem(label='Output mesh file'):
                        output_file = gr.File(show_label=False)

        # with gr.Row():
        #     examples = [
        #         ['shapes/dragon1.obj', 'a photo of a dragon', 0, 7.5],
        #         ['shapes/dragon2.obj', 'a photo of a dragon', 0, 7.5],
        #         ['shapes/eagle.obj', 'a photo of an eagle', 0, 7.5],
        #         ['shapes/napoleon.obj', 'a photo of Napoleon Bonaparte', 3, 7.5],
        #         ['shapes/nascar.obj', 'A next gen nascar', 2, 10],
        #     ]
        #     gr.Examples(examples=examples,
        #                 inputs=[
        #                     input_point_cloud_viewer,
        #                     text,
        #                     seed,
        #                     guidance_scale,
        #                 ],
        #                 outputs=[
        #                     result_3d_model,
        #                     output_file,
        #                 ],
        #                 cache_examples=False)

        with gr.Row():
            run_button = gr.Button('=> Run PPSurf =>')
            run_button.click(fn=run_on_gpu,
                             inputs=[
                                 input_point_cloud_viewer,
                                 gen_resolution_global,
                                 padding_factor,
                                 gen_subsample_manifold_iter,
                                 gen_refine_iter,
                             ],
                             outputs=[
                                 result_3d_model,
                                 output_file,
                                 progress_text,
                             ])

    demo.queue(max_size=5)
    demo.launch(debug=True)


if __name__ == '__main__':
    main()
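

# Usage sketch (assumption: the ppsurf repo is checked out as a ppsurf/ subdirectory
# next to this script, with its requirements installed): running this script starts
# the Gradio demo; with debug=True it serves on Gradio's default local URL,
# http://127.0.0.1:7860.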