# install

import glob
import os
import random
import subprocess

# On HuggingFace Spaces, install runtime dependencies that cannot be declared
# in requirements.txt (CUDA-specific wheels and git-hosted packages).
# NOTE(review): subprocess.run with a list (shell=False) is used correctly here;
# return codes are deliberately not checked — a failed install surfaces at import time.
if os.getenv('SYSTEM') == 'spaces':
    subprocess.run('pip install pyembree'.split())
    subprocess.run('pip install rembg'.split())
    subprocess.run(
        'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
    subprocess.run(
        'pip install git+https://github.com/YuliangXiu/kaolin.git'.split())
    subprocess.run('pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html'.split())
    subprocess.run(
        'pip install git+https://github.com/Project-Splinter/human_det.git'.split())
    subprocess.run(
        'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split())

import gradio as gr

# Must stay below the conditional installs: apps.infer pulls in the packages
# installed above when running on Spaces.
from apps.infer import generate_model

# running

description = '''
# ICON Clothed Human Digitization
### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)

Twitter Follow | YouTube Video Views

#### The reconstruction + refinement + video take about 80~120 seconds for single image.

<details>
<summary>More</summary>

#### Citation
```
@inproceedings{xiu2022icon,
  title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
  author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  month     = {June},
  year      = {2022},
  pages     = {13296-13306}
}
```

#### Acknowledgments:

- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)

#### Image Credits

* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)

#### Related works

* [ICON @ MPI](https://icon.is.tue.mpg.de/)
* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
* [Phorhum @ Google](https://phorhum.github.io/)
* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)

</details>
'''


def generate_image(seed, psi):
    """Sample a synthetic full-body human image from the StyleGAN-Human Space.

    Args:
        seed: integer random seed forwarded to the StyleGAN-Human generator.
        psi: truncation psi forwarded to the generator.

    Returns:
        The image produced by the remote ``spaces/hysts/StyleGAN-Human``
        Gradio interface (a file path usable by ``gr.Image(type="filepath")``).
    """
    iface = gr.Interface.load("spaces/hysts/StyleGAN-Human")
    img = iface(seed, psi)
    return img


# Fixed seed so the example-to-method pairing below is reproducible across restarts.
random.seed(2022)

model_types = ['icon-filter', 'pifu', 'pamir']

# Pair every bundled example image with a randomly chosen reconstruction method.
examples = [[item, random.choice(model_types)]
            for item in glob.glob('examples/*.png')]

with gr.Blocks() as demo:
    gr.Markdown(description)

    out_lst = []

    with gr.Row():
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    seed = gr.inputs.Slider(
                        0, 100, step=1, default=0,
                        label='Seed (For Image Generation)')
                    psi = gr.inputs.Slider(
                        0, 2, step=0.05, default=0.7,
                        label='Truncation psi (For Image Generation)')
                    radio_choice = gr.Radio(
                        model_types,
                        label='Method (For Reconstruction)',
                        value='icon-filter')
                inp = gr.Image(type="filepath", label="Input Image")

            with gr.Row():
                btn_sample = gr.Button("Sample Image")
                btn_submit = gr.Button("Submit Image")

            # NOTE(review): at this point out_lst is still the empty list; the
            # later assignment rebinds the name rather than filling this list,
            # so gr.Examples sees no outputs. Harmless while
            # cache_examples=False (outputs are only needed for caching), but
            # worth confirming if caching is ever enabled.
            gr.Examples(examples=examples,
                        inputs=[inp, radio_choice],
                        cache_examples=False,
                        fn=generate_model,
                        outputs=out_lst)

            out_vid_download = gr.File(
                label="Download Video, welcome share on Twitter with #ICON")

        with gr.Column():
            overlap_inp = gr.Image(
                type="filepath", label="Image Normal Overlap")
            out_smpl = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL")
            out_smpl_download = gr.File(label="Download SMPL mesh")
            out_smpl_npy_download = gr.File(label="Download SMPL params")
            out_recon = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="Recon")
            out_recon_download = gr.File(label="Download clothed human mesh")
            out_final = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="Refined Recon")
            out_final_download = gr.File(
                label="Download refined clothed human mesh")

    # Output order must match what apps.infer.generate_model returns.
    out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download, out_recon,
               out_recon_download, out_final, out_final_download,
               out_vid_download, overlap_inp]

    btn_submit.click(fn=generate_model,
                     inputs=[inp, radio_choice],
                     outputs=out_lst)
    btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp)


if __name__ == "__main__":
    # Password-protected variant kept for reference:
    # demo.launch(debug=False, enable_queue=False,
    #             auth=(os.environ['USER'], os.environ['PASSWORD']),
    #             auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
    demo.launch(debug=True, enable_queue=True)