# Install runtime dependencies (only when running on Hugging Face Spaces, where SYSTEM == 'spaces')


import glob
import os
import random
import subprocess

import gradio as gr

if os.getenv('SYSTEM') == 'spaces':
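    # Pinned CUDA 11.3 builds of torch / torchvision, plus kaolin, pytorch3d and the
    # ICON-specific dependencies (pyembree, the rembg fork, human_det,
    # neural_voxelization_layer).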
    subprocess.run('pip install pyembree'.split())
    subprocess.run(
        'pip install git+https://github.com/YuliangXiu/rembg.git@hf'.split())
    subprocess.run(
        'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
    subprocess.run(
        'pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl'.split())
    subprocess.run('pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html'.split())
    subprocess.run(
        'pip install git+https://github.com/Project-Splinter/human_det.git'.split())
    subprocess.run(
        'pip install git+https://github.com/YuliangXiu/neural_voxelization_layer.git'.split())

from apps.infer import generate_model
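# generate_model is the reconstruction entry point: it receives the input image
# path and the chosen method, and its return values are wired to the output
# components collected in out_lst below.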

# Gradio demo: description, UI layout and event wiring

description = '''
# ICON Clothed Human Digitization 
### ICON: Implicit Clothed humans Obtained from Normals (CVPR 2022)

<table>
<th>
<ul>
<li><strong>Homepage</strong> <a href="http://icon.is.tue.mpg.de">icon.is.tue.mpg.de</a></li>
<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ICON">YuliangXiu/ICON</a></li>
<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2112.09127">arXiv</a>, <a href="https://readpaper.com/paper/4569785684533977089">ReadPaper</a></li>
<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
</ul>
<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a>
<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ICON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
<a href="https://youtu.be/hZd6AYin2DE"><img alt="YouTube Video Views" src="https://img.shields.io/youtube/views/hZd6AYin2DE?style=social"></a>
</th>
<th>
<iframe width="560" height="315" src="https://www.youtube.com/embed/hZd6AYin2DE" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</th>
</table>

<h4> Reconstruction + refinement + video rendering takes about 200 seconds for a single image. <span style="color:red"> If an error occurs, try "Submit Image" again.</span></h4>

<details>

<summary>More</summary>

#### Citation
```
@inproceedings{xiu2022icon,
  title     = {{ICON}: {I}mplicit {C}lothed humans {O}btained from {N}ormals},
  author    = {Xiu, Yuliang and Yang, Jinlong and Tzionas, Dimitrios and Black, Michael J.},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  month     = {June},
  year      = {2022},
  pages     = {13296-13306}
} 
```

#### Acknowledgments

- [StyleGAN-Human, ECCV 2022](https://stylegan-human.github.io/)
- [nagolinc/styleGanHuman_and_PIFu](https://huggingface.co/spaces/nagolinc/styleGanHuman_and_PIFu)
- [radames/PIFu-Clothed-Human-Digitization](https://huggingface.co/spaces/radames/PIFu-Clothed-Human-Digitization)

#### Image Credits

* [Pinterest](https://www.pinterest.com/search/pins/?q=parkour&rs=sitelinks_searchbox)

#### Related works

* [ICON @ MPI](https://icon.is.tue.mpg.de/)
* [MonoPort @ USC](https://xiuyuliang.cn/monoport)
* [Phorhum @ Google](https://phorhum.github.io/)
* [PIFuHD @ Meta](https://shunsukesaito.github.io/PIFuHD/)
* [PaMIR @ Tsinghua](http://www.liuyebin.com/pamir/pamir.html)

</details>
'''


def generate_image(seed, psi):
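    """Sample a human image from the hysts/StyleGAN-Human Space, controlled by seed and truncation psi."""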
    iface = gr.Interface.load("spaces/hysts/StyleGAN-Human")
    img = iface(seed, psi)
    return img


random.seed(2022)
model_types = ['ICON', 'PIFu', 'PaMIR']
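# Pair each bundled example image with a randomly chosen reconstruction method;
# the fixed seed above keeps the pairing reproducible across restarts.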
examples = [[item, random.choice(model_types)]
            for item in glob.glob('examples/*.png')]

with gr.Blocks() as demo:
    gr.Markdown(description)

    out_lst = []
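    # Left column: generation controls, input image, example gallery and the result
    # video; right column: normal overlap, SMPL mesh and the refined clothed mesh.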
    with gr.Row():
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    seed = gr.Slider(
                        0, 100, step=1, value=0, label='Seed (For Image Generation)')
                    psi = gr.Slider(
                        0, 2, step=0.05, value=0.7, label='Truncation psi (For Image Generation)')
                    radio_choice = gr.Radio(
                        model_types, label='Method (For Reconstruction)', value='icon-filter')
                inp = gr.Image(type="filepath", label="Input Image")
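            # "Sample Image" pulls a generated image from StyleGAN-Human into the input;
            # "Submit Image" runs the selected reconstruction method on it.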
            with gr.Row():
                btn_sample = gr.Button("Sample Image")
                btn_submit = gr.Button("Submit Image")

            gr.Examples(examples=examples,
                        inputs=[inp, radio_choice],
                        cache_examples=False,
                        fn=generate_model,
                        outputs=out_lst)

            out_vid = gr.Video(label="Image + Normal + Recon + Refined Recon")
            out_vid_download = gr.File(
                label="Download Video, welcome share on Twitter with #ICON")

        with gr.Column():
            overlap_inp = gr.Image(
                type="filepath", label="Image Normal Overlap")
            out_smpl = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0],  label="SMPL")
            out_smpl_download = gr.File(label="Download SMPL mesh")
            out_smpl_npy_download = gr.File(label="Download SMPL params")
            out_final = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0],  label="Refined Recon")
            out_final_download = gr.File(
                label="Download refined clothed human mesh")

    out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download,
               out_final, out_final_download, out_vid, out_vid_download, overlap_inp]
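    # The order here must match the order of values returned by generate_model,
    # since out_lst is used as the `outputs` list for the submit handler below.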

    btn_submit.click(fn=generate_model, inputs=[
                     inp, radio_choice], outputs=out_lst)
    btn_sample.click(fn=generate_image, inputs=[seed, psi], outputs=inp)

if __name__ == "__main__":

    # demo.launch(debug=False, enable_queue=False,
    #             auth=(os.environ['USER'], os.environ['PASSWORD']),
    #             auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")

    demo.launch(debug=True, enable_queue=True)