Yuliang committed
Commit e0ba903
1 Parent(s): b539850

upgrade to Gradio 4.14.0
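For orientation before the per-file diffs: this commit is mostly a mechanical Gradio 3.x → 4.x migration of app.py (the `.style()` method and the `gr.inputs.*` namespace are gone, `default=` becomes `value=`, `_js=` becomes `js=`, and `gr.Image` no longer takes `source=`). The block below is a standalone sketch of those renames against gradio==4.14.0, not code from the commit itself; the component names are illustrative only:

```python
# Hedged sketch of the Gradio 3.x -> 4.x API changes this commit applies
# (assumes gradio==4.14.0; component names here are illustrative only).
import gradio as gr

with gr.Blocks() as demo:
    # 3.x: gr.Image(source="upload", ...)  ->  4.x drops the `source` argument
    image_in = gr.Image(type="pil", label="Image for Pose")

    # 3.x: gr.Gallery(...).style(grid=[2], height="auto")
    # 4.x: .style() is removed; layout moves into the constructor
    gallery = gr.Gallery(label="Generated Images", columns=[2], rows=[2])

    # 3.x: gr.inputs.Slider(10, 100, step=10, default=50)
    # 4.x: the gr.inputs namespace is removed and `default` becomes `value`
    steps = gr.Slider(10, 100, step=10, value=50, label="Fitting steps")

    btn = gr.Button("Submit")
    # 3.x: event listeners took `_js=`; in 4.x the keyword is `js=`
    btn.click(
        fn=lambda im, s: im,
        inputs=[image_in, steps],
        outputs=[image_in],
        js="(im, s) => [im, s]",
    )

demo.queue()
# demo.launch()
```

The `sdk_version` bump in README.md matters too: Hugging Face Spaces reads that field to decide which Gradio build to install, so it has to move to 4.14.0 together with the code.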
.gitignore CHANGED
@@ -17,4 +17,6 @@ dist
 *egg-info
 *.so
 run.sh
-*.log
+*.log
+gradio_cached_examples
+!output.log
README.md CHANGED
@@ -5,10 +5,10 @@ emoji: 🤼
 colorFrom: green
 colorTo: pink
 sdk: gradio
-sdk_version: 3.50.2
+sdk_version: 4.14.0
 app_file: app.py
 pinned: true
-python_version: 4.14.0
+python_version: 3.8.15
 ---
 
 # Unconstrained & Detailed Clothed Human Digitization (ECON + ControlNet)
app.py CHANGED
@@ -1,6 +1,5 @@
 # install
 
-import glob
 import gradio as gr
 import os
 
@@ -8,10 +7,13 @@ import subprocess
 
 if os.getenv('SYSTEM') == 'spaces':
     # subprocess.run('pip install pyembree'.split())
-    subprocess.run(
-        'pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu116_pyt1130/download.html'
-        .split()
-    )
+    try:
+        import pytorch3d
+    except ImportError:
+        subprocess.run(
+            'pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu116_pyt1130/download.html'
+            .split()
+        )
     subprocess.run("python setup.py build_ext --inplace".split(), cwd="./lib/common/libmesh/")
     subprocess.run("python setup.py build_ext --inplace".split(), cwd="./lib/common/libvoxelize/")
 
@@ -19,37 +21,12 @@ from apps.infer import generate_model, generate_video
 
 # running
 
-description = '''
+title = '''
 # Unconstrained & Detailed Clothed Human Digitization (ECON + ControlNet)
 ### ECON: Explicit Clothed humans Optimized via Normal integration (CVPR 2023, Highlight)
+'''
 
-<table>
-<th width="20%">
-<ul>
-<li><strong>Homepage</strong> <a href="https://econ.is.tue.mpg.de/">econ.is.tue.mpg.de</a></li>
-<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ECON">YuliangXiu/ECON</a></li>
-<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2212.07422">arXiv</a>, <a href="https://readpaper.com/paper/4736821012688027649">ReadPaper</a></li>
-<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
-</ul>
-<br>
-<ul>
-<li><strong>Colab Notebook</strong> <a href='https://colab.research.google.com/drive/1YRgwoRCZIrSB2e7auEWFyG10Xzjbrbno?usp=sharing'><img style="display: inline-block;" src='https://colab.research.google.com/assets/colab-badge.svg' alt='Google Colab'></a></li>
-<li><strong>Blender Plugin</strong> <a href='https://carlosedubarreto.gumroad.com/l/CEB_ECON'><img style="display: inline-block;" src='https://img.shields.io/badge/Blender-F6DDCC.svg?logo=Blender' alt='Blender'></a></li>
-<li><strong>Docker Image</strong> <a href='https://github.com/YuliangXiu/ECON/blob/master/docs/installation-docker.md'><img style="display: inline-block;" src='https://img.shields.io/badge/Docker-9cf.svg?logo=Docker' alt='Docker'></a></li>
-<li><strong>Windows Setup</strong> <a href="https://github.com/YuliangXiu/ECON/blob/master/docs/installation-windows.md"><img style="display: inline-block;" src='https://img.shields.io/badge/Windows-00a2ed.svg?logo=Windows' akt='Windows'></a></li>
-</ul>
-
-<br>
-<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a><br>
-<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ECON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
-</th>
-<th width="40%">
-<iframe width="560" height="315" src="https://www.youtube.com/embed/5PEd_p90kS0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-</th>
-<th width="40%">
-<iframe width="560" height="315" src="https://www.youtube.com/embed/sbWZbTf6ZYk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-</th>
-</table>
+bottom = '''
 
 
 #### Citation
@@ -90,10 +67,40 @@ description = '''
 <center>
 <a href="https://huggingface.co/spaces/Yuliang/ECON?duplicate=true"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-lg-dark.svg"/></a>
 <h2> Generate pose & prompt-guided images / Upload photos / Use examples &rarr; Submit Image (~3min) &rarr; Generate Video (~3min) </h2>
-<h2><span style="color:red">ECON is only suitable for humanoid images and will not work well on cartoons with non-human shapes.</span></h2>
+<h2><span style="color:red">ECON is only suitable for "humanoid images" and will not work well on cartoons with non-human shapes.</span></h2>
 </center>
 '''
 
+description = '''
+<table>
+<th width="20%">
+<ul>
+<li><strong>Homepage</strong> <a href="https://econ.is.tue.mpg.de/">econ.is.tue.mpg.de</a></li>
+<li><strong>Code</strong> <a href="https://github.com/YuliangXiu/ECON">YuliangXiu/ECON</a></li>
+<li><strong>Paper</strong> <a href="https://arxiv.org/abs/2212.07422">arXiv</a>, <a href="https://readpaper.com/paper/4736821012688027649">ReadPaper</a></li>
+<li><strong>Chatroom</strong> <a href="https://discord.gg/Vqa7KBGRyk">Discord</a></li>
+</ul>
+<br>
+<ul>
+<li><strong>Colab Notebook</strong> <a href='https://colab.research.google.com/drive/1YRgwoRCZIrSB2e7auEWFyG10Xzjbrbno?usp=sharing'><img style="display: inline-block;" src='https://colab.research.google.com/assets/colab-badge.svg' alt='Google Colab'></a></li>
+<li><strong>Blender Plugin</strong> <a href='https://carlosedubarreto.gumroad.com/l/CEB_ECON'><img style="display: inline-block;" src='https://img.shields.io/badge/Blender-F6DDCC.svg?logo=Blender' alt='Blender'></a></li>
+<li><strong>Docker Image</strong> <a href='https://github.com/YuliangXiu/ECON/blob/master/docs/installation-docker.md'><img style="display: inline-block;" src='https://img.shields.io/badge/Docker-9cf.svg?logo=Docker' alt='Docker'></a></li>
+<li><strong>Windows Setup</strong> <a href="https://github.com/YuliangXiu/ECON/blob/master/docs/installation-windows.md"><img style="display: inline-block;" src='https://img.shields.io/badge/Windows-00a2ed.svg?logo=Windows' akt='Windows'></a></li>
+</ul>
+
+<br>
+<a href="https://twitter.com/yuliangxiu"><img alt="Twitter Follow" src="https://img.shields.io/twitter/follow/yuliangxiu?style=social"></a><br>
+<iframe src="https://ghbtns.com/github-btn.html?user=yuliangxiu&repo=ECON&type=star&count=true&v=2&size=small" frameborder="0" scrolling="0" width="100" height="20"></iframe>
+</th>
+<th width="40%">
+<iframe width="560" height="315" src="https://www.youtube.com/embed/5PEd_p90kS0" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+</th>
+<th width="40%">
+<iframe width="560" height="315" src="https://www.youtube.com/embed/sbWZbTf6ZYk" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+</th>
+</table>
+'''
+
 from controlnet_aux import OpenposeDetector
 from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
 from diffusers import UniPCMultistepScheduler
@@ -127,9 +134,6 @@ async (image_in_img, prompt, image_file_live_opt, live_conditioning) => {
 """
 
 # Constants
-low_threshold = 100
-high_threshold = 200
-default_step = 50
 cached = False
 
 # Models
@@ -167,11 +171,12 @@ blender, oc renderer, ultra high definition, 3d rendering
 def get_pose(image):
     return pose_model(image)
 
+import sys
 
-# def generate_texture(input_shape, text, seed, guidance_scale):
-#     iface = gr.Interface.load("spaces/TEXTurePaper/TEXTure")
-#     output_shape = iface(input_shape, text, seed, guidance_scale)
-#     return output_shape
+def read_logs():
+    sys.stdout.flush()
+    with open("output.log", "r") as f:
+        return f.read()
 
 
 def generate_images(image, prompt, image_file_live_opt='file', live_conditioning=None):
@@ -189,7 +194,7 @@ def generate_images(image, prompt, image_file_live_opt='file', live_conditioning
         pose,
         generator=generator,
         num_images_per_prompt=3,
-        num_inference_steps=20,
+        num_inference_steps=50,
     )
     all_outputs = []
     all_outputs.append(pose)
@@ -207,11 +212,17 @@ def toggle(choice):
     return gr.update(visible=False, value=None), gr.update(visible=True, value=canvas_html)
 
 
-examples_pose = glob.glob('examples/pose/*')
-examples_cloth = glob.glob('examples/cloth/*')
+examples_pose = 'examples/pose'
+examples_cloth = 'examples/cloth'
+
+def show_video():
+    return gr.update(visible=True), gr.update(visible=True)
 
 with gr.Blocks() as demo:
-    gr.Markdown(description)
+
+    gr.Markdown(title)
+    gr.HTML(description)
+    gr.Markdown(bottom)
 
     out_lst = []
     with gr.Row():
@@ -227,7 +238,7 @@ with gr.Blocks() as demo:
 
         with gr.Row():
             image_in_img = gr.Image(
-                source="upload", visible=True, type="pil", label="Image for Pose"
+                visible=True, type="pil", label="Image for Pose"
             )
             canvas = gr.HTML(None, elem_id="canvas_html", visible=False)
 
@@ -246,7 +257,7 @@ with gr.Blocks() as demo:
             gr.Markdown(hint_prompts)
 
         with gr.Column():
-            gallery = gr.Gallery(label="Generated Images").style(grid=[2], height="auto")
+            gallery = gr.Gallery(label="Generated Images", columns=[2], rows=[2])
            gallery_cache = gr.State()
 
            gr.Markdown(
@@ -259,12 +270,12 @@ with gr.Blocks() as demo:
            )
 
            inp = gr.Image(type="filepath", label="Input Image for Reconstruction")
-            fitting_step = gr.inputs.Slider(
+            fitting_step = gr.Slider(
                10,
                100,
                step=10,
                label='Fitting steps (Slower yet Better-aligned SMPL-X)',
-                default=default_step
+                value=50
            )
 
            with gr.Row():
@@ -275,7 +286,7 @@ with gr.Blocks() as demo:
                fn=generate_images,
                inputs=[image_in_img, prompt, image_file_live_opt, live_conditioning],
                outputs=[gallery, gallery_cache],
-                _js=get_js_image
+                js=get_js_image
            )
 
    def get_select_index(cache, evt: gr.SelectData):
@@ -288,40 +299,41 @@ with gr.Blocks() as demo:
            )
 
            with gr.Row():
-
+
                gr.Examples(
-                    examples=list(examples_pose),
+                    examples=examples_pose,
                    inputs=[inp],
                    cache_examples=cached,
-                    fn=None,
-                    outputs=None,
+                    fn=generate_model,
+                    outputs=out_lst,
                    label="Hard Pose Examples"
                )
 
                gr.Examples(
-                    examples=list(examples_cloth),
+                    examples=examples_cloth,
                    inputs=[inp],
                    cache_examples=cached,
-                    fn=None,
-                    outputs=None,
+                    fn=generate_model,
+                    outputs=out_lst,
                    label="Loose Cloth Examples"
                )
 
-                out_vid = gr.Video(label="Shared on Twitter with #ECON")
 
        with gr.Column():
-            overlap_inp = gr.Image(type="filepath", label="Image Normal Overlap").style(height=400)
+            overlap_inp = gr.Image(type="filepath", label="Image Normal Overlap")
            out_final = gr.Model3D(
                clear_color=[0.0, 0.0, 0.0, 0.0], label="Clothed human", elem_id="avatar"
            )
            out_smpl = gr.Model3D(
-                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL-X body", elem_id="avatar"
+                clear_color=[0.0, 0.0, 0.0, 0.0], label="SMPL-X body (via PIXIE)", elem_id="avatar"
            )
 
            vis_tensor_path = gr.State()
+
 
-            with gr.Row():
-                btn_video = gr.Button("Generate Video (~3min)")
+            # logs = gr.Textbox(max_lines=10, label="Logs")
+            btn_video = gr.Button("Generate Video (~3min)", visible=False)
+            out_vid = gr.Video(label="Shared on Twitter with #ECON", visible=False)
 
        out_lst = [out_smpl, out_final, overlap_inp, vis_tensor_path]
 
@@ -332,10 +344,12 @@ with gr.Blocks() as demo:
        )
 
    btn_submit.click(fn=generate_model, inputs=[inp, fitting_step], outputs=out_lst)
-
-    demo.load(None, None, None, _js=load_js)
+    btn_submit.click(fn=show_video, outputs=[btn_video, out_vid])
+    # demo.load(read_logs, None, logs, every=1, queue=True, scroll_to_output=True)
+    demo.load(None, None, None, js=load_js)
 
 if __name__ == "__main__":
 
    demo.queue()
-    demo.launch(max_threads=1)
+    demo.launch(max_threads=4)
+    # demo.launch(max_threads=2, debug=True, server_port=8888, server_name="0.0.0.0")
apps/benchmark.py CHANGED
@@ -90,7 +90,7 @@ if __name__ == "__main__":
    normal_net.netG.eval()
    print(
        colored(
-            f"Resume Normal Estimator from {Format.start} {cfg.normal_path} {Format.end}", "green"
+            f"Resume Normal Estimator from: {cfg.normal_path}", "green"
        )
    )
 
apps/infer.py CHANGED
@@ -30,7 +30,7 @@ import trimesh
 from pytorch3d.ops import SubdivideMeshes
 from huggingface_hub import hf_hub_download
 from termcolor import colored
-from tqdm.auto import tqdm
+from tqdm import tqdm
 
 from apps.IFGeo import IFGeo
 from apps.Normal import Normal
@@ -65,14 +65,32 @@ def generate_video(vis_tensor_path):
 
    render.load_meshes(verts_lst, faces_lst)
    render.get_rendered_video_multi(in_tensor, tmp_path)
-
+
    os.system(f"ffmpeg -y -loglevel quiet -stats -i {tmp_path} -vcodec libx264 {out_path}")
-
+
    return out_path
 
-
+import sys
+class Logger:
+    def __init__(self, filename):
+        self.terminal = sys.stdout
+        self.log = open(filename, "w")
+
+    def write(self, message):
+        self.terminal.write(message)
+        self.log.write(message)
+
+    def flush(self):
+        self.terminal.flush()
+        self.log.flush()
+
+    def isatty(self):
+        return False
+
 def generate_model(in_path, fitting_step=50):
-
+
+    sys.stdout = Logger("./output.log")
+
    out_dir = "./results"
 
    # cfg read and merge
@@ -101,7 +119,7 @@ def generate_model(in_path, fitting_step=50):
    normal_net.netG.eval()
    print(
        colored(
-            f"Resume Normal Estimator from {Format.start} {cfg.normal_path} {Format.end}", "green"
+            f"Resume Normal Estimator from : {cfg.normal_path} ", "green"
        )
    )
 
@@ -129,10 +147,10 @@ def generate_model(in_path, fitting_step=50):
        ifnet = ifnet.to(device)
        ifnet.netG.eval()
 
-        print(colored(f"Resume IF-Net+ from {Format.start} {cfg.ifnet_path} {Format.end}", "green"))
-        print(colored(f"Complete with {Format.start} IF-Nets+ (Implicit) {Format.end}", "green"))
+        print(colored(f"Resume IF-Net+ from : {cfg.ifnet_path} ", "green"))
+        print(colored(f"Complete with : IF-Nets+ (Implicit) ", "green"))
    else:
-        print(colored(f"Complete with {Format.start} SMPL-X (Explicit) {Format.end}", "green"))
+        print(colored(f"Complete with : SMPL-X (Explicit) ", "green"))
 
    dataset = TestDataset(dataset_param, device)
 
@@ -142,7 +160,7 @@ def generate_model(in_path, fitting_step=50):
 
    losses = init_loss()
 
-    print(f"{data['name']}")
+    print(f"Subject name: {data['name']}")
 
    # final results rendered as image (PNG)
    # 1. Render the final fitted SMPL (xxx_smpl.png)
@@ -261,7 +279,8 @@ def generate_model(in_path, fitting_step=50):
 
        ghum_lmks = data["landmark"][:, SMPLX_object.ghum_smpl_pairs[:, 0], :2].to(device)
        ghum_conf = data["landmark"][:, SMPLX_object.ghum_smpl_pairs[:, 0], -1].to(device)
-        smpl_lmks = smpl_joints_3d[:, SMPLX_object.ghum_smpl_pairs[:, 1], :2]
+        smpl_lmks = smpl_joints_3d[:, SMPLX_object.ghum_smpl_pairs[:, 1], :2].to(device)
+
 
        # render optimized mesh as normal [-1,1]
        in_tensor["T_normal_F"], in_tensor["T_normal_B"] = dataset.render_normal(
@@ -293,7 +312,7 @@ def generate_model(in_path, fitting_step=50):
            # for highly occluded body, reply only on high-confidence landmarks, no silhouette+normal loss
 
            # BUG: PyTorch3D silhouette renderer generates dilated mask
-            bg_value = in_tensor["T_normal_F"][0, 0, 0, 0]
+            bg_value = in_tensor["T_normal_F"][0, 0, 0, 0].to(device)
            smpl_arr_fake = torch.cat([
                in_tensor["T_normal_F"][:, 0].ne(bg_value).float(),
                in_tensor["T_normal_B"][:, 0].ne(bg_value).float()
@@ -329,6 +348,7 @@ def generate_model(in_path, fitting_step=50):
            occlude_str = ''.join([str(j) for j in body_overlap_flag.int().tolist()])
            pbar_desc += colored(f"| loose:{loose_str}, occluded:{occlude_str}", "yellow")
            loop_smpl.set_description(pbar_desc)
+            print(pbar_desc)
 
        # save intermediate results
        if (i == fitting_step - 1):
@@ -611,7 +631,7 @@ def generate_model(in_path, fitting_step=50):
        cfg.bni.poisson_depth,
    )
    print(
-        colored(f"\n Poisson completion to {Format.start} {final_path} {Format.end}", "yellow")
+        colored(f"Poisson completion to : {final_path} ", "yellow")
    )
 
    dataset.render.load_meshes(final_mesh.vertices, final_mesh.faces)
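The `Logger` added to apps/infer.py (together with `read_logs()` in app.py and the `!output.log` entry in .gitignore) is a plain stdout "tee": everything printed during reconstruction is mirrored into output.log, which is also why the progress-bar descriptions are now `print`ed. Below is a minimal standalone sketch of the same pattern with hypothetical names (`Tee`, `LOG_PATH`); it is not the commit's code:

```python
# Standalone sketch of the stdout-tee logging pattern introduced in apps/infer.py.
# `Tee` and LOG_PATH are hypothetical names; the commit's class is `Logger`
# and it writes "./output.log".
import sys

LOG_PATH = "output.log"

class Tee:
    def __init__(self, path):
        self.terminal = sys.stdout   # keep the original stream
        self.log = open(path, "a")   # mirror every write into a file

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        self.terminal.flush()
        self.log.flush()

    def isatty(self):
        # report "not a terminal" so libraries skip interactive control codes
        return False

sys.stdout = Tee(LOG_PATH)
print("this line reaches both the console and", LOG_PATH)
```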
gradio_cached_examples/13/log.csv DELETED
@@ -1,2 +0,0 @@
-flag,username,timestamp
-,,2023-04-15 18:15:46.412679
gradio_cached_examples/25/log.csv DELETED
@@ -1,6 +0,0 @@
-flag,username,timestamp
-,,2023-04-16 10:48:00.715491
-,,2023-04-16 10:50:02.250539
-,,2023-04-16 10:52:15.683112
-,,2023-04-16 10:54:18.253116
-,,2023-04-16 10:56:22.892765
lib/common/imutils.py CHANGED
@@ -193,7 +193,7 @@ def process_image(img_file, hps_type, single, input_res, detector):
    predictions = detector(img_square / 255.)[0]
 
    if single:
-        top_score = predictions["scores"][predictions["labels"] == 1].max()
+        top_score = max(predictions["scores"][predictions["labels"] == 1])
        human_ids = torch.where(predictions["scores"] == top_score)[0]
    else:
        human_ids = torch.logical_and(predictions["labels"] == 1,
lib/common/libmesh/triangle_hash.cpp CHANGED
The diff for this file is too large to render. See raw diff
 
lib/common/libvoxelize/voxelize.c CHANGED
The diff for this file is too large to render. See raw diff
 
lib/common/local_affine.py CHANGED
@@ -138,6 +138,8 @@ def register(target_mesh, src_mesh, device, verbose=True):
        cloth_loss.backward(retain_graph=True)
        optimizer_cloth.step()
        scheduler_cloth.step(cloth_loss)
+
+        print(pbar_desc)
 
    final = trimesh.Trimesh(
        src_mesh.verts_packed().detach().squeeze(0).cpu(),
lib/common/render.py CHANGED
@@ -16,6 +16,7 @@
 
 import math
 import os
+import sys
 
 import cv2
 import numpy as np
@@ -318,7 +319,7 @@ class Render:
        )
 
        pbar = tqdm(range(len(self.meshes)))
-        pbar.set_description(colored(f"Normal Rendering {os.path.basename(save_path)}...", "blue"))
+        print(colored(f"Normal Rendering {os.path.basename(save_path)}...", "blue"))
 
        mesh_renders = [] #[(N_cam, 3, res, res)*N_mesh]
 
@@ -343,10 +344,10 @@ class Render:
                )[..., :3].permute(0, 3, 1, 2)
            )
            mesh_renders.append(torch.cat(norm_lst).detach().cpu())
-
+
        # generate video frame by frame
        pbar = tqdm(range(len(self.cam_pos["around"])))
-        pbar.set_description(colored(f"Video Exporting {os.path.basename(save_path)}...", "blue"))
+        print(colored(f"Video Exporting {os.path.basename(save_path)}...", "blue"))
 
        for cam_id in pbar:
            img_raw = data["img_raw"]
lib/dataset/TestDataset.py CHANGED
@@ -81,7 +81,7 @@ class TestDataset:
 
        print(
            colored(
-                f"SMPL-X estimate with {Format.start} {self.hps_type.upper()} {Format.end}", "green"
+                f"SMPL-X estimate with {self.hps_type.upper()}", "green"
            )
        )
 
lib/pymafx/utils/sample_mesh.py DELETED
@@ -1,66 +0,0 @@
-import os
-
-import numpy as np
-import trimesh
-
-from .utils.libmesh import check_mesh_contains
-
-
-def get_occ_gt(
-    in_path=None,
-    vertices=None,
-    faces=None,
-    pts_num=1000,
-    points_sigma=0.01,
-    with_dp=False,
-    points=None,
-    extra_points=None
-):
-    if in_path is not None:
-        mesh = trimesh.load(in_path, process=False)
-        print(type(mesh.vertices), mesh.vertices.shape, mesh.faces.shape)
-
-    mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
-
-    # print('get_occ_gt', type(mesh.vertices), mesh.vertices.shape, mesh.faces.shape)
-
-    # points_size = 100000
-    points_padding = 0.1
-    # points_sigma = 0.01
-    points_uniform_ratio = 0.5
-    n_points_uniform = int(pts_num * points_uniform_ratio)
-    n_points_surface = pts_num - n_points_uniform
-
-    if points is None:
-        points_scale = 2.0
-        boxsize = points_scale + points_padding
-        points_uniform = np.random.rand(n_points_uniform, 3)
-        points_uniform = boxsize * (points_uniform - 0.5)
-        points_surface, index_surface = mesh.sample(n_points_surface, return_index=True)
-        points_surface += points_sigma * np.random.randn(n_points_surface, 3)
-        points = np.concatenate([points_uniform, points_surface], axis=0)
-
-    if extra_points is not None:
-        extra_points += points_sigma * np.random.randn(len(extra_points), 3)
-        points = np.concatenate([points, extra_points], axis=0)
-
-    occupancies = check_mesh_contains(mesh, points)
-
-    index_surface = None
-
-    # points = points.astype(dtype)
-
-    # print('occupancies', occupancies.dtype, np.sum(occupancies), occupancies.shape)
-    # occupancies = np.packbits(occupancies)
-    # print('occupancies bit', occupancies.dtype, np.sum(occupancies), occupancies.shape)
-
-    # print('occupancies', points.shape, occupancies.shape, occupancies.dtype, np.sum(occupancies), index_surface.shape)
-
-    return_dict = {}
-    return_dict['points'] = points
-    return_dict['points.occ'] = occupancies
-    return_dict['sf_sidx'] = index_surface
-
-    # export_pointcloud(mesh, modelname, loc, scale, args)
-    # export_points(mesh, modelname, loc, scale, args)
-    return return_dict
output.log ADDED
@@ -0,0 +1 @@
+
requirements.txt CHANGED
@@ -27,6 +27,6 @@ transformers
 controlnet_aux
 xformers==0.0.16
 triton
+diffusers
+accelerate
 git+https://github.com/YuliangXiu/rembg.git
-git+https://github.com/huggingface/diffusers.git
-git+https://github.com/huggingface/accelerate.git