Yuliang committed on
Commit
a3f1f2c
1 Parent(s): 2d5f249

change pytorch version to avoid OOM error

Browse files
app.py CHANGED
@@ -14,7 +14,6 @@ if os.getenv('SYSTEM') == 'spaces':
14
  'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
15
  subprocess.run(
16
  'pip install git+https://github.com/YuliangXiu/kaolin.git'.split())
17
- # subprocess.run('pip install https://download.is.tue.mpg.de/icon/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl'.split())
18
  subprocess.run('pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html'.split())
19
  subprocess.run(
20
  'pip install git+https://github.com/Project-Splinter/human_det.git'.split())
@@ -73,7 +72,7 @@ def generate_image(seed, psi):
73
 
74
  random.seed(1993)
75
  model_types = ['icon-filter', 'pifu', 'pamir']
76
- examples = [[item, random.choice(model_types)] for item in sorted(glob.glob('examples/*.png'))]
77
 
78
  with gr.Blocks() as demo:
79
  gr.Markdown(description)
@@ -123,7 +122,7 @@ with gr.Blocks() as demo:
123
  if __name__ == "__main__":
124
 
125
  # demo.launch(debug=False, enable_queue=False,
126
- # auth=("icon@tue.mpg.de", "icon_2022"),
127
  # auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
128
 
129
  demo.launch(debug=True, enable_queue=True)
 
14
  'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
15
  subprocess.run(
16
  'pip install git+https://github.com/YuliangXiu/kaolin.git'.split())
 
17
  subprocess.run('pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html'.split())
18
  subprocess.run(
19
  'pip install git+https://github.com/Project-Splinter/human_det.git'.split())
 
72
 
73
  random.seed(1993)
74
  model_types = ['icon-filter', 'pifu', 'pamir']
75
+ examples = [[item, random.choice(model_types)] for item in random.sample(sorted(glob.glob('examples/*.png')), 4)]
76
 
77
  with gr.Blocks() as demo:
78
  gr.Markdown(description)
 
122
  if __name__ == "__main__":
123
 
124
  # demo.launch(debug=False, enable_queue=False,
125
+ # auth=(os.environ['USER'], os.environ['PASSWORD']),
126
  # auth_message="Register at icon.is.tue.mpg.de to get HuggingFace username and password.")
127
 
128
  demo.launch(debug=True, enable_queue=True)
apps/infer.py CHANGED
@@ -454,6 +454,7 @@ def generate_model(in_path, model_type):
454
  overlap_path = os.path.join(config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png")
455
 
456
  torch.cuda.empty_cache()
 
457
  del model
458
  del dataset
459
  del local_affine_model
 
454
  overlap_path = os.path.join(config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png")
455
 
456
  torch.cuda.empty_cache()
457
+
458
  del model
459
  del dataset
460
  del local_affine_model
examples/923d65f767c85a42212cae13fba3750b.png CHANGED

Git LFS Details

  • SHA256: 11310b5ef67f69d9efe7f00cced6e4e4a7c55ade2d928c3005ec102615d93ac0
  • Pointer size: 131 Bytes
  • Size of remote file: 616 kB

Git LFS Details

  • SHA256: 86f4eff6d64d036a91d193e2373a76fd6698b8a3cd8be01e65b96a742907838d
  • Pointer size: 131 Bytes
  • Size of remote file: 773 kB
examples/959c4c726a69901ce71b93a9242ed900.png CHANGED

Git LFS Details

  • SHA256: fc0b5e48a0cf3fbe664e2fcc54212167f8b973efdca74bbe1e8f5dd2ab23883e
  • Pointer size: 131 Bytes
  • Size of remote file: 476 kB

Git LFS Details

  • SHA256: d2b3933243b046b2c437a7dcc0f3da56f27d1787ff4931af37b1121e834da892
  • Pointer size: 131 Bytes
  • Size of remote file: 501 kB
lib/common/seg3d_lossless.py CHANGED
@@ -584,7 +584,7 @@ class Seg3dLossless(nn.Module):
584
 
585
  final = occupancys[1:, 1:, 1:].contiguous()
586
 
587
- if final.shape[0] > 256:
588
  # for voxelgrid larger than 256^3, the required GPU memory will be > 9GB
589
  # thus we use CPU marching_cube to avoid "CUDA out of memory"
590
  occu_arr = final.detach().cpu().numpy() # non-smooth surface
 
584
 
585
  final = occupancys[1:, 1:, 1:].contiguous()
586
 
587
+ if final.shape[0] >= 256:
588
  # for voxelgrid larger than 256^3, the required GPU memory will be > 9GB
589
  # thus we use CPU marching_cube to avoid "CUDA out of memory"
590
  occu_arr = final.detach().cpu().numpy() # non-smooth surface