chaoxu committed
Commit cf8ef65 • 1 Parent(s): 2354839

remove pip import

Files changed (3):
  1. README.md        +1 -1
  2. app.py           +12 -9
  3. requirements.txt +1 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 📸🚀🌟
 colorFrom: red
 colorTo: yellow
 sdk: gradio
-sdk_version: 3.41.1
+sdk_version: 3.44.0
 app_file: app.py
 pinned: true
 license: mit
app.py CHANGED
@@ -1,7 +1,8 @@
 import os, sys
+import subprocess
 from huggingface_hub import snapshot_download
 
-is_local_run = False
+is_local_run = True
 
 code_dir = snapshot_download("One-2-3-45/code") if not is_local_run else "../code" # , token=os.environ['TOKEN']
 
@@ -11,16 +12,18 @@ elev_est_dir = os.path.abspath(os.path.join(code_dir, "one2345_elev_est"))
 sys.path.append(elev_est_dir)
 
 if not is_local_run:
-    import pip
-    pip.main(['install', elev_est_dir])
+    # import pip
+    # pip.main(['install', elev_est_dir])
     # export TORCH_CUDA_ARCH_LIST="7.0;7.2;8.0;8.6"
     # export IABN_FORCE_CUDA=1
     os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0;8.6"
     os.environ["IABN_FORCE_CUDA"] = "1"
     os.environ["FORCE_CUDA"] = "1"
-    pip.main(["install", "inplace_abn"])
+    # pip.main(["install", "inplace_abn"])
+    subprocess.run(['pip', 'install', 'inplace_abn'])
     # FORCE_CUDA=1 pip install --no-cache-dir git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0
-    pip.main(["install", "--no-cache-dir", "git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0"])
+    # pip.main(["install", "--no-cache-dir", "git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0"])
+    subprocess.run(['pip', 'install', '--no-cache-dir', 'git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0'])
 
 import shutil
 import torch
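The hunk above drops the `import pip` / `pip.main(...)` calls, which current pip releases no longer support as an in-process API, and shells out to pip with `subprocess.run` instead. A minimal sketch of the same pattern; the `pip_install` helper, the `sys.executable -m pip` invocation, and `check=True` are my additions rather than code from the Space:

```python
import subprocess
import sys


def pip_install(*args: str) -> None:
    # Install packages at runtime by invoking pip as a subprocess.
    # Running `sys.executable -m pip` targets the interpreter executing this
    # script, and check=True raises CalledProcessError if the install fails.
    subprocess.run([sys.executable, "-m", "pip", "install", *args], check=True)


# The same installs as in the hunk above, expressed with the helper.
pip_install("inplace_abn")
pip_install("--no-cache-dir",
            "git+https://github.com/mit-han-lab/torchsparse.git@v1.4.0")
```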
@@ -487,7 +490,7 @@ def run_demo(
         gr.Markdown(_DESCRIPTION)
 
         with gr.Row(variant='panel'):
-            with gr.Column(scale=1.2):
+            with gr.Column(scale=6):
                 image_block = gr.Image(type='pil', image_mode='RGBA', height=290, label='Input image', tool=None)
 
                 gr.Examples(
@@ -511,7 +514,7 @@ def run_demo(
                 run_btn = gr.Button('Run Generation', variant='primary', interactive=False)
                 guide_text = gr.Markdown(_USER_GUIDE, visible=True)
 
-            with gr.Column(scale=.8):
+            with gr.Column(scale=4):
                 with gr.Row():
                     bbox_block = gr.Image(type='pil', label="Bounding box", height=290, interactive=False)
                     sam_block = gr.Image(type='pil', label="SAM output", interactive=False)
@@ -527,11 +530,11 @@ def run_demo(
                 mesh_output = gr.Model3D(clear_color=[0.0, 0.0, 0.0, 0.0], label="One-2-3-45's Textured Mesh", elem_id="model-3d-out")
 
         with gr.Row(variant='panel'):
-            with gr.Column(scale=0.85):
+            with gr.Column(scale=85):
                 elev_output = gr.Label(label='Estimated elevation (degree, w.r.t. the horizontal plane)')
                 vis_output = gr.Plot(label='Camera poses of the input view (red) and predicted views (blue)', elem_id="plot-out")
 
-            with gr.Column(scale=1.15):
+            with gr.Column(scale=115):
                 gr.Markdown('Predicted multi-view images')
                 with gr.Row():
                     view_1 = gr.Image(interactive=False, height=200, show_label=False)
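The remaining hunks swap the fractional `gr.Column` scales for integers while preserving the ratios (1.2 : 0.8 becomes 6 : 4, and 0.85 : 1.15 becomes 85 : 115), since newer Gradio releases expect `scale` to be an integer. A standalone layout sketch, not the Space's actual UI, showing how sibling columns split a row by integer `scale`:

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():
        # Columns in the same Row share width in proportion to their
        # integer `scale`, so 6 : 4 gives the same split as 1.2 : 0.8.
        with gr.Column(scale=6):
            gr.Image(label="Input image")
        with gr.Column(scale=4):
            gr.Image(label="SAM output")

if __name__ == "__main__":
    demo.launch()
```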
 
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 --extra-index-url https://download.pytorch.org/whl/cu118
-torch>=2.0.0
+torch==2.0.1
 torchvision
 albumentations>=0.4.3
 opencv-python>=4.5.5.64
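Pinning `torch==2.0.1` against the cu118 extra index selects an exact CUDA 11.8 build instead of whatever `torch>=2.0.0` happens to resolve to. A quick sanity check, my own snippet rather than part of the Space, to confirm which build was installed:

```python
import torch

# With torch==2.0.1 pulled from the cu118 index, the version string should
# read "2.0.1+cu118" and the bundled CUDA toolkit version should be "11.8".
print(torch.__version__)
print(torch.version.cuda)
print(torch.cuda.is_available())
```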