Add "Duplicate Space" badge

#1
by akhaliq HF staff - opened
Files changed (7) hide show
  1. .gitignore +0 -1
  2. Dockerfile +2 -2
  3. README.md +0 -1
  4. app.py +18 -21
  5. model.py +5 -36
  6. patch +1 -1
  7. requirements.txt +10 -11
.gitignore CHANGED
@@ -1,6 +1,5 @@
1
  cache/
2
  experiments/
3
- *.zip
4
 
5
  # Byte-compiled / optimized / DLL files
6
  __pycache__/
 
1
  cache/
2
  experiments/
 
3
 
4
  # Byte-compiled / optimized / DLL files
5
  __pycache__/
Dockerfile CHANGED
@@ -35,7 +35,7 @@ WORKDIR ${HOME}/app
35
 
36
  RUN curl https://pyenv.run | bash
37
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
38
- ARG PYTHON_VERSION=3.9.16
39
  RUN pyenv install ${PYTHON_VERSION} && \
40
  pyenv global ${PYTHON_VERSION} && \
41
  pyenv rehash && \
@@ -45,7 +45,7 @@ RUN pip install --no-cache-dir -U torch==1.12.1+cu113 torchvision==0.13.1+cu113
45
  COPY --chown=1000 requirements.txt /tmp
46
  RUN pip install --no-cache-dir -r /tmp/requirements.txt
47
  RUN pip install --no-cache-dir -U kaolin==0.13.0 -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-1.12.1_cu113.html
48
- RUN pip install --no-cache-dir -U gradio==3.34.0
49
 
50
  COPY --chown=1000 . ${HOME}/app
51
  RUN cd TEXTurePaper && patch -p1 < ../patch
 
35
 
36
  RUN curl https://pyenv.run | bash
37
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
38
+ ENV PYTHON_VERSION=3.9.16
39
  RUN pyenv install ${PYTHON_VERSION} && \
40
  pyenv global ${PYTHON_VERSION} && \
41
  pyenv rehash && \
 
45
  COPY --chown=1000 requirements.txt /tmp
46
  RUN pip install --no-cache-dir -r /tmp/requirements.txt
47
  RUN pip install --no-cache-dir -U kaolin==0.13.0 -f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-1.12.1_cu113.html
48
+ RUN pip install --no-cache-dir -U gradio==3.17.1
49
 
50
  COPY --chown=1000 . ${HOME}/app
51
  RUN cd TEXTurePaper && patch -p1 < ../patch
README.md CHANGED
@@ -6,7 +6,6 @@ colorTo: red
6
  sdk: docker
7
  pinned: false
8
  license: mit
9
- suggested_hardware: a10g-small
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
6
  sdk: docker
7
  pinned: false
8
  license: mit
 
9
  ---
10
 
11
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -2,24 +2,30 @@
2
 
3
  from __future__ import annotations
4
 
5
- import os
6
-
7
  import gradio as gr
8
 
9
  from model import Model
10
 
11
- DESCRIPTION = '''# [TEXTure](https://github.com/TEXTurePaper/TEXTurePaper)
 
 
12
 
13
- - This demo only accepts as input `.obj` files with less than 100,000 faces.
14
- - Inference takes about 10 minutes on a T4 GPU.
 
15
  '''
16
- if (SPACE_ID := os.getenv('SPACE_ID')) is not None:
17
- DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
18
 
19
  model = Model()
20
 
21
  with gr.Blocks(css='style.css') as demo:
22
  gr.Markdown(DESCRIPTION)
 
 
 
 
 
 
 
23
  with gr.Row():
24
  with gr.Column():
25
  input_shape = gr.Model3D(label='Input 3D mesh')
@@ -36,15 +42,8 @@ with gr.Blocks(css='style.css') as demo:
36
  step=0.1)
37
  run_button = gr.Button('Run')
38
  with gr.Column():
39
- progress_text = gr.Text(label='Progress')
40
- with gr.Tabs():
41
- with gr.TabItem(label='Images from each viewpoint'):
42
- viewpoint_images = gr.Gallery(show_label=False).style(
43
- columns=4, height='auto')
44
- with gr.TabItem(label='Result 3D model'):
45
- result_3d_model = gr.Model3D(show_label=False)
46
- with gr.TabItem(label='Output mesh file'):
47
- output_file = gr.File(show_label=False)
48
  with gr.Row():
49
  examples = [
50
  ['shapes/dragon1.obj', 'a photo of a dragon', 0, 7.5],
@@ -61,7 +60,7 @@ with gr.Blocks(css='style.css') as demo:
61
  guidance_scale,
62
  ],
63
  outputs=[
64
- result_3d_model,
65
  output_file,
66
  ],
67
  cache_examples=False)
@@ -74,10 +73,8 @@ with gr.Blocks(css='style.css') as demo:
74
  guidance_scale,
75
  ],
76
  outputs=[
77
- viewpoint_images,
78
- result_3d_model,
79
  output_file,
80
- progress_text,
81
  ])
82
 
83
- demo.queue(max_size=5).launch(debug=True)
 
2
 
3
  from __future__ import annotations
4
 
 
 
5
  import gradio as gr
6
 
7
  from model import Model
8
 
9
+ DESCRIPTION = '''# TEXTure
10
+
11
+ This is an unofficial demo for [https://github.com/TEXTurePaper/TEXTurePaper](https://github.com/TEXTurePaper/TEXTurePaper).
12
 
13
+ This demo only accepts as input `.obj` files with less than 100,000 faces.
14
+
15
+ Inference takes about 10 minutes on a T4 GPU.
16
  '''
 
 
17
 
18
  model = Model()
19
 
20
  with gr.Blocks(css='style.css') as demo:
21
  gr.Markdown(DESCRIPTION)
22
+
23
+ gr.HTML("""
24
+ <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
25
+ <br/>
26
+ <a href="https://huggingface.co/spaces/hysts/TEXTure?duplicate=true">
27
+ <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
28
+ <p/>""")
29
  with gr.Row():
30
  with gr.Column():
31
  input_shape = gr.Model3D(label='Input 3D mesh')
 
42
  step=0.1)
43
  run_button = gr.Button('Run')
44
  with gr.Column():
45
+ result = gr.Video(label='Result')
46
+ output_file = gr.File(label='Output mesh file')
 
 
 
 
 
 
 
47
  with gr.Row():
48
  examples = [
49
  ['shapes/dragon1.obj', 'a photo of a dragon', 0, 7.5],
 
60
  guidance_scale,
61
  ],
62
  outputs=[
63
+ result,
64
  output_file,
65
  ],
66
  cache_examples=False)
 
73
  guidance_scale,
74
  ],
75
  outputs=[
76
+ result,
 
77
  output_file,
 
78
  ])
79
 
80
+ demo.queue(max_size=5).launch()
model.py CHANGED
@@ -5,10 +5,8 @@ import pathlib
5
  import shlex
6
  import subprocess
7
  import sys
8
- from typing import Generator, Optional
9
 
10
  import gradio as gr
11
- import trimesh
12
 
13
  sys.path.append('TEXTurePaper')
14
 
@@ -50,10 +48,8 @@ class Model:
50
  subprocess.run(shlex.split(f'zip -r {out_path} {mesh_dir}'))
51
  return out_path
52
 
53
- def run(
54
- self, shape_path: str, text: str, seed: int, guidance_scale: float
55
- ) -> Generator[tuple[list[str], Optional[str], Optional[str], str], None,
56
- None]:
57
  if not shape_path.endswith('.obj'):
58
  raise gr.Error('The input file is not .obj file.')
59
  if not self.check_num_faces(shape_path):
@@ -61,34 +57,7 @@ class Model:
61
 
62
  config = self.load_config(shape_path, text, seed, guidance_scale)
63
  trainer = TEXTure(config)
64
-
65
- trainer.mesh_model.train()
66
-
67
- total_steps = len(trainer.dataloaders['train'])
68
- for step, data in enumerate(trainer.dataloaders['train'], start=1):
69
- trainer.paint_step += 1
70
- trainer.paint_viewpoint(data)
71
- trainer.evaluate(trainer.dataloaders['val'],
72
- trainer.eval_renders_path)
73
- trainer.mesh_model.train()
74
-
75
- sample_image_dir = config.log.exp_dir / 'vis' / 'eval'
76
- sample_image_paths = sorted(
77
- sample_image_dir.glob(f'step_{trainer.paint_step:05d}_*.jpg'))
78
- sample_image_paths = [
79
- path.as_posix() for path in sample_image_paths
80
- ]
81
- yield sample_image_paths, None, None, f'{step}/{total_steps}'
82
-
83
- trainer.mesh_model.change_default_to_median()
84
-
85
- save_dir = trainer.exp_path / 'mesh'
86
- save_dir.mkdir(exist_ok=True, parents=True)
87
- trainer.mesh_model.export_mesh(save_dir)
88
- model_path = save_dir / 'mesh.obj'
89
- mesh = trimesh.load(model_path)
90
- mesh_path = save_dir / 'mesh.glb'
91
- mesh.export(mesh_path, file_type='glb')
92
-
93
  zip_path = self.zip_results(config.log.exp_dir)
94
- yield sample_image_paths, mesh_path.as_posix(), zip_path, 'Done!'
 
5
  import shlex
6
  import subprocess
7
  import sys
 
8
 
9
  import gradio as gr
 
10
 
11
  sys.path.append('TEXTurePaper')
12
 
 
48
  subprocess.run(shlex.split(f'zip -r {out_path} {mesh_dir}'))
49
  return out_path
50
 
51
+ def run(self, shape_path: str, text: str, seed: int,
52
+ guidance_scale: float) -> tuple[str, str]:
 
 
53
  if not shape_path.endswith('.obj'):
54
  raise gr.Error('The input file is not .obj file.')
55
  if not self.check_num_faces(shape_path):
 
57
 
58
  config = self.load_config(shape_path, text, seed, guidance_scale)
59
  trainer = TEXTure(config)
60
+ trainer.paint()
61
+ video_path = config.log.exp_dir / 'results' / 'step_00010_rgb.mp4'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  zip_path = self.zip_results(config.log.exp_dir)
63
+ return video_path.as_posix(), zip_path
patch CHANGED
@@ -28,7 +28,7 @@ index 610d2de..e172080 100644
28
  - with open('./TOKEN', 'r') as f:
29
  - self.token = f.read().replace('\n', '') # remove the last \n!
30
  - logger.info(f'loaded hugging face access token from ./TOKEN!')
31
- + self.token = os.getenv('HF_TOKEN')
32
  except FileNotFoundError as e:
33
  self.token = True
34
  logger.warning(
 
28
  - with open('./TOKEN', 'r') as f:
29
  - self.token = f.read().replace('\n', '') # remove the last \n!
30
  - logger.info(f'loaded hugging face access token from ./TOKEN!')
31
+ + self.token = os.environ['HF_TOKEN']
32
  except FileNotFoundError as e:
33
  self.token = True
34
  logger.warning(
requirements.txt CHANGED
@@ -1,16 +1,15 @@
1
- accelerate==0.19.0
2
- diffusers==0.16.1
3
- einops==0.6.1
4
- huggingface-hub==0.15.1
5
- imageio[ffmpeg,pyav]==2.28.1
6
- loguru==0.7.0
7
- matplotlib==3.7.1
8
  ninja==1.11.1
9
- opencv-python-headless==4.7.0.72
10
  pyrallis==0.3.1
11
  torch==1.12.1
12
  torchvision==0.13.1
13
- tqdm==4.65.0
14
- transformers==4.29.1
15
- trimesh==3.21.6
16
  xatlas==0.0.7
 
1
+ accelerate==0.16.0
2
+ diffusers==0.12.1
3
+ einops==0.6.0
4
+ huggingface-hub==0.12.0
5
+ imageio[ffmpeg,pyav]==2.25.0
6
+ loguru==0.6.0
7
+ matplotlib==3.6.3
8
  ninja==1.11.1
9
+ opencv-python-headless==4.7.0.68
10
  pyrallis==0.3.1
11
  torch==1.12.1
12
  torchvision==0.13.1
13
+ tqdm==4.64.1
14
+ transformers==4.26.0
 
15
  xatlas==0.0.7