Yuliang committed on
Commit
cd9b314
1 Parent(s): df6cc56

gradio done

Browse files
Files changed (5) hide show
  1. .gitattributes +0 -0
  2. README.md +5 -3
  3. app.py +20 -10
  4. packages.txt +0 -2
  5. requirements.txt +1 -1
.gitattributes ADDED
File without changes
README.md CHANGED
@@ -1,5 +1,6 @@
1
- title: Fully-textured Clothed Human Digitization (ECON + TEXTure)
2
- metaTitle: Avatarify yourself from single image, by Yuliang Xiu
 
3
  emoji: 🤼
4
  colorFrom: green
5
  colorTo: pink
@@ -7,4 +8,5 @@ sdk: gradio
7
  sdk_version: 3.27.0
8
  app_file: app.py
9
  pinned: true
10
- python_version: 3.8.15
 
1
+ ---
2
+ title: Unconstrained & Detailed Clothed Human Digitization (ECON + ControlNet)
3
+ metaTitle: Avatarify from Photo
4
  emoji: 🤼
5
  colorFrom: green
6
  colorTo: pink
8
  sdk_version: 3.27.0
9
  app_file: app.py
10
  pinned: true
11
+ python_version: 3.8.15
12
+ ---
app.py CHANGED
@@ -6,19 +6,28 @@ import os
6
 
7
  import subprocess
8
 
 
 
9
  if os.getenv('SYSTEM') == 'spaces':
10
  # subprocess.run('pip install pyembree'.split())
11
  subprocess.run(
12
  'pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu116_pyt1130/download.html'
13
  .split()
14
  )
 
 
 
 
 
 
 
15
 
16
  from apps.infer import generate_model, generate_video
17
 
18
  # running
19
 
20
  description = '''
21
- # Fully-textured Clothed Human Digitization (ECON + ControlNet)
22
  ### ECON: Explicit Clothed humans Optimized via Normal integration (CVPR 2023, Highlight)
23
 
24
  <table>
@@ -149,7 +158,6 @@ pipe.enable_xformers_memory_efficient_attention()
149
  # Generator seed,
150
  generator = torch.manual_seed(0)
151
 
152
-
153
  hint_prompts = '''
154
  <strong>Hints</strong>: <br>
155
  best quality, extremely detailed, solid color background,
@@ -158,6 +166,7 @@ light and dark contrast, 8k, high detail, edge lighting,
158
  3d, c4d, blender, oc renderer, ultra high definition, 3d rendering
159
  '''
160
 
 
161
  def get_pose(image):
162
  return pose_model(image)
163
 
@@ -165,9 +174,9 @@ def get_pose(image):
165
  # def generate_texture(input_shape, text, seed, guidance_scale):
166
  # iface = gr.Interface.load("spaces/TEXTurePaper/TEXTure")
167
  # output_shape = iface(input_shape, text, seed, guidance_scale)
168
- # return output_shape
169
-
170
-
171
  def generate_images(image, prompt, image_file_live_opt='file', live_conditioning=None):
172
  if image is None and 'image' not in live_conditioning:
173
  raise gr.Error("Please provide an image")
@@ -222,7 +231,9 @@ with gr.Blocks() as demo:
222
  label="How would you like to upload your image?")
223
 
224
  with gr.Row():
225
- image_in_img = gr.Image(source="upload", visible=True, type="pil", label="Image for Pose")
 
 
226
  canvas = gr.HTML(None, elem_id="canvas_html", visible=False)
227
 
228
  image_file_live_opt.change(
@@ -234,12 +245,11 @@ with gr.Blocks() as demo:
234
  prompt = gr.Textbox(
235
  label="Enter your prompt to synthesise the image",
236
  max_lines=10,
237
- placeholder=
238
- "best quality, extremely detailed",
239
  )
240
-
241
  gr.Markdown(hint_prompts)
242
-
243
  with gr.Column():
244
  gallery = gr.Gallery().style(grid=[2], height="auto")
245
  gallery_cache = gr.State()
6
 
7
  import subprocess
8
 
9
+ curr_dir = os.path.dirname(__file__)
10
+
11
  if os.getenv('SYSTEM') == 'spaces':
12
  # subprocess.run('pip install pyembree'.split())
13
  subprocess.run(
14
  'pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu116_pyt1130/download.html'
15
  .split()
16
  )
17
+ subprocess.run(
18
+ f"cd {curr_dir}/lib/common/libmesh && python setup.py build_ext --inplace".split()
19
+ )
20
+ subprocess.run(
21
+ f"cd {curr_dir}/lib/common/libvoxelize && python setup.py build_ext --inplace".split()
22
+ )
23
+ subprocess.run(f"cd {curr_dir}".split())
24
 
25
  from apps.infer import generate_model, generate_video
26
 
27
  # running
28
 
29
  description = '''
30
+ # Unconstrained & Detailed Clothed Human Digitization (ECON + ControlNet)
31
  ### ECON: Explicit Clothed humans Optimized via Normal integration (CVPR 2023, Highlight)
32
 
33
  <table>
158
  # Generator seed,
159
  generator = torch.manual_seed(0)
160
 
 
161
  hint_prompts = '''
162
  <strong>Hints</strong>: <br>
163
  best quality, extremely detailed, solid color background,
166
  3d, c4d, blender, oc renderer, ultra high definition, 3d rendering
167
  '''
168
 
169
+
170
  def get_pose(image):
171
  return pose_model(image)
172
 
174
  # def generate_texture(input_shape, text, seed, guidance_scale):
175
  # iface = gr.Interface.load("spaces/TEXTurePaper/TEXTure")
176
  # output_shape = iface(input_shape, text, seed, guidance_scale)
177
+ # return output_shape
178
+
179
+
180
  def generate_images(image, prompt, image_file_live_opt='file', live_conditioning=None):
181
  if image is None and 'image' not in live_conditioning:
182
  raise gr.Error("Please provide an image")
231
  label="How would you like to upload your image?")
232
 
233
  with gr.Row():
234
+ image_in_img = gr.Image(
235
+ source="upload", visible=True, type="pil", label="Image for Pose"
236
+ )
237
  canvas = gr.HTML(None, elem_id="canvas_html", visible=False)
238
 
239
  image_file_live_opt.change(
245
  prompt = gr.Textbox(
246
  label="Enter your prompt to synthesise the image",
247
  max_lines=10,
248
+ placeholder="best quality, extremely detailed",
 
249
  )
250
+
251
  gr.Markdown(hint_prompts)
252
+
253
  with gr.Column():
254
  gallery = gr.Gallery().style(grid=[2], height="auto")
255
  gallery_cache = gr.State()
packages.txt CHANGED
@@ -8,6 +8,4 @@ libgl1-mesa-dri
8
  libegl1-mesa
9
  libgbm1
10
  build-essential
11
- python-wheel
12
- libturbojpeg
13
  libeigen3-dev
8
  libegl1-mesa
9
  libgbm1
10
  build-essential
 
 
11
  libeigen3-dev
requirements.txt CHANGED
@@ -4,7 +4,7 @@ torchvision==0.14.1+cu116
4
  fvcore
5
  iopath
6
  pyembree
7
- cupy
8
  cython
9
  matplotlib
10
  scikit-image
4
  fvcore
5
  iopath
6
  pyembree
7
+ cupy-cuda11x
8
  cython
9
  matplotlib
10
  scikit-image