xunsong.li committed
Commit 172285b • 1 Parent(s): ae534d1

update readme

fix download
partial download
fix (×7)
update (×2)
Update app.py (×3)

Files changed (3)
  1. README.md +11 -0
  2. app.py +23 -8
  3. src/utils/download_models.py +40 -0
README.md CHANGED
@@ -1,3 +1,14 @@
+---
+title: Moore AnimateAnyone
+emoji: 🏃
+colorFrom: indigo
+colorTo: red
+sdk: gradio
+sdk_version: 4.14.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+---
 # 🤗 Introduction
 
 This repository reproduces [AnimateAnyone](https://github.com/HumanAIGC/AnimateAnyone). To align the results demonstrated by the original paper, we adopt various approaches and tricks, which may differ somewhat from the paper and another [implementation](https://github.com/guoqincode/Open-AnimateAnyone).
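
The block added at the top of README.md is Hugging Face Spaces front matter: the Hub parses this YAML header (it must be the very first thing in the file, delimited by `---` lines) to configure the Space, here pinning the Gradio SDK to 4.14.0, naming `app.py` as the entry point, and declaring the apache-2.0 license.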
app.py CHANGED
@@ -7,7 +7,7 @@ import numpy as np
 import torch
 from diffusers import AutoencoderKL, DDIMScheduler
 from einops import repeat
-from huggingface_hub import snapshot_download
+from huggingface_hub import hf_hub_download, snapshot_download
 from omegaconf import OmegaConf
 from PIL import Image
 from torchvision import transforms
@@ -17,16 +17,26 @@ from src.models.pose_guider import PoseGuider
 from src.models.unet_2d_condition import UNet2DConditionModel
 from src.models.unet_3d import UNet3DConditionModel
 from src.pipelines.pipeline_pose2vid_long import Pose2VideoPipeline
+from src.utils.download_models import prepare_base_model, prepare_image_encoder
 from src.utils.util import get_fps, read_frames, save_videos_grid
 
+# Partial download
+prepare_base_model()
+prepare_image_encoder()
+
 snapshot_download(
-    repo_id="runwayml/stable-diffusion-v1-5",
-    local_dir="./pretrained_weights/stable-diffusion-v1-5",
+    repo_id="stabilityai/sd-vae-ft-mse", local_dir="./pretrained_weights/sd-vae-ft-mse"
 )
 snapshot_download(
-    repo_id="stabilityai/sd-vae-ft-mse", local_dir="./pretrained_weights/sd-vae-ft-mse"
+    repo_id="patrolli/AnimateAnyone",
+    local_dir="./pretrained_weights",
 )
-snapshot_download(repo_id="patrolli/AnimateAnyone", local_dir="./pretrained_weights")
+is_spaces = True if "SPACE_ID" in os.environ else False
+true_for_shared_ui = False  # This will be true only if you are in a shared UI
+if is_spaces:
+    true_for_shared_ui = (
+        True if "xunsong/Moore-AnimateAnyone" in os.environ["SPACE_ID"] else False
+    )
 
 
 class AnimateController:
@@ -226,9 +236,13 @@ def ui():
            return Image.fromarray(image)
 
        # when user uploads a new video
-        motion_sequence.upload(read_video, motion_sequence, motion_sequence)
+        motion_sequence.upload(
+            read_video, motion_sequence, motion_sequence, queue=False
+        )
        # when `first_frame` is updated
-        reference_image.upload(read_image, reference_image, reference_image)
+        reference_image.upload(
+            read_image, reference_image, reference_image, queue=False
+        )
        # when the `submit` button is clicked
        submit.click(
            controller.animate,
@@ -270,4 +284,5 @@ def ui():
 
 
 demo = ui()
-demo.launch(share=True)
+demo.queue(max_size=10)
+demo.launch(share=True, show_api=False)
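
Two smaller behavioral changes round out app.py. The `SPACE_ID` check works because the Hub sets that environment variable to `owner/space-name` for every running Space, so `true_for_shared_ui` is only set when the code runs in the original shared demo rather than in a user's duplicate. Passing `queue=False` to the `upload` handlers keeps those lightweight preview callbacks out of the new request queue, so they stay responsive while animation jobs wait. The launch changes follow the standard Gradio recipe; below is a minimal self-contained sketch of that recipe (the toy button/textbox UI is invented for illustration, and only the `queue`/`launch` calls mirror this commit):

import gradio as gr

with gr.Blocks() as demo:
    # Stand-in UI for the real AnimateAnyone interface
    btn = gr.Button("Run")
    out = gr.Textbox()
    btn.click(lambda: "done", None, out)

demo.queue(max_size=10)  # hold at most 10 pending requests; further ones are rejected
demo.launch(show_api=False)  # hide the "Use via API" link on the page footer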
src/utils/download_models.py ADDED
@@ -0,0 +1,40 @@
+import os
+from pathlib import Path, PurePosixPath
+
+from huggingface_hub import hf_hub_download
+
+
+def prepare_base_model():
+    local_dir = "./pretrained_weights/stable-diffusion-v1-5"
+    os.makedirs(local_dir, exist_ok=True)
+    for hub_file in ["unet/config.json", "unet/diffusion_pytorch_model.bin"]:
+        path = Path(hub_file)
+        saved_path = local_dir / path
+        if os.path.exists(saved_path):
+            continue
+        hf_hub_download(
+            repo_id="runwayml/stable-diffusion-v1-5",
+            subfolder=PurePosixPath(path.parent),
+            filename=PurePosixPath(path.name),
+            local_dir=local_dir,
+        )
+
+
+def prepare_image_encoder():
+    local_dir = "./pretrained_weights"
+    os.makedirs(local_dir, exist_ok=True)
+    for hub_file in ["image_encoder/config.json", "image_encoder/pytorch_model.bin"]:
+        path = Path(hub_file)
+        saved_path = local_dir / path
+        if os.path.exists(saved_path):
+            continue
+        hf_hub_download(
+            repo_id="lambdalabs/sd-image-variations-diffusers",
+            subfolder=PurePosixPath(path.parent),
+            filename=PurePosixPath(path.name),
+            local_dir=local_dir,
+        )
+
+
+def prepare_dwpose():
+    ...
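
These helpers implement the "partial download" mentioned in the commit messages: rather than `snapshot_download`-ing all of `runwayml/stable-diffusion-v1-5` (tokenizer, text encoder, safety checker, and other weights the pipeline never touches), only the UNet config and weights are fetched, plus the CLIP image encoder from `lambdalabs/sd-image-variations-diffusers`, and files already on disk are skipped on restart. A minimal sketch of consuming the result follows (loading through diffusers here is an assumption for illustration; the Space actually loads these weights through its own `src.models` classes):

from diffusers import UNet2DConditionModel

from src.utils.download_models import prepare_base_model

prepare_base_model()  # downloads unet/config.json + unet/diffusion_pytorch_model.bin

# The helper recreates the repo layout under local_dir, so subfolder="unet"
# resolves against ./pretrained_weights/stable-diffusion-v1-5/unet.
unet = UNet2DConditionModel.from_pretrained(
    "./pretrained_weights/stable-diffusion-v1-5", subfolder="unet"
)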