bluestarburst committed on
Commit
5a57f66
1 Parent(s): a74fae4

Upload folder using huggingface_hub

Browse files
animatediff/models/__pycache__/unet.cpython-310.pyc CHANGED
Binary files a/animatediff/models/__pycache__/unet.cpython-310.pyc and b/animatediff/models/__pycache__/unet.cpython-310.pyc differ
 
animatediff/models/unet.py CHANGED
@@ -456,12 +456,15 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin):
456
  return UNet3DConditionOutput(sample=sample)
457
 
458
  @classmethod
459
- def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):
460
  if subfolder is not None:
461
  pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
462
  print(f"loaded temporal unet's pretrained weights from {pretrained_model_path} ...")
463
-
464
  config_file = os.path.join(pretrained_model_path, 'config.json')
 
 
 
 
465
  if not os.path.isfile(config_file):
466
  raise RuntimeError(f"{config_file} does not exist")
467
  with open(config_file, "r") as f:
@@ -482,7 +485,11 @@ class UNet3DConditionModel(ModelMixin, ConfigMixin):
482
 
483
  from diffusers.utils import WEIGHTS_NAME
484
  model = cls.from_config(config, **unet_additional_kwargs)
485
- model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
 
 
 
 
486
  if not os.path.isfile(model_file):
487
  raise RuntimeError(f"{model_file} does not exist")
488
  state_dict = torch.load(model_file, map_location="cpu")
 
456
  return UNet3DConditionOutput(sample=sample)
457
 
458
  @classmethod
459
+ def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None, config_path=None):
460
  if subfolder is not None:
461
  pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
462
  print(f"loaded temporal unet's pretrained weights from {pretrained_model_path} ...")
 
463
  config_file = os.path.join(pretrained_model_path, 'config.json')
464
+
465
+ if config_path is not None:
466
+ config_file = config_path
467
+
468
  if not os.path.isfile(config_file):
469
  raise RuntimeError(f"{config_file} does not exist")
470
  with open(config_file, "r") as f:
 
485
 
486
  from diffusers.utils import WEIGHTS_NAME
487
  model = cls.from_config(config, **unet_additional_kwargs)
488
+
489
+ if config_path is None:
490
+ model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
491
+ else:
492
+ model_file = pretrained_model_path
493
  if not os.path.isfile(model_file):
494
  raise RuntimeError(f"{model_file} does not exist")
495
  state_dict = torch.load(model_file, map_location="cpu")
handler.py CHANGED
@@ -3,7 +3,7 @@
3
  from diffusers import AutoencoderKL, DDPMScheduler, DDIMScheduler
4
  from transformers import CLIPTextModel, CLIPTokenizer
5
  from omegaconf import OmegaConf
6
- from huggingface_hub import hf_hub_download
7
 
8
  import os
9
 
@@ -21,8 +21,9 @@ from animatediff.utils.util import load_weights
21
  class EndpointHandler():
22
  def __init__(self, model_path: str = "bluestarburst/AnimateDiff-SceneFusion"):
23
 
24
- inference_config_path = "configs/inference/inference-v3.yaml"
25
- hf_hub_download(repo_id="bluestarburst/AnimateDiff-SceneFusion", filename="configs/inference/inference-v3.yaml")
 
26
 
27
  inference_config = OmegaConf.load(inference_config_path)
28
 
@@ -33,13 +34,12 @@ class EndpointHandler():
33
  text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="models/StableDiffusion/text_encoder")
34
  vae = AutoencoderKL.from_pretrained(model_path, subfolder="models/StableDiffusion/vae")
35
 
36
- if not os.path.isfile("models/StableDiffusion/unet/diffusion_pytorch_model.bin"):
37
- hf_hub_download(repo_id="bluestarburst/AnimateDiff-SceneFusion", filename="models/StableDiffusion/unet/config.json")
38
- hf_hub_download(repo_id="bluestarburst/AnimateDiff-SceneFusion", filename="models/StableDiffusion/unet/diffusion_pytorch_model.bin")
39
 
40
- unet_model_path = "models/StableDiffusion/unet"
41
 
42
- unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path=unet_model_path, unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs))
43
 
44
  if is_xformers_available(): unet.enable_xformers_memory_efficient_attention()
45
  else: assert False
 
3
  from diffusers import AutoencoderKL, DDPMScheduler, DDIMScheduler
4
  from transformers import CLIPTextModel, CLIPTokenizer
5
  from omegaconf import OmegaConf
6
+ from huggingface_hub import hf_hub_download, try_to_load_from_cache
7
 
8
  import os
9
 
 
21
  class EndpointHandler():
22
  def __init__(self, model_path: str = "bluestarburst/AnimateDiff-SceneFusion"):
23
 
24
+ # inference_config_path = "configs/inference/inference-v3.yaml"
25
+ inference_config_path = hf_hub_download(repo_id="bluestarburst/AnimateDiff-SceneFusion", filename="configs/inference/inference-v3.yaml")
26
+ print(inference_config_path)
27
 
28
  inference_config = OmegaConf.load(inference_config_path)
29
 
 
34
  text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="models/StableDiffusion/text_encoder")
35
  vae = AutoencoderKL.from_pretrained(model_path, subfolder="models/StableDiffusion/vae")
36
 
37
+ unet_model_path = hf_hub_download(repo_id="bluestarburst/AnimateDiff-SceneFusion", filename="models/StableDiffusion/unet/diffusion_pytorch_model.bin")
38
+ unet_config_path = hf_hub_download(repo_id="bluestarburst/AnimateDiff-SceneFusion", filename="models/StableDiffusion/unet/config.json")
 
39
 
40
+ print(unet_model_path)
41
 
42
+ unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path=unet_model_path, unet_additional_kwargs=OmegaConf.to_container(inference_config.unet_additional_kwargs), config_path=unet_config_path)
43
 
44
  if is_xformers_available(): unet.enable_xformers_memory_efficient_attention()
45
  else: assert False