bluestarburst committed on
Commit
eea7935
1 Parent(s): 8a7d8d8

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. handler.py +6 -3
  2. requirements.txt +0 -0
handler.py CHANGED
@@ -12,8 +12,8 @@ import torchvision
12
 
13
  import numpy as np
14
 
 
15
 
16
- from animatediff.pipelines.pipeline_animation import AnimationPipeline
17
  from animatediff.models.unet import UNet3DConditionModel
18
  from animatediff.pipelines.pipeline_animation import AnimationPipeline
19
  from animatediff.utils.util import save_videos_grid
@@ -23,7 +23,10 @@ from animatediff.utils.util import load_weights
23
  class EndpointHandler():
24
  def __init__(self, model_path: str = "models/StableDiffusion/", inference_config_path: str = "configs/inference/inference-v3.yaml", motion_module: str = "models/Motion_Module/mm_sd_v15.ckpt"):
25
 
26
- inference_config = OmegaConf.load(inference_config_path)
 
 
 
27
  ### >>> create validation pipeline >>> ###
28
  tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
29
  text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="text_encoder")
@@ -88,4 +91,4 @@ class EndpointHandler():
88
  # This function will be called during inference time.
89
 
90
 
91
- new_handler = EndpointHandler()
 
12
 
13
  import numpy as np
14
 
15
+ from diffusers import AutoPipelineForText2Image
16
 
 
17
  from animatediff.models.unet import UNet3DConditionModel
18
  from animatediff.pipelines.pipeline_animation import AnimationPipeline
19
  from animatediff.utils.util import save_videos_grid
 
23
  class EndpointHandler():
24
  def __init__(self, model_path: str = "models/StableDiffusion/", inference_config_path: str = "configs/inference/inference-v3.yaml", motion_module: str = "models/Motion_Module/mm_sd_v15.ckpt"):
25
 
26
+ # inference_config = OmegaConf.load(inference_config_path)
27
+
28
+ inference_config = {'unet_additional_kwargs': {'unet_use_cross_frame_attention': False, 'unet_use_temporal_attention': False, 'use_motion_module': True, 'motion_module_resolutions': [1, 2, 4, 8], 'motion_module_mid_block': False, 'motion_module_decoder_only': False, 'motion_module_type': 'Vanilla', 'motion_module_kwargs': {'num_attention_heads': 8, 'num_transformer_block': 1, 'attention_block_types': ['Temporal_Self', 'Temporal_Self'], 'temporal_position_encoding': True, 'temporal_position_encoding_max_len': 24, 'temporal_attention_dim_div': 1}}, 'noise_scheduler_kwargs': {'DDIMScheduler': {'num_train_timesteps': 1000, 'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'linear', 'steps_offset': 1, 'clip_sample': False}, 'EulerAncestralDiscreteScheduler': {'num_train_timesteps': 1000, 'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'linear'}, 'KDPM2AncestralDiscreteScheduler': {'num_train_timesteps': 1000, 'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'linear'}}}
29
+
30
  ### >>> create validation pipeline >>> ###
31
  tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
32
  text_encoder = CLIPTextModel.from_pretrained(model_path, subfolder="text_encoder")
 
91
  # This function will be called during inference time.
92
 
93
 
94
+ # new_handler = EndpointHandler()
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ