DeepBeepMeep committed
Commit 50a1f67 · 1 Parent(s): be89d46
Files changed (2)
  1. requirements.txt +1 -1
  2. wgp.py +3 -3
requirements.txt CHANGED
@@ -30,5 +30,5 @@ segment-anything
 omegaconf
 hydra-core
 librosa
-#loguru
+loguru
 # rembg==2.0.65
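Note: loguru moves from a commented-out entry to a hard dependency, so any environment installed from this requirements.txt will now pull it in. A minimal sketch of typical loguru usage for anyone unfamiliar with the package; the sink path, rotation size, and log messages below are illustrative assumptions, not values taken from wgp.py:

    from loguru import logger

    # loguru ships a preconfigured logger; no handler setup is needed for stderr output.
    logger.info("HunyuanVideo custom transformer loaded")

    # Optionally add a rotating file sink (path and rotation settings are assumptions).
    logger.add("wgp.log", rotation="10 MB", level="DEBUG")
    logger.debug("guidance_scale={}", 7.5)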
wgp.py CHANGED
@@ -1618,7 +1618,7 @@ def get_model_name(model_filename, description_container = [""]):
         description = "A good looking image 2 video model, but not so good in prompt adherence."
     elif "hunyuan_video_custom" in model_filename:
         model_name = "Hunyuan Video Custom 720p"
-        description = "The Hunyuan Video Custom model is proably the best model to transfer people (only people for the momment) as it is quite good to keep their identity. However it is slow as to get good results, you to generate 720p videos with 30 steps."
+        description = "The Hunyuan Video Custom model is probably the best model for transferring people (only people for the moment), as it is quite good at preserving their identity. However it is slow: to get good results you need to generate 720p videos with 30 steps."
     else:
         model_name = "Wan2.1 text2video"
         model_name += " 14B" if "14B" in model_filename else " 1.3B"
@@ -1732,7 +1732,7 @@ def get_default_settings(filename):
             "resolution": "1280x720"
         })

-    elif get_model_type(filename) in ("hunyuan_video_custom"):
+    elif get_model_type(filename) in ("hunyuan_custom",):
         ui_defaults.update({
             "guidance_scale": 7.5,
             "flow_shift": 13,
@@ -1929,7 +1929,7 @@ def download_models(transformer_filename):
         text_encoder_filename = get_hunyuan_text_encoder_filename(text_encoder_quantization)
         model_def = {
             "repoId" : "DeepBeepMeep/HunyuanVideo",
-            "sourceFolderList" : [ "llava-llama-3-8b", "clip_vit_large_patch14" "" ],
+            "sourceFolderList" : [ "llava-llama-3-8b", "clip_vit_large_patch14", "" ],
             "fileList" :[ ["config.json", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "preprocessor_config.json"] + computeList(text_encoder_filename) , ["config.json", "model.safetensors", "preprocessor_config.json", "special_tokens_map.json", "tokenizer.json", "tokenizer_config.json", "vocab.json"], [ "hunyuan_video_720_quanto_int8_map.json", "hunyuan_video_custom_VAE_fp32.safetensors", "hunyuan_video_custom_VAE_config.json", "hunyuan_video_VAE_fp32.safetensors", "hunyuan_video_VAE_config.json" , "hunyuan_video_720_quanto_int8_map.json" ] + computeList(transformer_filename) ]
         }
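The sourceFolderList fix above adds a comma the original line was missing. In Python, adjacent string literals are concatenated at compile time, so the old value silently produced a two-element list instead of three and left it misaligned with the three sublists in fileList. Illustration:

    broken = [ "llava-llama-3-8b", "clip_vit_large_patch14" "" ]   # missing comma
    fixed  = [ "llava-llama-3-8b", "clip_vit_large_patch14", "" ]
    print(len(broken), broken[1])   # 2 clip_vit_large_patch14
    print(len(fixed))               # 3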
 
 
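For context, repoId, sourceFolderList, and fileList describe one Hugging Face repository, a parallel list of subfolders, and, per subfolder, the files to fetch (the empty folder name presumably meaning the repo root). A minimal sketch of how such a definition could be resolved with huggingface_hub; the helper name fetch_model_files and the download loop are assumptions for illustration, not the actual download_models implementation:

    from huggingface_hub import hf_hub_download

    def fetch_model_files(model_def, local_dir="ckpts"):
        # Walk the parallel sourceFolderList / fileList entries and download each file.
        for folder, files in zip(model_def["sourceFolderList"], model_def["fileList"]):
            for name in files:
                hf_hub_download(
                    repo_id=model_def["repoId"],
                    filename=name if not folder else f"{folder}/{name}",
                    local_dir=local_dir,
                )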