multimodalart (HF Staff) committed
Commit b4a27cf · verified · 1 Parent(s): bff8240

Update app.py

Files changed (1):
  app.py  +14  -14
app.py CHANGED
@@ -5,7 +5,21 @@ import subprocess
 import asyncio
 import uuid
 from typing import Sequence, Mapping, Any, Union
+from huggingface_hub import hf_hub_download
 import spaces
+
+# --- Model Downloads ---
+print("Downloading models from Hugging Face Hub...")
+text_encooder_repo = hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", local_dir="models/text_encoders")
+print(text_encooder_repo)
+hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
+hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
+hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/vae/wan_2.1_vae.safetensors", local_dir="models/vae")
+hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/clip_vision/clip_vision_h.safetensors", local_dir="models/clip_vision")
+hf_hub_download_local(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", local_dir="models/loras")
+hf_hub_download_local(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", local_dir="models/loras")
+print("Downloads complete.")
+
 # --- 2. Let ComfyUI's main.py handle all initial setup ---
 print("Importing ComfyUI's main.py for setup...")
 import main
@@ -14,7 +28,6 @@ print("ComfyUI main imported.")
 # --- 3. Now we can import the rest of the necessary modules ---
 import torch
 import gradio as gr
-from huggingface_hub import hf_hub_download
 from comfy import model_management
 from PIL import Image
 import random
@@ -43,19 +56,6 @@ def hf_hub_download_local(repo_id, filename, local_dir, **kwargs):
     return target_path
 
 
-# --- Model Downloads ---
-print("Downloading models from Hugging Face Hub...")
-text_encooder_repo = hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", local_dir="models/text_encoders")
-print(text_encooder_repo)
-hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
-hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
-hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/vae/wan_2.1_vae.safetensors", local_dir="models/vae")
-hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/clip_vision/clip_vision_h.safetensors", local_dir="models/clip_vision")
-hf_hub_download_local(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", local_dir="models/loras")
-hf_hub_download_local(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", local_dir="models/loras")
-print("Downloads complete.")
-
-
 # --- ZeroGPU: Pre-load models and instantiate nodes globally ---
 cliploader = nodes.NODE_CLASS_MAPPINGS["CLIPLoader"]()
 cliptextencode = nodes.NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
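
Note: the change moves the Hugging Face Hub downloads so they run before ComfyUI's main.py is imported. The download calls go through a local helper, hf_hub_download_local, defined elsewhere in app.py (only its closing return target_path appears in the last hunk). As a rough sketch, and purely an assumption about this commit, such a wrapper would typically just forward to huggingface_hub.hf_hub_download and hand back the resolved path:

from huggingface_hub import hf_hub_download

def hf_hub_download_local(repo_id, filename, local_dir, **kwargs):
    # Assumed behavior: fetch the file into local_dir and return its local path.
    # The real wrapper in app.py may also rearrange the split_files/ subfolder
    # layout into the folders ComfyUI expects; that part is not reproduced here.
    target_path = hf_hub_download(repo_id=repo_id, filename=filename,
                                  local_dir=local_dir, **kwargs)
    return target_path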