# Video-agent / config.py (commit 372e400) — header residue from the
# Hugging Face web UI, commented out so the module remains importable.
"""
Configuration file for WAN-VACE video generation application
"""
import os
# Hugging Face access token, read from the environment at import time
# (None when the variable is not set — downstream code must handle that).
HF_TOKEN = os.environ.get("HF_TOKEN")
# Model paths and configurations
# Model locations for the WAN-VACE pipeline. The transformer entry is a
# direct URL to a GGUF file; the remaining entries are Hugging Face repo
# identifiers (plus the filename to fetch for the text encoder).
MODEL_CONFIG = dict(
    transformer_path=(
        "https://huggingface.co/calcuis/wan-gguf/blob/main/"
        "wan2.1-v5-vace-1.3b-q4_0.gguf"
    ),
    text_encoder_path="chatpig/umt5xxl-encoder-gguf",
    text_encoder_file="umt5xxl-encoder-q4_0.gguf",
    vae_path="callgg/wan-decoder",
    pipeline_path="callgg/wan-decoder",
)
# Default generation parameters
# Defaults for a single generation run. At 16 fps, 57 frames is roughly a
# 3.5-second clip.
DEFAULT_PARAMS = dict(
    width=720,                 # output width in pixels
    height=480,                # output height in pixels
    num_frames=57,             # frames generated per clip
    num_inference_steps=24,    # denoising steps
    guidance_scale=2.5,        # classifier-free guidance strength
    conditioning_scale=0.0,    # VACE conditioning strength (off by default)
    fps=16,                    # playback frame rate of the exported video
    flow_shift=3.0,            # scheduler flow-shift parameter
)
# UI configuration
#
# The title and description here emphasise the agentic nature of the app:
# you provide a concept and the system plans the prompts for you. Feel free
# to adjust these strings to suit your needs or branding.
# Strings shown in the web UI.
# NOTE(review): the description promises a "vertical video", but
# DEFAULT_PARAMS uses 720x480 (landscape) — confirm which orientation is
# intended, or whether the app swaps width/height elsewhere.
UI_CONFIG = {
"title": "🎬 Agentic WAN-VACE Video Generation",
"description": (
"Generate high-quality videos from simple concepts. "
"Provide a short description of what you want to see, and the agent "
"will craft a refined prompt and negative prompt before generating a cinematic "
"vertical video using the WAN‑VACE model."
),
# Gradio theme name passed to the interface.
"theme": "default"
}
# Server configuration
# Launch options for the app server: bind on all interfaces at the Hugging
# Face Spaces default port, without creating a public share link.
SERVER_CONFIG = dict(
    host="0.0.0.0",
    port=7860,
    share=False,
)