import os

import gradio as gr
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
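
# Cache all Hugging Face downloads under a project-local directory instead of
# the default ~/.cache/huggingface.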
cache_dir = os.path.expanduser("~/Downloads/Openking")
os.makedirs(cache_dir, exist_ok=True)
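
# Point the Hub cache environment variables at the same directory. Note that
# huggingface_hub reads these at import time, so the explicit cache_dir
# argument passed to from_pretrained below is what actually guarantees the
# download location here.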
os.environ["HF_HOME"] = cache_dir
os.environ["HF_HUB_CACHE"] = cache_dir
os.environ["HF_DATASETS_CACHE"] = cache_dir
hf_token = os.getenv("HF_TOKEN")
if not hf_token:
    raise ValueError("Please set your Hugging Face token as a secret named 'HF_TOKEN' in your Space settings.")
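
# Wan2.1 1.3B text-to-video checkpoint in Diffusers format.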
model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
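
# Load the pipeline once at startup. `token` replaces the deprecated
# `use_auth_token` argument, and `variant="fp16"` is dropped because the repo
# does not appear to publish a separate fp16 weight variant; the weights are
# cast to float16 via `torch_dtype` instead.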
try:
    pipe = DiffusionPipeline.from_pretrained(
        model_id,
        token=hf_token,
        cache_dir=cache_dir,
        torch_dtype=torch.float16,
    )
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
except Exception as e:
    raise RuntimeError(f"Failed to load model: {e}")
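

# Run the pipeline and write the frames out as an MP4 that Gradio's Video
# component can play back.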
def generate_video(prompt: str, num_inference_steps: int = 50):
    try:
        # The pipeline returns a batch of videos; take the first (and only) one.
        video_frames = pipe(prompt, num_inference_steps=int(num_inference_steps)).frames[0]
        output_path = "output.mp4"
        export_to_video(video_frames, output_path, fps=16)
        return output_path
    except Exception as e:
        # Surface failures in the UI rather than returning an invalid video path.
        raise gr.Error(str(e))
with gr.Blocks() as demo:
    gr.Markdown("# 🎥 Wan2.1 Text-to-Video Generator")
    prompt = gr.Textbox(label="Prompt", placeholder="A cat flying through space...")
    steps = gr.Slider(10, 100, value=50, step=1, label="Inference Steps")
    output = gr.Video(label="Result")
    btn = gr.Button("Generate Video")
    btn.click(generate_video, inputs=[prompt, steps], outputs=output)


if __name__ == "__main__":
    demo.launch()