import os
import tempfile

import gradio as gr
import spaces
import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
| # 1. Load the Model Components | |
| print("Loading AnimateDiff-Lightning... this will be fast.") | |
| # Load the motion adapter (the "video" part of the brain) | |
| adapter = MotionAdapter.from_pretrained( | |
| "ByteDance/AnimateDiff-Lightning-4step-T2V", | |
| torch_dtype=torch.float16 | |
| ) | |
| # Load the base model (the "image" part of the brain) | |
| # We use epiCRealism for high-quality realistic style | |
| pipe = AnimateDiffPipeline.from_pretrained( | |
| "emilianJR/epiCRealism", | |
| motion_adapter=adapter, | |
| torch_dtype=torch.float16 | |
| ) | |
| # Set up the scheduler specifically for Lightning (4-step generation) | |
| pipe.scheduler = EulerDiscreteScheduler.from_config( | |
| pipe.scheduler.config, | |
| timestep_spacing="trailing", | |
| beta_schedule="linear" | |
| ) | |
| # Move to GPU immediately to speed up loading (ZeroGPU handles the swap) | |
| device = "cuda" if torch.cuda.is_available() else "cpu" | |
| pipe.to(device) | |
| # 2. Define the Generation Function | |
| # @spaces.GPU ensures you get a powerful GPU for this function | |
| @spaces.GPU(duration=60) | |
| def generate_video(prompt, negative_prompt): | |
| print(f"Generating video for: {prompt}") | |
| # Generate the video frames | |
| output = pipe( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| num_inference_steps=4, # Lightning needs only 4 steps! | |
| guidance_scale=1.5, # Keep guidance low for Lightning | |
| num_frames=16, # Standard length for AnimateDiff | |
| ) | |
| frames = output.frames[0] | |
| # Save to MP4 | |
| output_path = "output.mp4" | |
| export_to_video(frames, output_path) | |
| return output_path | |
| # 3. Build the User Interface | |
| with gr.Blocks(theme="soft") as demo: | |
| gr.Markdown("# ⚡ AnimateDiff Lightning (Free & Fast)") | |
| gr.Markdown("A truly free, open-source video generator using ByteDance's Lightning technology. fast generation.") | |
| with gr.Row(): | |
| with gr.Column(): | |
| prompt_input = gr.Textbox( | |
| label="Prompt", | |
| placeholder="Close up portrait of a cyberpunk woman, neon city background, rainfall, 8k, realistic", | |
| lines=3 | |
| ) | |
| neg_prompt_input = gr.Textbox( | |
| label="Negative Prompt", | |
| value="bad quality, worst quality, deformed, distorted, watermark", | |
| lines=2 | |
| ) | |
| generate_btn = gr.Button("⚡ Generate Video", variant="primary") | |
| with gr.Column(): | |
| video_output = gr.Video(label="Generated Result") | |
| generate_btn.click( | |
| fn=generate_video, | |
| inputs=[prompt_input, neg_prompt_input], | |
| outputs=video_output | |
| ) | |
| # Launch | |
| demo.launch() | |