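"""Gradio demo: text-to-GIF generation with AnimateDiff-Lightning.

Loads the ByteDance AnimateDiff-Lightning motion adapter on top of the
emilianJR/epiCRealism Stable Diffusion base model and exposes a simple
Gradio interface that renders a text prompt to an animated GIF.
"""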
import torch
import gradio as gr
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda" if torch.cuda.is_available() else "cpu"
# float16 is only reliable on GPU; fall back to float32 on CPU
dtype = torch.float16 if device == "cuda" else torch.float32

# Load the model
step = 4  # Options: [1, 2, 4, 8]
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = "emilianJR/epiCRealism"

adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
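# AnimateDiff-Lightning expects an Euler scheduler with trailing timestep
# spacing and a linear beta schedule, as in the model card's diffusers example.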
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")

# Define the GIF generation function
def text_to_gif(prompt):
    # Lightning checkpoints are distilled for few-step, guidance-free
    # inference, so guidance_scale is 1.0 and num_inference_steps matches
    # the chosen checkpoint's step count.
    output = pipe(prompt, guidance_scale=1.0, num_inference_steps=step)
    gif_path = "animation.gif"
    export_to_gif(output.frames[0], gif_path)
    return gif_path

# Set up the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Text to GIF Generator using AnimateDiff")
    prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the animation you want to create")
    gif_output = gr.Image(label="Generated GIF")
    generate_btn = gr.Button("Generate GIF")
    
    generate_btn.click(fn=text_to_gif, inputs=prompt, outputs=gif_output)

# Launch the Gradio app
demo.launch()