Create app.py
app.py
ADDED

import gradio as gr
import torch
import spaces
import uuid

from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_video
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
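
# Gradio Space for ByteDance's AnimateDiff-Lightning: a distilled AnimateDiff
# motion module that turns a text prompt into a short video clip in a handful
# of denoising steps on top of a Stable Diffusion 1.5 base model.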

# Constants
bases = {
    "ToonYou": "frankjoshua/toonyou_beta6",
    "epiCRealism": "emilianJR/epiCRealism"
}
step_loaded = None
base_loaded = "ToonYou"
motion_loaded = None
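
# The pipeline is built once at import time; the flags above record which base
# weights, Lightning step checkpoint and motion LoRA are currently loaded, so a
# request only swaps weights when the user's selection changes.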

# Ensure model and scheduler are initialized in GPU-enabled function
if not torch.cuda.is_available():
    raise NotImplementedError("No GPU detected!")

device = "cuda"
dtype = torch.float16
# AnimateDiffPipeline requires a motion adapter; the Lightning weights are
# loaded into it on demand in generate_image below.
adapter = MotionAdapter().to(device, dtype)
pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], motion_adapter=adapter, torch_dtype=dtype).to(device)
# Lightning is distilled with trailing timestep spacing and a linear beta
# schedule, so the scheduler is configured to match.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")

# Safety checkers
from transformers import CLIPFeatureExtractor

feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
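
# Note: only the feature-extractor half of the usual CLIP safety-checker stack
# is instantiated; no checker is attached to the pipeline, so outputs are not
# filtered.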

# Function
@spaces.GPU(enable_queue=True)
def generate_image(prompt, base, motion, step, progress=gr.Progress()):
    global step_loaded
    global base_loaded
    global motion_loaded
    print(prompt, base, step)

    # Load the AnimateDiff-Lightning checkpoint matching the requested step
    # count. Checkpoints exist for 1, 2, 4 and 8 steps; larger counts fall
    # back to the 8-step weights (an assumption of this sketch).
    if step_loaded != step:
        repo = "ByteDance/AnimateDiff-Lightning"
        available = int(step) if int(step) in (1, 2, 4, 8) else 8
        ckpt = f"animatediff_lightning_{available}step_diffusers.safetensors"
        pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
        step_loaded = step

    # Swap the UNet weights in place when a different base model is selected.
    if base_loaded != base:
        pipe.unet.load_state_dict(torch.load(hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin"), map_location=device), strict=False)
        base_loaded = base

    # Attach the selected camera-motion LoRA at 0.7 strength, unloading
    # whatever motion LoRA was active before.
    if motion_loaded != motion:
        pipe.unload_lora_weights()
        if motion != "":
            pipe.load_lora_weights(motion, adapter_name="motion")
            pipe.set_adapters(["motion"], [0.7])
        motion_loaded = motion

    # Distilled models are sampled without classifier-free guidance, hence
    # guidance_scale=1.0.
    output = pipe(prompt=prompt, guidance_scale=1.0, num_inference_steps=int(step))

    # Export the frames to a uniquely named mp4 and return its path to Gradio.
    name = str(uuid.uuid4()).replace("-", "")
    path = f"/tmp/{name}.mp4"
    export_to_video(output.frames[0], path, fps=10)
    return path
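
# Example (hypothetical values): generate_image("a corgi running on the beach",
# "ToonYou", "", 4) writes a 16-frame clip at 10 fps and returns a path such as
# /tmp/<hex>.mp4.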

# Gradio Interface
with gr.Blocks(css="style.css") as demo:
    gr.HTML(
        "<h1><center>Text To Video ⚡</center></h1>" +
        "<p><center>AnimateDiff-Lightning text-to-video generation</center></p>" +
        "<p><center><a href='https://huggingface.co/ByteDance/AnimateDiff-Lightning'>https://huggingface.co/ByteDance/AnimateDiff-Lightning</a></center></p>"
    )
    with gr.Group():
        with gr.Row():
            prompt = gr.Textbox(
                label='Prompt (Eng)'
            )
        with gr.Row():
            select_base = gr.Dropdown(
                label='Base model',
                choices=[
                    "ToonYou",
                    "epiCRealism",
                ],
                value=base_loaded,
                interactive=True
            )
            select_motion = gr.Dropdown(
                label='Motion',
                choices=[
                    ("Default", ""),
                    ("Zoom in", "guoyww/animatediff-motion-lora-zoom-in"),
                    ("Zoom out", "guoyww/animatediff-motion-lora-zoom-out"),
                    ("Tilt up", "guoyww/animatediff-motion-lora-tilt-up"),
                    ("Tilt down", "guoyww/animatediff-motion-lora-tilt-down"),
                    ("Pan left", "guoyww/animatediff-motion-lora-pan-left"),
                    ("Pan right", "guoyww/animatediff-motion-lora-pan-right"),
                    ("Roll left", "guoyww/animatediff-motion-lora-rolling-anticlockwise"),
                    ("Roll right", "guoyww/animatediff-motion-lora-rolling-clockwise"),
                ],
                value="",
                interactive=True
            )
            select_step = gr.Dropdown(
                label='Inference steps',
                choices=[
                    ('1-Step', 1),
                    ('2-Step', 2),
                    ('4-Step', 4),
                    ('8-Step', 8),
                    ('16-Step', 16),
                    ('25-Step', 25)
                ],
                value=16,
                interactive=True
            )
            submit = gr.Button(
                scale=1,
                variant='primary'
            )
    video = gr.Video(
        label='Text To Video',
        autoplay=True,
        height=512,
        width=512,
        elem_id="video_output"
    )
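
    # Pressing Enter in the prompt box and clicking the button both invoke
    # generate_image; demo.queue() below serializes requests for the GPU.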
    prompt.submit(
        fn=generate_image,
        inputs=[prompt, select_base, select_motion, select_step],
        outputs=video,
    )
    submit.click(
        fn=generate_image,
        inputs=[prompt, select_base, select_motion, select_step],
        outputs=video,
    )

demo.queue().launch()
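
# Assumed local setup (not pinned by this commit): roughly
#   pip install gradio spaces torch diffusers transformers peft safetensors opencv-python
# then `python app.py` on a CUDA machine.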