frankleeeee committed · Commit 5b21912 · 1 parent: e6d2ce0
update

app.py CHANGED
@@ -7,151 +7,28 @@ Usage:
 """
 
 import argparse
 import importlib
 import os
 import subprocess
 import sys
-import
-import json
-import math
 
 import spaces
 import torch
 
 import gradio as gr
 
-
-
 CONFIG_MAP = {
-    "v1.
-    "v1.1-stage3": "configs/opensora-v1-1/inference/sample-ref.py",
-}
-HF_STDIT_MAP = {
-    "v1.1-stage2": "hpcai-tech/OpenSora-STDiT-v2-stage2",
-    "v1.1-stage3": "hpcai-tech/OpenSora-STDiT-v2-stage3",
-}
-RESOLUTION_MAP = {
-    "144p": (144, 256),
-    "240p": (240, 426),
-    "360p": (360, 480),
-    "480p": (480, 858),
-    "720p": (720, 1280),
-    "1080p": (1080, 1920)
 }
 
 
 # ============================
-#
-# ============================
-def collect_references_batch(reference_paths, vae, image_size):
-    from opensora.datasets.utils import read_from_path
-
-    refs_x = []
-    for reference_path in reference_paths:
-        if reference_path is None:
-            refs_x.append([])
-            continue
-        ref_path = reference_path.split(";")
-        ref = []
-        for r_path in ref_path:
-            r = read_from_path(r_path, image_size, transform_name="resize_crop")
-            r_x = vae.encode(r.unsqueeze(0).to(vae.device, vae.dtype))
-            r_x = r_x.squeeze(0)
-            ref.append(r_x)
-        refs_x.append(ref)
-    # refs_x: [batch, ref_num, C, T, H, W]
-    return refs_x
-
-
-def process_mask_strategy(mask_strategy):
-    mask_batch = []
-    mask_strategy = mask_strategy.split(";")
-    for mask in mask_strategy:
-        mask_group = mask.split(",")
-        assert len(mask_group) >= 1 and len(mask_group) <= 6, f"Invalid mask strategy: {mask}"
-        if len(mask_group) == 1:
-            mask_group.extend(["0", "0", "0", "1", "0"])
-        elif len(mask_group) == 2:
-            mask_group.extend(["0", "0", "1", "0"])
-        elif len(mask_group) == 3:
-            mask_group.extend(["0", "1", "0"])
-        elif len(mask_group) == 4:
-            mask_group.extend(["1", "0"])
-        elif len(mask_group) == 5:
-            mask_group.append("0")
-        mask_batch.append(mask_group)
-    return mask_batch
-
-
-def apply_mask_strategy(z, refs_x, mask_strategys, loop_i):
-    masks = []
-    for i, mask_strategy in enumerate(mask_strategys):
-        mask = torch.ones(z.shape[2], dtype=torch.float, device=z.device)
-        if mask_strategy is None:
-            masks.append(mask)
-            continue
-        mask_strategy = process_mask_strategy(mask_strategy)
-        for mst in mask_strategy:
-            loop_id, m_id, m_ref_start, m_target_start, m_length, edit_ratio = mst
-            loop_id = int(loop_id)
-            if loop_id != loop_i:
-                continue
-            m_id = int(m_id)
-            m_ref_start = int(m_ref_start)
-            m_length = int(m_length)
-            m_target_start = int(m_target_start)
-            edit_ratio = float(edit_ratio)
-            ref = refs_x[i][m_id]  # [C, T, H, W]
-            if m_ref_start < 0:
-                m_ref_start = ref.shape[1] + m_ref_start
-            if m_target_start < 0:
-                # z: [B, C, T, H, W]
-                m_target_start = z.shape[2] + m_target_start
-            z[i, :, m_target_start : m_target_start + m_length] = ref[:, m_ref_start : m_ref_start + m_length]
-            mask[m_target_start : m_target_start + m_length] = edit_ratio
-        masks.append(mask)
-    masks = torch.stack(masks)
-    return masks
-
-
-def process_prompts(prompts, num_loop):
-    from opensora.models.text_encoder.t5 import text_preprocessing
-
-    ret_prompts = []
-    for prompt in prompts:
-        if prompt.startswith("|0|"):
-            prompt_list = prompt.split("|")[1:]
-            text_list = []
-            for i in range(0, len(prompt_list), 2):
-                start_loop = int(prompt_list[i])
-                text = prompt_list[i + 1]
-                text = text_preprocessing(text)
-                end_loop = int(prompt_list[i + 2]) if i + 2 < len(prompt_list) else num_loop
-                text_list.extend([text] * (end_loop - start_loop))
-            assert len(text_list) == num_loop, f"Prompt loop mismatch: {len(text_list)} != {num_loop}"
-            ret_prompts.append(text_list)
-        else:
-            prompt = text_preprocessing(prompt)
-            ret_prompts.append([prompt] * num_loop)
-    return ret_prompts
-
-
-def extract_json_from_prompts(prompts):
-    additional_infos = []
-    ret_prompts = []
-    for prompt in prompts:
-        parts = re.split(r"(?=[{\[])", prompt)
-        assert len(parts) <= 2, f"Invalid prompt: {prompt}"
-        ret_prompts.append(parts[0])
-        if len(parts) == 1:
-            additional_infos.append({})
-        else:
-            additional_infos.append(json.loads(parts[1]))
-    return ret_prompts, additional_infos
-
-
-# ============================
-# Runtime Environment
 # ============================
 def install_dependencies(enable_optimization=False):
     """
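Note on the removed helpers above: a mask-strategy string is a semicolon-separated list of entries, each entry up to six comma-separated fields that process_mask_strategy pads with defaults and apply_mask_strategy unpacks as loop_id, m_id, m_ref_start, m_target_start, m_length, edit_ratio. A small illustration with a hypothetical entry, standard library only:

# Hypothetical entry; the field order follows the unpacking in the removed apply_mask_strategy.
entry = "1,0,-5,0,5,0.0"
loop_id, m_id, m_ref_start, m_target_start, m_length, edit_ratio = entry.split(",")
# Read as: in loop 1, copy the last 5 latent frames of reference 0 into target frames 0-4
# with edit_ratio 0.0 (negative starts count from the end, as in the code above).
print(int(loop_id), int(m_id), int(m_ref_start), int(m_target_start), int(m_length), float(edit_ratio))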
@@ -223,13 +100,10 @@ def build_models(model_type, config, enable_optimization=False):
     # build stdit
     # we load model from HuggingFace directly so that we don't need to
     # handle model download logic in HuggingFace Space
-    from
 
-    stdit =
-
-        enable_flash_attn=enable_optimization,
-        trust_remote_code=True,
-    ).cuda()
 
     # build scheduler
     from opensora.registry import SCHEDULERS
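The truncated loading call above is replaced in this commit by loading the v1.2 STDiT3 checkpoint directly from the Hub (see the new side of this hunk further down). A minimal sketch of that load path, assuming the opensora package and a CUDA device are available:

from opensora.models.stdit.stdit3 import STDiT3

# Pull the weights named in the new HF_STDIT_MAP and move them to the GPU,
# mirroring the updated build_models() in this commit.
stdit = STDiT3.from_pretrained("hpcai-tech/OpenSora-STDiT-v3")
stdit = stdit.cuda()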
@@ -253,13 +127,13 @@ def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--model-type",
-        default="v1.
         choices=MODEL_TYPES,
         help=f"The type of model to run for the Gradio App, can only be {MODEL_TYPES}",
     )
     parser.add_argument("--output", default="./outputs", type=str, help="The path to the output folder")
     parser.add_argument("--port", default=None, type=int, help="The port to run the Gradio App on.")
-    parser.add_argument("--host", default=
     parser.add_argument("--share", action="store_true", help="Whether to share this gradio demo.")
     parser.add_argument(
         "--enable-optimization",
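Given the flags defined in parse_args above, a local launch could look like the sketch below; the flag values are illustrative, and the default model type shown is the one introduced by this commit.

import subprocess
import sys

# Illustrative invocation only; app.py reads these flags via parse_args().
subprocess.run(
    [sys.executable, "app.py", "--model-type", "v1.2-stage3", "--port", "7860", "--host", "0.0.0.0"]
)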
@@ -279,6 +153,8 @@ def parse_args():
 # read config
 args = parse_args()
 config = read_config(CONFIG_MAP[args.model_type])
 
 # make outputs dir
 os.makedirs(args.output, exist_ok=True)
@@ -292,6 +168,24 @@ install_dependencies(enable_optimization=args.enable_optimization)
 
 # import after installation
 from opensora.datasets import IMG_FPS, save_sample
 from opensora.utils.misc import to_torch_dtype
 
 # some global variables
@@ -299,120 +193,297 @@ dtype = to_torch_dtype(config.dtype)
 device = torch.device("cuda")
 
 # build model
-vae, text_encoder, stdit, scheduler = build_models(
-
-
-
-
     with torch.inference_mode():
         # ======================
-        # 1. Preparation
         # ======================
         # parse the inputs
-
-
-        # compute number of loops
-        num_seconds = int(length.rstrip('s'))
-        total_number_of_frames = num_seconds * config.fps / config.frame_interval
-        num_loop = math.ceil(total_number_of_frames / config.num_frames)
-
-        # prepare model args
-        model_args = dict()
-        height = torch.tensor([resolution[0]], device=device, dtype=dtype)
-        width = torch.tensor([resolution[1]], device=device, dtype=dtype)
-        num_frames = torch.tensor([config.num_frames], device=device, dtype=dtype)
-        ar = torch.tensor([resolution[0] / resolution[1]], device=device, dtype=dtype)
-        if config.num_frames == 1:
-            config.fps = IMG_FPS
-        fps = torch.tensor([config.fps], device=device, dtype=dtype)
-        model_args["height"] = height
-        model_args["width"] = width
-        model_args["num_frames"] = num_frames
-        model_args["ar"] = ar
-        model_args["fps"] = fps
-
-        # compute latent size
-        input_size = (config.num_frames, *resolution)
-        latent_size = vae.get_latent_size(input_size)
 
-        #
-
-
-
-
 
-
-
             mask_strategy = [None]
-        elif mode == "
-
         else:
             raise ValueError(f"Invalid mode: {mode}")
 
-        #
-
-
-
-
-
-
-
-
-
         else:
             raise ValueError(f"Invalid mode: {mode}")
 
-        #
         for loop_i in range(num_loop):
             # 4.4 sample in hidden space
-
-            z = torch.randn(len(batch_prompts), vae.out_channels, *latent_size, device=device, dtype=dtype)
-
-            # 4.5. apply mask strategy
-            masks = None
 
-            #
             if loop_i > 0:
-
-
-
-
-
-
-
-                        mask_strategy[j] = ""
-                    else:
-                        mask_strategy[j] += ";"
-                    mask_strategy[
-                        j
-                    ] += f"{loop_i},{len(refs)-1},-{config.condition_frame_length},0,{config.condition_frame_length}"
-
-            masks = apply_mask_strategy(z, refs_x, mask_strategy, loop_i)
 
             # 4.6. diffusion sampling
             samples = scheduler.sample(
                 stdit,
                 text_encoder,
                 z=z,
-                prompts=
                 device=device,
                 additional_args=model_args,
-
             )
-            samples = vae.decode(samples.to(dtype))
             video_clips.append(samples)
 
-
-
-
-
-
-
-
-
-
     return saved_path
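A note on the removed loop-conditioning code above: for every loop after the first it appends an entry of the form "{loop_i},{len(refs)-1},-{condition_frame_length},0,{condition_frame_length}" to the mask strategy. A worked example with hypothetical values:

# Hypothetical values, only to illustrate the removed f-string above.
loop_i = 1
num_refs = 1                 # len(refs)
condition_frame_length = 5   # config.condition_frame_length
entry = f"{loop_i},{num_refs - 1},-{condition_frame_length},0,{condition_frame_length}"
print(entry)  # "1,0,-5,0,5": in loop 1, seed target frames 0-4 with the last 5 latent frames of ref 0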
@@ -442,48 +513,138 @@ def main():
 
     with gr.Row():
         with gr.Column():
-
-
-
-
-
-            )
-            prompt_text = gr.Textbox(
-                label="Prompt",
-                placeholder="Describe your video here",
-                lines=4,
-            )
             resolution = gr.Radio(
-
-
-                label="Resolution",
             )
             length = gr.Radio(
-                choices=["2s", "4s", "8s"],
                 value="2s",
-                label="Video Length",
-                info="8s may fail as Hugging Face ZeroGPU has the limitation of max 200 seconds inference time."
             )
 
-
-
             )
-
         with gr.Column():
-            output_video = gr.Video(
-                label="Output Video",
-                height="100%"
-            )
 
     with gr.Row():
-
-
-
-
-
-
-
-
 
     # launch
     demo.launch(server_port=args.port, server_name=args.host, share=args.share)

app.py after this commit (the new-file side of each hunk above):

@@ +7,28 @@ Usage:
 """
 
 import argparse
+import datetime
 import importlib
 import os
 import subprocess
 import sys
+from tempfile import NamedTemporaryFile
 
 import spaces
 import torch
 
 import gradio as gr
 
+MODEL_TYPES = ["v1.2-stage3"]
+WATERMARK_PATH = "./assets/images/watermark/watermark.png"
 CONFIG_MAP = {
+    "v1.2-stage3": "configs/opensora-v1-2/inference/sample.py",
 }
+HF_STDIT_MAP = {"v1.2-stage3": "hpcai-tech/OpenSora-STDiT-v3"}
 
 
 # ============================
+# Prepare Runtime Environment
 # ============================
 def install_dependencies(enable_optimization=False):
     """

@@ +100,10 @@ def build_models(model_type, config, enable_optimization=False):
     # build stdit
     # we load model from HuggingFace directly so that we don't need to
     # handle model download logic in HuggingFace Space
+    from opensora.models.stdit.stdit3 import STDiT3
 
+    stdit = STDiT3.from_pretrained(HF_STDIT_MAP[model_type])
+    stdit = stdit.cuda()
 
     # build scheduler
     from opensora.registry import SCHEDULERS

@@ +127,13 @@ def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "--model-type",
+        default="v1.2-stage3",
         choices=MODEL_TYPES,
         help=f"The type of model to run for the Gradio App, can only be {MODEL_TYPES}",
     )
     parser.add_argument("--output", default="./outputs", type=str, help="The path to the output folder")
     parser.add_argument("--port", default=None, type=int, help="The port to run the Gradio App on.")
+    parser.add_argument("--host", default="0.0.0.0", type=str, help="The host to run the Gradio App on.")
     parser.add_argument("--share", action="store_true", help="Whether to share this gradio demo.")
     parser.add_argument(
         "--enable-optimization",

@@ +153,8 @@ def parse_args():
 # read config
 args = parse_args()
 config = read_config(CONFIG_MAP[args.model_type])
+torch.backends.cuda.matmul.allow_tf32 = True
+torch.backends.cudnn.allow_tf32 = True
 
 # make outputs dir
 os.makedirs(args.output, exist_ok=True)

@@ +168,24 @@ install_dependencies(enable_optimization=args.enable_optimization)
 
 # import after installation
 from opensora.datasets import IMG_FPS, save_sample
+from opensora.datasets.aspect import get_image_size, get_num_frames
+from opensora.models.text_encoder.t5 import text_preprocessing
+from opensora.utils.inference_utils import (
+    add_watermark,
+    append_generated,
+    append_score_to_prompts,
+    apply_mask_strategy,
+    collect_references_batch,
+    dframe_to_frame,
+    extract_json_from_prompts,
+    extract_prompts_loop,
+    get_random_prompt_by_openai,
+    has_openai_key,
+    merge_prompt,
+    prepare_multi_resolution_info,
+    refine_prompts_by_openai,
+    split_prompt,
+)
 from opensora.utils.misc import to_torch_dtype
 
 # some global variables

@@ +193,297 @@ dtype = to_torch_dtype(config.dtype)
 device = torch.device("cuda")
 
 # build model
+vae, text_encoder, stdit, scheduler = build_models(
+    args.model_type, config, enable_optimization=args.enable_optimization
+)
+
+
+def run_inference(
+    mode,
+    prompt_text,
+    resolution,
+    aspect_ratio,
+    length,
+    motion_strength,
+    aesthetic_score,
+    use_motion_strength,
+    use_aesthetic_score,
+    camera_motion,
+    reference_image,
+    refine_prompt,
+    fps,
+    num_loop,
+    seed,
+    sampling_steps,
+    cfg_scale,
+):
+    if prompt_text is None or prompt_text == "":
+        gr.Warning("Your prompt is empty, please enter a valid prompt")
+        return None
+
+    torch.manual_seed(seed)
     with torch.inference_mode():
         # ======================
+        # 1. Preparation arguments
         # ======================
         # parse the inputs
+        # frame_interval must be 1 so we ignore it here
+        image_size = get_image_size(resolution, aspect_ratio)
 
+        # compute generation parameters
+        if mode == "Text2Image":
+            num_frames = 1
+            fps = IMG_FPS
+        else:
+            num_frames = config.num_frames
+            num_frames = get_num_frames(length)
 
+        condition_frame_length = int(num_frames / 17 * 5 / 3)
+        condition_frame_edit = 0.0
+
+        input_size = (num_frames, *image_size)
+        latent_size = vae.get_latent_size(input_size)
+        multi_resolution = "OpenSora"
+        align = 5
+
+        # == prepare mask strategy ==
+        if mode == "Text2Image":
             mask_strategy = [None]
+        elif mode == "Text2Video":
+            if reference_image is not None:
+                mask_strategy = ["0"]
+            else:
+                mask_strategy = [None]
         else:
             raise ValueError(f"Invalid mode: {mode}")
 
+        # == prepare reference ==
+        if mode == "Text2Image":
+            refs = [""]
+        elif mode == "Text2Video":
+            if reference_image is not None:
+                # save image to disk
+                from PIL import Image
+
+                im = Image.fromarray(reference_image)
+                temp_file = NamedTemporaryFile(suffix=".png")
+                im.save(temp_file.name)
+                refs = [temp_file.name]
+            else:
+                refs = [""]
         else:
             raise ValueError(f"Invalid mode: {mode}")
 
+        # == get json from prompts ==
+        batch_prompts = [prompt_text]
+        batch_prompts, refs, mask_strategy = extract_json_from_prompts(batch_prompts, refs, mask_strategy)
+
+        # == get reference for condition ==
+        refs = collect_references_batch(refs, vae, image_size)
+
+        # == multi-resolution info ==
+        model_args = prepare_multi_resolution_info(
+            multi_resolution, len(batch_prompts), image_size, num_frames, fps, device, dtype
+        )
+
+        # == process prompts step by step ==
+        # 0. split prompt
+        # each element in the list is [prompt_segment_list, loop_idx_list]
+        batched_prompt_segment_list = []
+        batched_loop_idx_list = []
+        for prompt in batch_prompts:
+            prompt_segment_list, loop_idx_list = split_prompt(prompt)
+            batched_prompt_segment_list.append(prompt_segment_list)
+            batched_loop_idx_list.append(loop_idx_list)
+
+        # 1. refine prompt by openai
+        if refine_prompt:
+            # check if openai key is provided
+            if not has_openai_key():
+                gr.Warning("OpenAI API key is not provided, the prompt will not be enhanced.")
+            else:
+                for idx, prompt_segment_list in enumerate(batched_prompt_segment_list):
+                    batched_prompt_segment_list[idx] = refine_prompts_by_openai(prompt_segment_list)
+
+        # process scores
+        aesthetic_score = aesthetic_score if use_aesthetic_score else None
+        motion_strength = motion_strength if use_motion_strength and mode != "Text2Image" else None
+        camera_motion = None if camera_motion == "none" or mode == "Text2Image" else camera_motion
+        # 2. append score
+        for idx, prompt_segment_list in enumerate(batched_prompt_segment_list):
+            batched_prompt_segment_list[idx] = append_score_to_prompts(
+                prompt_segment_list,
+                aes=aesthetic_score,
+                flow=motion_strength,
+                camera_motion=camera_motion,
+            )
+
+        # 3. clean prompt with T5
+        for idx, prompt_segment_list in enumerate(batched_prompt_segment_list):
+            batched_prompt_segment_list[idx] = [text_preprocessing(prompt) for prompt in prompt_segment_list]
+
+        # 4. merge to obtain the final prompt
+        batch_prompts = []
+        for prompt_segment_list, loop_idx_list in zip(batched_prompt_segment_list, batched_loop_idx_list):
+            batch_prompts.append(merge_prompt(prompt_segment_list, loop_idx_list))
+
+        # =========================
+        # Generate image/video
+        # =========================
+        video_clips = []
+
         for loop_i in range(num_loop):
             # 4.4 sample in hidden space
+            batch_prompts_loop = extract_prompts_loop(batch_prompts, loop_i)
 
+            # == loop ==
             if loop_i > 0:
+                refs, mask_strategy = append_generated(
+                    vae, video_clips[-1], refs, mask_strategy, loop_i, condition_frame_length, condition_frame_edit
+                )
+
+            # == sampling ==
+            z = torch.randn(len(batch_prompts), vae.out_channels, *latent_size, device=device, dtype=dtype)
+            masks = apply_mask_strategy(z, refs, mask_strategy, loop_i, align=align)
 
             # 4.6. diffusion sampling
+            # hack to update num_sampling_steps and cfg_scale
+            scheduler_kwargs = config.scheduler.copy()
+            scheduler_kwargs.pop("type")
+            scheduler_kwargs["num_sampling_steps"] = sampling_steps
+            scheduler_kwargs["cfg_scale"] = cfg_scale
+
+            scheduler.__init__(**scheduler_kwargs)
             samples = scheduler.sample(
                 stdit,
                 text_encoder,
                 z=z,
+                prompts=batch_prompts_loop,
                 device=device,
                 additional_args=model_args,
+                progress=True,
+                mask=masks,
             )
+            samples = vae.decode(samples.to(dtype), num_frames=num_frames)
             video_clips.append(samples)
 
+        # =========================
+        # Save output
+        # =========================
+        video_clips = [val[0] for val in video_clips]
+        for i in range(1, num_loop):
+            video_clips[i] = video_clips[i][:, dframe_to_frame(condition_frame_length) :]
+        video = torch.cat(video_clips, dim=1)
+        current_datetime = datetime.datetime.now()
+        timestamp = current_datetime.timestamp()
+        save_path = os.path.join(args.output, f"output_{timestamp}")
+        saved_path = save_sample(video, save_path=save_path, fps=24)
+        torch.cuda.empty_cache()
+
+        # add watermark
+        # all watermarked videos should have a _watermarked suffix
+        if mode != "Text2Image" and os.path.exists(WATERMARK_PATH):
+            watermarked_path = saved_path.replace(".mp4", "_watermarked.mp4")
+            success = add_watermark(saved_path, WATERMARK_PATH, watermarked_path)
+            if success:
+                return watermarked_path
+            else:
                 return saved_path
+        else:
+            return saved_path
+
+
+@spaces.GPU(duration=200)
+def run_image_inference(
+    prompt_text,
+    resolution,
+    aspect_ratio,
+    length,
+    motion_strength,
+    aesthetic_score,
+    use_motion_strength,
+    use_aesthetic_score,
+    camera_motion,
+    reference_image,
+    refine_prompt,
+    fps,
+    num_loop,
+    seed,
+    sampling_steps,
+    cfg_scale,
+):
+    return run_inference(
+        "Text2Image",
+        prompt_text,
+        resolution,
+        aspect_ratio,
+        length,
+        motion_strength,
+        aesthetic_score,
+        use_motion_strength,
+        use_aesthetic_score,
+        camera_motion,
+        reference_image,
+        refine_prompt,
+        fps,
+        num_loop,
+        seed,
+        sampling_steps,
+        cfg_scale,
+    )
+
+
+@spaces.GPU(duration=200)
+def run_video_inference(
+    prompt_text,
+    resolution,
+    aspect_ratio,
+    length,
+    motion_strength,
+    aesthetic_score,
+    use_motion_strength,
+    use_aesthetic_score,
+    camera_motion,
+    reference_image,
+    refine_prompt,
+    fps,
+    num_loop,
+    seed,
+    sampling_steps,
+    cfg_scale,
+):
+    # if (resolution == "480p" and length == "16s") or \
+    #     (resolution == "720p" and length in ["8s", "16s"]):
+    #     gr.Warning("Generation is interrupted as the combination of 480p and 16s will lead to CUDA out of memory")
+    # else:
+    return run_inference(
+        "Text2Video",
+        prompt_text,
+        resolution,
+        aspect_ratio,
+        length,
+        motion_strength,
+        aesthetic_score,
+        use_motion_strength,
+        use_aesthetic_score,
+        camera_motion,
+        reference_image,
+        refine_prompt,
+        fps,
+        num_loop,
+        seed,
+        sampling_steps,
+        cfg_scale,
+    )
+
+
+def generate_random_prompt():
+    if "OPENAI_API_KEY" not in os.environ:
+        gr.Warning("Your prompt is empty and the OpenAI API key is not provided, please enter a valid prompt")
+        return None
+    else:
+        prompt_text = get_random_prompt_by_openai()
+        return prompt_text
 
 
 def main():

@@ +513,138 @@ def main():
 
     with gr.Row():
         with gr.Column():
+            prompt_text = gr.Textbox(label="Prompt", placeholder="Describe your video here", lines=4)
+            refine_prompt = gr.Checkbox(value=True, label="Refine prompt with GPT4o")
+            random_prompt_btn = gr.Button("Random Prompt By GPT4o")
+
+            gr.Markdown("## Basic Settings")
             resolution = gr.Radio(
+                choices=["144p", "240p", "360p", "480p", "720p"],
+                value="480p",
+                label="Resolution",
+            )
+            aspect_ratio = gr.Radio(
+                choices=["9:16", "16:9", "3:4", "4:3", "1:1"],
+                value="9:16",
+                label="Aspect Ratio (H:W)",
             )
             length = gr.Radio(
+                choices=["2s", "4s", "8s", "16s"],
                 value="2s",
+                label="Video Length",
+                info="only effective for video generation, 8s may fail as Hugging Face ZeroGPU has the limitation of max 200 seconds inference time.",
             )
 
+            with gr.Row():
+                seed = gr.Slider(value=1024, minimum=1, maximum=2048, step=1, label="Seed")
+
+                sampling_steps = gr.Slider(value=30, minimum=1, maximum=200, step=1, label="Sampling steps")
+                cfg_scale = gr.Slider(value=7.0, minimum=0.0, maximum=10.0, step=0.1, label="CFG Scale")
+
+            with gr.Row():
+                with gr.Column():
+                    motion_strength = gr.Slider(
+                        value=5,
+                        minimum=0,
+                        maximum=100,
+                        step=1,
+                        label="Motion Strength",
+                        info="only effective for video generation",
+                    )
+                    use_motion_strength = gr.Checkbox(value=False, label="Enable")
+
+                with gr.Column():
+                    aesthetic_score = gr.Slider(
+                        value=6.5,
+                        minimum=4,
+                        maximum=7,
+                        step=0.1,
+                        label="Aesthetic",
+                        info="effective for text & video generation",
+                    )
+                    use_aesthetic_score = gr.Checkbox(value=True, label="Enable")
+
+            camera_motion = gr.Radio(
+                value="none",
+                label="Camera Motion",
+                choices=["none", "pan right", "pan left", "tilt up", "tilt down", "zoom in", "zoom out", "static"],
+                interactive=True,
             )
+
+            gr.Markdown("## Advanced Settings")
+            with gr.Row():
+                fps = gr.Slider(
+                    value=24,
+                    minimum=1,
+                    maximum=60,
+                    step=1,
+                    label="FPS",
+                    info="This is the frames per seconds for video generation, keep it to 24 if you are not sure",
+                )
+                num_loop = gr.Slider(
+                    value=1,
+                    minimum=1,
+                    maximum=20,
+                    step=1,
+                    label="Number of Loops",
+                    info="This will change the length of the generated video, keep it to 1 if you are not sure",
+                )
+
+            gr.Markdown("## Reference Image")
+            reference_image = gr.Image(label="Image (optional)", show_download_button=True)
+
         with gr.Column():
+            output_video = gr.Video(label="Output Video", height="100%")
 
     with gr.Row():
+        image_gen_button = gr.Button("Generate image")
+        video_gen_button = gr.Button("Generate video")
+
+        image_gen_button.click(
+            fn=run_image_inference,
+            inputs=[
+                prompt_text,
+                resolution,
+                aspect_ratio,
+                length,
+                motion_strength,
+                aesthetic_score,
+                use_motion_strength,
+                use_aesthetic_score,
+                camera_motion,
+                reference_image,
+                refine_prompt,
+                fps,
+                num_loop,
+                seed,
+                sampling_steps,
+                cfg_scale,
+            ],
+            outputs=reference_image,
+        )
+        video_gen_button.click(
+            fn=run_video_inference,
+            inputs=[
+                prompt_text,
+                resolution,
+                aspect_ratio,
+                length,
+                motion_strength,
+                aesthetic_score,
+                use_motion_strength,
+                use_aesthetic_score,
+                camera_motion,
+                reference_image,
+                refine_prompt,
+                fps,
+                num_loop,
+                seed,
+                sampling_steps,
+                cfg_scale,
+            ],
+            outputs=output_video,
+        )
+        random_prompt_btn.click(fn=generate_random_prompt, outputs=prompt_text)
 
     # launch
     demo.launch(server_port=args.port, server_name=args.host, share=args.share)
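A note on the loop-stitching logic in the new run_inference above: condition_frame_length = int(num_frames / 17 * 5 / 3) sets how many latent frames of the previous clip seed the next loop (for a hypothetical num_frames of 51, int(51 / 17 * 5 / 3) = 5), and every clip after the first drops its conditioning frames before concatenation. A minimal, self-contained sketch of that concatenation with toy tensors (shapes are hypothetical):

import torch

# Toy stand-ins for two decoded clips: [C, T, H, W], hypothetical sizes.
clip_a = torch.zeros(3, 8, 4, 4)
clip_b = torch.ones(3, 8, 4, 4)

overlap = 2  # stands in for dframe_to_frame(condition_frame_length)

# Later clips re-generate the frames they were conditioned on,
# so those frames are dropped before concatenating along the time dimension.
video = torch.cat([clip_a, clip_b[:, overlap:]], dim=1)
print(video.shape)  # torch.Size([3, 14, 4, 4])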