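"""Gradio demo for Tencent's HunyuanVideo text-to-video model.

Designed to run as a Hugging Face Space: model weights are downloaded at
startup when a GPU is available, and video generation runs on an allocated
GPU via the `spaces` package.
"""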
import os
import time
from datetime import datetime
from pathlib import Path

import gradio as gr
import spaces
import torch
from huggingface_hub import snapshot_download
from loguru import logger

from hyvideo.config import parse_args
from hyvideo.inference import HunyuanVideoSampler
from hyvideo.utils.file_utils import save_videos_grid
from hyvideo.utils.preprocess_text_encoder_tokenizer_utils import preprocess_text_encoder_tokenizer
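# Download the model weights at startup, but only when a GPU is attached:
# on a CPU-only configuration the app starts in a warning-only mode and
# skips the multi-gigabyte downloads.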
if torch.cuda.device_count() > 0:
    snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
    snapshot_download(repo_id="xtuner/llava-llama-3-8b-v1_1-transformers", repo_type="model", local_dir="ckpts/llava-llama-3-8b-v1_1-transformers", force_download=True)

    class Args:
        def __init__(self, input_dir, output_dir):
            self.input_dir = input_dir
            self.output_dir = output_dir

    # Convert the LLaVA checkpoint into the standalone text-encoder layout expected by HunyuanVideo
    args = Args("ckpts/llava-llama-3-8b-v1_1-transformers", "ckpts/text_encoder")
    preprocess_text_encoder_tokenizer(args)

    snapshot_download(repo_id="openai/clip-vit-large-patch14", repo_type="model", local_dir="ckpts/text_encoder_2", force_download=True)
def initialize_model(model_path):
    print(f"initialize_model: {model_path}")
    if torch.cuda.device_count() == 0:
        return None

    args = parse_args()
    models_root_path = Path(model_path)
    if not models_root_path.exists():
        raise ValueError(f"`models_root` does not exist: {models_root_path}")
    print(f"`models_root` exists: {models_root_path}")
    hunyuan_video_sampler = HunyuanVideoSampler.from_pretrained(models_root_path, args=args)
    print(f"Model initialized: {model_path}")
    return hunyuan_video_sampler
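# @spaces.GPU requests a ZeroGPU slot for up to 120 seconds per call when the
# Space runs on ZeroGPU hardware; on classic GPU hardware it is a no-op.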
@spaces.GPU(duration=120)
def generate_video(
    model,
    prompt,
    resolution,
    video_length,
    seed,
    num_inference_steps,
    guidance_scale,
    flow_shift,
    embedded_guidance_scale
):
    if torch.cuda.device_count() == 0:
        gr.Warning("Set this space to GPU config to make it work.")
        return None

    seed = None if seed == -1 else seed
    width, height = resolution.split("x")
    width, height = int(width), int(height)
    negative_prompt = ""  # the negative prompt is not used by this inference pipeline

    outputs = model.predict(
        prompt=prompt,
        height=height,
        width=width,
        video_length=video_length,
        seed=seed,
        negative_prompt=negative_prompt,
        infer_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_videos_per_prompt=1,
        flow_shift=flow_shift,
        batch_size=1,
        embedded_guidance_scale=embedded_guidance_scale
    )

    samples = outputs["samples"]
    sample = samples[0].unsqueeze(0)

    save_path = "./gradio_outputs"
    os.makedirs(save_path, exist_ok=True)

    # Avoid ":" in the timestamp: it is not a portable file-name character
    time_flag = datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d-%H-%M-%S")
    video_path = f"{save_path}/{time_flag}_seed{outputs['seeds'][0]}_{outputs['prompts'][0][:100].replace('/', '')}.mp4"
    save_videos_grid(sample, video_path, fps=24)
    logger.info(f"Sample saved to: {video_path}")
    return video_path
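# Build the Gradio UI. The sampler is loaded once and captured by the click
# handler's lambda, so every generation call reuses the same model instance.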
def create_demo(model_path):
    model = initialize_model(model_path)

    with gr.Blocks() as demo:
        if torch.cuda.device_count() == 0:
            with gr.Row():
                gr.HTML("""
                <p style="background-color: red;"><big><big><big><b>⚠️To use <i>Hunyuan Video</i>, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/HunyuanVideo?duplicate=true">duplicate this space</a> and set a GPU with 80 GB VRAM.</b>

                You can't use <i>Hunyuan Video</i> directly here because this space runs on a CPU, which is not enough for <i>Hunyuan Video</i>. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/HunyuanVideo/discussions/new">feedback</a> if you have issues.
                </big></big></big></p>
                """)
        gr.Markdown("# Hunyuan Video Generation")

        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt", value="A cat walks on the grass, realistic style.")
                with gr.Row():
                    resolution = gr.Dropdown(
                        choices=[
                            # 720p
                            ("1280x720 (16:9, 720p)", "1280x720"),
                            ("720x1280 (9:16, 720p)", "720x1280"),
                            ("1104x832 (4:3, 720p)", "1104x832"),
                            ("832x1104 (3:4, 720p)", "832x1104"),
                            ("960x960 (1:1, 720p)", "960x960"),
                            # 540p
                            ("960x544 (16:9, 540p)", "960x544"),
                            ("544x960 (9:16, 540p)", "544x960"),
                            ("832x624 (4:3, 540p)", "832x624"),
                            ("624x832 (3:4, 540p)", "624x832"),
                            ("720x720 (1:1, 540p)", "720x720"),
                        ],
                        value="832x624",
                        label="Resolution"
                    )
                    video_length = gr.Dropdown(
                        label="Video Length",
                        choices=[
                            ("2s (65 frames)", 65),
                            ("5s (129 frames)", 129),
                        ],
                        value=65,
                    )
                num_inference_steps = gr.Slider(1, 100, value=5, step=1, label="Number of Inference Steps")

                with gr.Accordion("Advanced Options", open=False):
                    with gr.Column():
                        seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2**63 - 1, step=1)
                        guidance_scale = gr.Slider(1.0, 20.0, value=1.0, step=0.5, label="Guidance Scale")
                        flow_shift = gr.Slider(0.0, 10.0, value=7.0, step=0.1, label="Flow Shift")
                        embedded_guidance_scale = gr.Slider(1.0, 20.0, value=6.0, step=0.5, label="Embedded Guidance Scale")

                generate_btn = gr.Button(value="🚀 Generate Video", variant="primary")

        with gr.Row():
            output = gr.Video(label="Generated Video", autoplay=True)

        gr.Markdown("""
            ## **Alternatives**
            If you can't use _Hunyuan Video_, you can use _[CogVideoX](https://huggingface.co/spaces/THUDM/CogVideoX-5B-Space)_ or _[LTX Video Playground](https://huggingface.co/spaces/Lightricks/LTX-Video-Playground)_ instead.
        """)

        generate_btn.click(
            fn=lambda *inputs: generate_video(model, *inputs),
            inputs=[
                prompt,
                resolution,
                video_length,
                seed,
                num_inference_steps,
                guidance_scale,
                flow_shift,
                embedded_guidance_scale
            ],
            outputs=output
        )

    return demo
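# Setting GRADIO_ANALYTICS_ENABLED=False disables Gradio's usage telemetry before launch.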
if __name__ == "__main__":
    os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
    demo = create_demo("ckpts")
    # The meaning of a positional queue argument changed across Gradio versions;
    # a queue cap of 10 is assumed to be the intent here
    demo.queue(max_size=10).launch()