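"""Gradio Space for Open-Sora text-to-video inference.

Downloads the DeepFloyd T5-v1.1-XXL text encoder and the selected Open-Sora
checkpoint from the Hugging Face Hub, then shells out to the repo's
scripts/inference.py via torchrun and serves the resulting video.
"""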

import glob
import os
import subprocess
import tempfile

import gradio as gr
import spaces
from huggingface_hub import hf_hub_download, snapshot_download

print("Starting the app.")
def download_t5_model(model_id, save_directory):
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)
    # Materialize real files rather than symlinks so the inference script can
    # load the encoder from a plain directory.
    snapshot_download(repo_id=model_id, local_dir=save_directory,
                      local_dir_use_symlinks=False)


model_id = "DeepFloyd/t5-v1_1-xxl"
save_directory = "pretrained_models/t5_ckpts/t5-v1_1-xxl"

download_t5_model(model_id, save_directory)
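

# Resolve a single checkpoint file from the Hub and return its local path.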
def download_model(repo_id, model_name):
    model_path = hf_hub_download(repo_id=repo_id, filename=model_name)
    return model_path
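

# One end-to-end generation: patch the model's inference config to point at
# the user's prompt, run scripts/inference.py under torchrun, and return the
# newest file from the samples directory.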
@spaces.GPU
def run_inference(model_name, prompt_text):
    repo_id = "hpcai-tech/Open-Sora"

    # Pair each checkpoint with the inference config that matches the
    # resolution it was trained at (frames x height x width).
    config_mapping = {
        "OpenSora-v1-16x256x256.pth": "configs/opensora/inference/16x256x256.py",
        "OpenSora-v1-HQ-16x256x256.pth": "configs/opensora/inference/16x256x256.py",
        "OpenSora-v1-HQ-16x512x512.pth": "configs/opensora/inference/16x512x512.py",
    }

    config_path = config_mapping[model_name]
    ckpt_path = download_model(repo_id, model_name)
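
    # Write the prompt to a temp file, then point a patched copy of the model
    # config at it (the stock config reads prompts from
    # ./assets/texts/t2v_samples.txt).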
    prompt_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w")
    prompt_file.write(prompt_text)
    prompt_file.close()

    with open(config_path, "r") as file:
        config_content = file.read()
    config_content = config_content.replace(
        'prompt_path = "./assets/texts/t2v_samples.txt"',
        f'prompt_path = "{prompt_file.name}"',
    )

    with tempfile.NamedTemporaryFile("w", delete=False, suffix=".py") as temp_file:
        temp_file.write(config_content)
        temp_config_path = temp_file.name

    try:
        cmd = [
            "torchrun", "--standalone", "--nproc_per_node", "1",
            "scripts/inference.py", temp_config_path,
            "--ckpt-path", ckpt_path,
        ]
        subprocess.run(cmd, check=True)

        # The inference script writes its samples here; treat the most
        # recently created file as this run's video.
        save_dir = "./outputs/samples"
        list_of_files = glob.glob(f"{save_dir}/*")
        if list_of_files:
            return max(list_of_files, key=os.path.getctime)
        print("No files found in the output directory.")
        return None
    finally:
        # Remove the temp files whether or not inference succeeded.
        os.remove(temp_config_path)
        os.remove(prompt_file.name)
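

# Minimal UI: a model picker and a prompt box wired to run_inference, with
# the generated clip returned as a video component.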
def main():
    gr.Interface(
        fn=run_inference,
        inputs=[
            gr.Dropdown(
                choices=[
                    "OpenSora-v1-16x256x256.pth",
                    "OpenSora-v1-HQ-16x256x256.pth",
                    "OpenSora-v1-HQ-16x512x512.pth",
                ],
                value="OpenSora-v1-16x256x256.pth",
                label="Model Selection",
            ),
            gr.Textbox(label="Prompt Text", placeholder="Enter prompt text here"),
        ],
        outputs=gr.Video(label="Output Video"),
        title="Open-Sora Inference",
        description="Run Open-Sora inference with custom parameters.",
    ).launch()


if __name__ == "__main__":
    main()