import glob
import os
import subprocess
import tempfile

import gradio as gr
import spaces
from huggingface_hub import hf_hub_download
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Install flash-attn at runtime; extend (rather than replace) the environment so
# pip stays on PATH, and skip the CUDA extension build to use the prebuilt wheel.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={**os.environ, 'FLASH_ATTENTION_SKIP_CUDA_BUILD': 'TRUE'},
    shell=True,
)

def download_t5_model(model_id, save_directory):
    # Download the model and its tokenizer
    model = T5ForConditionalGeneration.from_pretrained(model_id)
    tokenizer = T5Tokenizer.from_pretrained(model_id)

    # Save the model and tokenizer to the given directory
    os.makedirs(save_directory, exist_ok=True)
    model.save_pretrained(save_directory)
    tokenizer.save_pretrained(save_directory)

# Model ID and target directory for the T5 text-encoder weights
model_id = "DeepFloyd/t5-v1_1-xxl"
save_directory = "pretrained_models/t5_ckpts/t5-v1_1-xxl"

# Download the model once at startup (module import time)
download_t5_model(model_id, save_directory)
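
# Note: Open-Sora uses this T5 model as its text encoder; the default inference
# configs are expected to load it from the pretrained_models/t5_ckpts directory
# populated above.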

def download_model(repo_id, model_name):
    # Fetch a checkpoint file from the Hugging Face Hub (cached after the first call)
    return hf_hub_download(repo_id=repo_id, filename=model_name)
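
# Example: resolve a released checkpoint to a local cached path, e.g.
# ckpt_path = download_model("hpcai-tech/Open-Sora", "OpenSora-v1-16x256x256.pth")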

@spaces.GPU
def run_inference(model_name, prompt_text):
    repo_id = "hpcai-tech/Open-Sora"
    
    # Map each checkpoint to the inference config that matches its
    # (frames x height x width) layout
    config_mapping = {
        "OpenSora-v1-16x256x256.pth": "configs/opensora/inference/16x256x256.py",
        "OpenSora-v1-HQ-16x256x256.pth": "configs/opensora/inference/16x256x256.py",
        "OpenSora-v1-HQ-16x512x512.pth": "configs/opensora/inference/16x512x512.py"
    }

    config_path = config_mapping[model_name]
    ckpt_path = download_model(repo_id, model_name)

    # Write the prompt to a temporary text file for the config to point at
    with tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode='w') as prompt_file:
        prompt_file.write(prompt_text)

    # Point the config at our prompt file; this assumes the stock config still
    # contains the default line: prompt_path = "./assets/texts/t2v_samples.txt"
    with open(config_path, 'r') as file:
        config_content = file.read()
    config_content = config_content.replace(
        'prompt_path = "./assets/texts/t2v_samples.txt"',
        f'prompt_path = "{prompt_file.name}"'
    )

    with tempfile.NamedTemporaryFile('w', delete=False, suffix='.py') as temp_file:
        temp_file.write(config_content)
        temp_config_path = temp_file.name

    # Launch Open-Sora's single-GPU inference script as a subprocess
    cmd = [
        "torchrun", "--standalone", "--nproc_per_node", "1",
        "scripts/inference.py", temp_config_path,
        "--ckpt-path", ckpt_path
    ]
    subprocess.run(cmd, check=True)

    # Clean up the temporary files now that inference has finished
    os.remove(temp_config_path)
    os.remove(prompt_file.name)

    # Return the most recently written sample; this is the save directory
    # used by scripts/inference.py
    save_dir = "./outputs/samples/"
    list_of_files = glob.glob(f'{save_dir}/*')
    if list_of_files:
        return max(list_of_files, key=os.path.getctime)
    else:
        print("No files found in the output directory.")
        return None
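
# Example invocation (hypothetical prompt; requires a GPU and the Open-Sora repo
# checked out so that scripts/inference.py and its configs are present):
# run_inference("OpenSora-v1-16x256x256.pth", "A panda eating bamboo by a river")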

def main():
    gr.Interface(
        fn=run_inference,
        inputs=[
            gr.Dropdown(choices=[
                "OpenSora-v1-16x256x256.pth",
                "OpenSora-v1-HQ-16x256x256.pth",
                "OpenSora-v1-HQ-16x512x512.pth"
            ], 
            value="OpenSora-v1-16x256x256.pth",
            label="Model Selection"),
            gr.Textbox(label="Prompt Text", placeholder="Enter prompt text here")
        ],
        outputs=gr.Video(label="Output Video"),
        title="Open-Sora Inference",
        description="Run Open-Sora Inference with Custom Parameters",
    ).launch()

if __name__ == "__main__":
    main()
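
# To try the interface locally, run this file directly (assuming it is saved as
# app.py, as on a Hugging Face Space): python app.py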