import torch

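# Log the PyTorch build, CUDA version, and GPU availability at startup for easier debugging.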
print(torch.__version__)
print(torch.version.cuda)
print(torch.cuda.is_available())

import os, subprocess
import uuid
from glob import glob

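# Make the CUDA toolkit binaries visible on PATH and pin the target GPU
# architecture (compute capability 8.6, i.e. Ampere) for any CUDA extension builds.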
env_list = os.environ['PATH'].split(':')
env_list.append('/usr/local/cuda/bin')
os.environ['PATH'] = ':'.join(env_list)
os.environ['TORCH_CUDA_ARCH_LIST'] = '8.6'

import gradio as gr
from huggingface_hub import snapshot_download

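# Download the L4GM pretrained checkpoints from the Hugging Face Hub into ./pretrained.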
os.makedirs("pretrained", exist_ok=True)
snapshot_download(
    repo_id="jiawei011/L4GM",
    local_dir="./pretrained"
)

# Folder containing example videos
examples_folder = "data_test"

# Retrieve all file paths in the folder
video_examples = [
    os.path.join(examples_folder, file)
    for file in os.listdir(examples_folder)
    if os.path.isfile(os.path.join(examples_folder, file))
]


def generate(input_video):
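    """Run L4GM inference on the uploaded video and return the path of the rendered result."""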

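    # Give each request its own workspace so concurrent runs do not overwrite one another.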
    unique_id = str(uuid.uuid4())
    
    workdir = f"results_{unique_id}"
    recon_model = "pretrained/recon.safetensors"
    interp_model = "pretrained/interp.safetensors"
    num_frames = 16
    test_path = input_video
    
    try:
        # Stage 1: reconstruct 3D Gaussians from a single frame with the recon model
        subprocess.run(
            [
                "python", "infer_3d.py", "big",
                "--workspace", workdir,
                "--resume", recon_model,
                "--num_frames", "1",
                "--test_path", test_path,
            ],
            check=True
        )

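        # Stage 2: run 4D inference over num_frames frames using the reconstruction and interpolation checkpoints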
        subprocess.run(
            [
                "python", "infer_4d.py", "big",
                "--workspace", workdir,
                "--resume", recon_model,
                "--interpresume", interp_model,
                "--num_frames", str(num_frames),
                "--test_path", test_path,
            ],
            check=True
        )

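        # The inference scripts write rendered .mp4 files into the workspace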
        output_videos = glob(os.path.join(workdir, "*.mp4"))
        print(output_videos)
        if not output_videos:
            raise gr.Error("Inference finished but no output video was found.")
        return output_videos[0]
   
    except subprocess.CalledProcessError as e:
        raise gr.Error(f"Error during inference: {str(e)}")

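# Gradio UI: input video and submit button on the left, rendered result on the right, with clickable examples below.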
with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video")
                submit_btn = gr.Button("Submit")
            with gr.Column():
                output_result = gr.Video(label="Result")

        gr.Examples(
            examples=video_examples,
            inputs=[input_video]
        )

    submit_btn.click(
        fn=generate,
        inputs=[input_video],
        outputs=[output_result]
    )

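# Queue requests for the long-running inference job; hide the API docs and surface errors in the UI.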
demo.queue().launch(show_api=False, show_error=True)