import torch
print(torch.__version__)
print(torch.version.cuda)
print(torch.cuda.is_available())
import os, subprocess, shutil
import uuid, tempfile
from glob import glob
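# Make the CUDA toolkit (nvcc under /usr/local/cuda/bin) visible on PATH so any
# CUDA extensions that get JIT-compiled at import time can find the compiler.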
env_list = os.environ['PATH'].split(':')
env_list.append('/usr/local/cuda/bin')
os.environ['PATH'] = ':'.join(env_list)
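# Build CUDA kernels for compute capability 8.6 (Ampere, e.g. A10G / RTX 30-series GPUs).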
os.environ['TORCH_CUDA_ARCH_LIST'] = '8.6'
import gradio as gr
from huggingface_hub import snapshot_download
os.makedirs("pretrained", exist_ok=True)
snapshot_download(
    repo_id = "jiawei011/L4GM",
    local_dir = "./pretrained"
)
# Folder containing example images
examples_folder = "data_test"
# Retrieve all file paths in the folder
video_examples = [
    os.path.join(examples_folder, file)
    for file in os.listdir(examples_folder)
    if os.path.isfile(os.path.join(examples_folder, file))
]
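# Two-stage L4GM inference:
#   1. infer_3d.py reconstructs 3D Gaussians from a single frame (--num_frames 1).
#   2. infer_4d.py animates that reconstruction over the clip using the
#      interpolation checkpoint (--interpresume).
# The rendered .mp4 outputs land in a per-request workspace folder.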
def generate(input_video):
    unique_id = str(uuid.uuid4())
    workdir = f"results_{unique_id}"
    recon_model = "pretrained/recon.safetensors"
    interp_model = "pretrained/interp.safetensors"
    num_frames = 16
    test_path = input_video
    try:
        # Run the inference commands
        subprocess.run(
            [
                "python", "infer_3d.py", "big",
                "--workspace", workdir,
                "--resume", recon_model,
                "--num_frames", "1",
                "--test_path", test_path,
            ],
            check=True
        )
        subprocess.run(
            [
                "python", "infer_4d.py", "big",
                "--workspace", workdir,
                "--resume", recon_model,
                "--interpresume", interp_model,
                "--num_frames", str(num_frames),
                "--test_path", test_path,
            ],
            check=True
        )
        # Get all .mp4 files in the workdir
        output_videos = glob(os.path.join(workdir, "*.mp4"))
        print("Found videos:", output_videos)
        # Check that the 5th video exists
        if len(output_videos) < 5:
            raise IndexError("Fewer than 5 .mp4 files found in the workdir.")
        # Get the 5th video
        selected_video = output_videos[4]
        print("Selected video:", selected_video)
        # Create a new temporary directory
        temp_dir = tempfile.mkdtemp()
        print("Temporary directory created:", temp_dir)
        # Copy the selected video to the temporary directory
        new_video_path = os.path.join(temp_dir, os.path.basename(selected_video))
        shutil.copy(selected_video, new_video_path)
        print(f"Copied {selected_video} to {new_video_path}")
        # Delete the workdir folder
        shutil.rmtree(workdir)
        print(f"Deleted workdir: {workdir}")
        # Return the new path of the copied video
        return new_video_path
    except subprocess.CalledProcessError as e:
        raise gr.Error(f"Error during inference: {str(e)}")
with gr.Blocks() as demo:
    with gr.Column():
        gr.Markdown("# L4GM: Large 4D Gaussian Reconstruction Model")
        gr.HTML("""
        <div style="display:flex;column-gap:4px;">
            <a href="https://github.com/nv-tlabs/L4GM-official/tree/main">
                <img src='https://img.shields.io/badge/GitHub-Repo-blue'>
            </a>
            <a href="https://research.nvidia.com/labs/toronto-ai/l4gm/">
                <img src='https://img.shields.io/badge/Project-Page-green'>
            </a>
            <a href="https://arxiv.org/abs/2406.10324">
                <img src='https://img.shields.io/badge/ArXiv-Paper-red'>
            </a>
            <a href="https://huggingface.co/spaces/fffiloni/L4GM-demo?duplicate=true">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-sm.svg" alt="Duplicate this Space">
            </a>
            <a href="https://huggingface.co/fffiloni">
                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/follow-me-on-HF-sm-dark.svg" alt="Follow me on HF">
            </a>
        </div>
        """)
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video", interactive=False)
                submit_btn = gr.Button("Submit")
                gr.Examples(
                    examples = video_examples,
                    inputs = [input_video],
                    examples_per_page = 5
                )
            with gr.Column():
                output_result_4 = gr.Video(label="Result")
    submit_btn.click(
        fn = generate,
        inputs = [input_video],
        outputs = [
            output_result_4
        ]
    )
demo.queue().launch(show_api=False, show_error=True)