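# Environment sanity check: print the installed Torch version, its CUDA build, and GPU availability at startup.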
import torch
print(torch.__version__)
print(torch.version.cuda)
print(torch.cuda.is_available())
import os, subprocess
import uuid, tempfile
from glob import glob  # used below to collect the rendered .mp4 outputs

import gradio as gr
from huggingface_hub import snapshot_download
os.makedirs("pretrained", exist_ok=True)
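# Download the L4GM pretrained checkpoints (used below as pretrained/recon.safetensors) from the Hugging Face Hub.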
snapshot_download(
    repo_id="jiawei011/L4GM",
    local_dir="./pretrained"
)
# Folder containing the example videos
examples_folder = "data_test"
# Retrieve all file paths in the folder
video_examples = [
    os.path.join(examples_folder, file)
    for file in os.listdir(examples_folder)
    if os.path.isfile(os.path.join(examples_folder, file))
]
def generate(input_video):
    """Run L4GM inference on the uploaded video and return the rendered result."""
    # Example: --test_path data_test/otter-on-surfboard_fg.mp4
    workdir = "results"
    pretrained_model = "pretrained/recon.safetensors"
    num_frames = 1
    test_path = input_video
    try:
        # Run the inference script as a subprocess
        subprocess.run(
            [
                "python", "infer_3d.py", "big",
                "--workspace", f"{workdir}",
                "--resume", f"{pretrained_model}",
                "--num_frames", f"{num_frames}",
                "--test_path", f"{test_path}",
            ],
            check=True,
        )
        # Collect the rendered videos written to the workspace;
        # the output component is a single gr.Video, so return the first one.
        output_videos = glob(os.path.join(workdir, "*.mp4"))
        if not output_videos:
            raise gr.Error("Inference finished but produced no output video.")
        return output_videos[0]
    except subprocess.CalledProcessError as e:
        raise gr.Error(f"Error during inference: {str(e)}")

with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video")
                submit_btn = gr.Button("Submit")
            with gr.Column():
                output_result = gr.Video(label="Result")
        gr.Examples(
            examples=video_examples,
            inputs=[input_video]
        )
    submit_btn.click(
        fn=generate,
        inputs=[input_video],
        outputs=[output_result]
    )
demo.queue().launch(show_api=False, show_error=True)