Spaces:
Paused
Paused
File size: 3,913 Bytes
653ce35 7ca5351 c93a0cb 454eedf 39489bd 0cbec0b acf84db 3a66e37 12c01c3 c05f3f8 12c01c3 c05f3f8 12c01c3 3a66e37 833e264 de0aaee 7d319ce de0aaee 16af56d e57581d 618e51c de0aaee 833e264 12c01c3 6fcb174 6a87ed0 c93a0cb cc3607e 6a87ed0 07f7f7b 618e51c 833e264 0479125 833e264 de1d7d7 c93a0cb de1d7d7 c93a0cb de1d7d7 454eedf de1d7d7 454eedf 7ca5351 39489bd 7ca5351 79917e9 12c01c3 39489bd 7ca5351 0269ee9 39489bd 0269ee9 eaf8a3c 106f93a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 |
import gradio as gr
import os
import subprocess
import cv2
from moviepy.editor import VideoFileClip, concatenate_videoclips
import math
from huggingface_hub import snapshot_download
# Model repos pulled from the Hugging Face Hub at startup; each snapshot
# lands in checkpoints/<repo-name> so inference.py can find it locally.
model_ids = [
    'runwayml/stable-diffusion-v1-5',
    'lllyasviel/sd-controlnet-depth',
    'lllyasviel/sd-controlnet-canny',
    'lllyasviel/sd-controlnet-openpose',
]
for repo_id in model_ids:
    repo_name = repo_id.rsplit('/', 1)[-1]
    snapshot_download(repo_id, local_dir=f'checkpoints/{repo_name}')
def get_frame_count_in_duration(filepath):
    """Read the frame count of the uploaded video and return a Gradio
    update that raises the length slider's maximum to that count.

    Args:
        filepath: Path to a video file readable by OpenCV.

    Returns:
        A ``gr.update`` setting ``maximum=frame_count`` on the bound slider.
    """
    video = cv2.VideoCapture(filepath)
    # NOTE: the dead fps/duration/width/height reads were removed; the old
    # `frame_count / fps` raised ZeroDivisionError when the file failed to
    # open (OpenCV reports fps == 0.0) even though the result was unused.
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    video.release()
    return gr.update(maximum=frame_count)
def get_video_dimension(filepath):
    """Return the (width, height) of the video at *filepath*, in pixels."""
    capture = cv2.VideoCapture(filepath)
    dimensions = (
        int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)),
    )
    capture.release()
    return dimensions
def resize_video(input_path, output_path, width):
    """Resize a video to the given width, preserving aspect ratio.

    Args:
        input_path: Path of the source video.
        output_path: Path the resized video is written to (H.264).
        width: Target width in pixels (callers pass an even value, e.g. 512).

    Returns:
        ``output_path``, for convenient chaining.
    """
    video = VideoFileClip(input_path)
    # Keep the aspect ratio, but force an even height: libx264 rejects
    # frames with odd dimensions, and the proportional height can be odd.
    height = int(video.size[1] * (width / video.size[0]))
    height -= height % 2
    resized_video = video.resize(width=width, height=height)
    resized_video.write_videofile(output_path, codec='libx264')
    # Close both clips so moviepy releases its ffmpeg reader processes
    # and file handles (they leaked on every call before).
    resized_video.close()
    video.close()
    return output_path
def run_inference(prompt, video_path, condition, video_length):
    """Resize the input video to 512px wide and run inference.py on it.

    Args:
        prompt: Text prompt; also used as the output file's base name.
        video_path: Path of the uploaded source video.
        condition: ControlNet condition name (e.g. "depth", "canny").
        video_length: Number of frames to generate; > 12 switches the
            script into long-video mode.

    Returns:
        Tuple of a status string ("done") and the generated video's path.
    """
    # Resize first so inference always sees a 512px-wide input.
    resized_vid = 'resized.mp4'
    video_path = resize_video(video_path, resized_vid, width=512)
    width, height = get_video_dimension(video_path)
    print(f"{width} x {height}")

    output_path = 'output/'
    os.makedirs(output_path, exist_ok=True)
    # inference.py names its output after the prompt; remove any stale
    # result so we never return a file from a previous run.
    video_path_output = os.path.join(output_path, f"{prompt}.mp4")
    if os.path.exists(video_path_output):
        os.remove(video_path_output)

    # Build an argv list and run with shell=False: the old f-string +
    # shell=True command let a crafted prompt inject arbitrary shell code.
    command = [
        "python", "inference.py",
        "--prompt", prompt,
        "--condition", condition,
        "--video_path", video_path,
        "--output_path", output_path,
        "--width", str(width),
        "--height", str(height),
        "--video_length", str(video_length),
    ]
    if video_length > 12:
        command.append("--is_long_video")
    subprocess.run(command)

    return "done", video_path_output
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    with gr.Column():
        prompt = gr.Textbox(label="prompt")
        video_path = gr.Video(source="upload", type="filepath")
        condition = gr.Textbox(label="Condition", value="depth")
        video_length = gr.Slider(label="video length", minimum=1, maximum=15, step=1, value=2)
        #seed = gr.Number(label="seed", value=42)
        submit_btn = gr.Button("Submit")
        video_res = gr.Video(label="result")
        status = gr.Textbox(label="result")
    # On upload, cap the length slider at the video's actual frame count.
    video_path.change(fn=get_frame_count_in_duration,
                      inputs=[video_path],
                      outputs=[video_length]
                      )
    submit_btn.click(fn=run_inference,
                     inputs=[prompt,
                             video_path,
                             condition,
                             video_length
                             ],
                     outputs=[status, video_res])

# Fixed: the original line ended with a stray " |" scrape artifact,
# which is a syntax error.
demo.queue(max_size=12).launch()