# Pix2Pix-Video / app.py
import gradio as gr
import os
import cv2
import numpy as np
from moviepy.editor import ImageSequenceClip

# Hugging Face token used to authenticate against the remote instruct-pix2pix Space
token = os.environ.get('HF_TOKEN')

# Load the instruct-pix2pix Space as a callable pipeline
pix2pix = gr.Blocks.load(name="spaces/fffiloni/instruct-pix2pix-clone", api_key=token)

def get_frames(video_in):
    """Split the input video into JPEG frames; return the frame paths and the source fps."""
    frames = []
    # Open the video file
    cap = cv2.VideoCapture(video_in)
    fps = cap.get(cv2.CAP_PROP_FPS)
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite('kang' + str(i) + '.jpg', frame)
        frames.append('kang' + str(i) + '.jpg')
        i += 1
    cap.release()
    cv2.destroyAllWindows()
    return frames, fps
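
# Usage sketch (hypothetical local test, assuming an "input.mp4" exists in the working directory):
#   frame_paths, fps = get_frames("input.mp4")
#   # -> ['kang0.jpg', 'kang1.jpg', ...], plus the source video's frame rate
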
def create_video(frames, fps):
    """Reassemble a list of frame image paths into movie.mp4 at the given fps."""
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile("movie.mp4", fps=fps)
    return 'movie.mp4'
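
# Usage sketch (hypothetical): create_video(['kang0.jpg', 'kang1.jpg'], fps=24)
# writes "movie.mp4" to the working directory and returns its path.
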
def infer(prompt, video_in):
    """Run the instruct-pix2pix prompt on every frame of the video, then rebuild it as a new video."""
    frames_list, fps = get_frames(video_in)

    result_frames = []
    for frame_path in frames_list:
        # Call the remote Space (fn_index=0) with the prompt and the current frame
        pix2pix_img = pix2pix(prompt, 5.5, 1.5, frame_path, 15, "", 512, 512, 123456, fn_index=0)
        # The call returns a directory of results; keep the first generated image
        images = [os.path.join(pix2pix_img[0], img) for img in os.listdir(pix2pix_img[0])]
        result_frames.append(images[0])

    final_vid = create_video(result_frames, fps)
    return final_vid
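
# Usage sketch (hypothetical, requires an HF_TOKEN with access to the remote Space):
#   infer("make it a watercolor painting", "input.mp4")  # -> "movie.mp4"
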
with gr.Blocks(css='style.css') as demo:
    with gr.Column(elem_id="col-container"):
        prompt = gr.Textbox(placeholder="enter prompt")
        video_inp = gr.Video(source="upload", type="filepath")
        video_out = gr.Video()
        submit_btn = gr.Button("go")

        inputs = [
            prompt,
            video_inp
        ]
        outputs = [video_out]

        submit_btn.click(infer, inputs, outputs)

# Enable the request queue before launching so long-running jobs are not dropped
demo.queue(max_size=12).launch()
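
# To run this demo locally (assumes an HF token that can reach the
# fffiloni/instruct-pix2pix-clone Space):  HF_TOKEN=<your_token> python app.py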