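# Gradio Space: video frame interpolation with FILM
# (https://github.com/google-research/frame-interpolation), followed by an
# optional Farneback optical-flow pass that builds a motion/depth-style map.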
import os
os.system("git clone https://github.com/google-research/frame-interpolation")
import sys
sys.path.append("frame-interpolation")
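# the FILM repo is cloned at startup and added to sys.path so that its
# eval.interpolator and eval.util modules can be imported below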
import math
import cv2
import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image
import gradio as gr
from huggingface_hub import snapshot_download
from moviepy.editor import ImageSequenceClip
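# fetch the pretrained FILM ("film-style") checkpoint from the Hugging Face Hub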
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
from eval import interpolator as interpolator_module, util
# instantiate the FILM interpolator once at startup
film_interpolator = interpolator_module.Interpolator(model, None)
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)
def do_interpolation(frame1, frame2, interpolation):
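    """Run FILM between two frame image files and write the result
    (the input pair plus the interpolated in-between frames) to a short
    25 fps clip. Returns the clip's path."""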
print("tween frames: " + str(interpolation))
print(frame1, frame2)
input_frames = [frame1, frame2]
    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, int(interpolation), film_interpolator))
#print(frames)
mediapy.write_video(f"{frame1}_to_{frame2}_out.mp4", frames, fps=25)
return f"{frame1}_to_{frame2}_out.mp4"
def get_frames(video_in, step, name, resize_w):
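    """Split a video into JPEG frames named f"{name}_{step}{i}.jpg".
    If resize_w > 0, frames are resized to resize_w wide with a forced
    2:1 aspect ratio. Returns (list of frame paths, source fps)."""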
frames = []
cap = cv2.VideoCapture(video_in)
cframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cfps = int(cap.get(cv2.CAP_PROP_FPS))
print(f'frames: {cframes}, fps: {cfps}')
#resize the video
#clip = VideoFileClip(video_in)
#check fps
#if cfps > 25:
# print("video rate is over 25, resetting to 25")
# clip_resized = clip.resize(height=1024)
# clip_resized.write_videofile("video_resized.mp4", fps=25)
#else:
# print("video rate is OK")
# clip_resized = clip.resize(height=1024)
# clip_resized.write_videofile("video_resized.mp4", fps=cfps)
#print("video resized to 1024 height")
# Opens the Video file with CV2
#cap = cv2.VideoCapture("video_resized.mp4")
fps = cap.get(cv2.CAP_PROP_FPS)
print("video fps: " + str(fps))
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if resize_w > 0:
            resize_h = resize_w / 2.0  # height forced to half the width (2:1)
            frame = cv2.resize(frame, (int(resize_w), int(resize_h)))
        cv2.imwrite(f"{name}_{step}{i}.jpg", frame)
        frames.append(f"{name}_{step}{i}.jpg")
        i += 1
cap.release()
cv2.destroyAllWindows()
print("broke the video into frames")
return frames, fps
def create_video(frames, fps, prefix):
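    """Assemble a list of frame image paths into f"{prefix}_result.mp4" at the given fps."""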
print("building video result")
clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile(prefix + "_result.mp4", fps=fps)
    return prefix + "_result.mp4"
def infer(url_in,interpolation,fps_output,resize_n,winsize,o_flow):
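    """Full pipeline: extract frames, FILM-interpolate each consecutive
    pair, rebuild the video at the requested fps, then run a second pass
    that visualizes motion (optical flow) or averages the frames.
    Returns (interpolated video, the same file for download, motion video)."""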
fps_output = logscale(fps_output)
# 1. break video into frames and get FPS
break_vid = get_frames(url_in, "vid_input_frame", "origin", resize_n)
frames_list = break_vid[0]
fps = break_vid[1]
print(f"ORIGIN FPS: {fps}")
n_frame = int(15*fps) #limited to 15 seconds
#n_frame = len(frames_list)
if n_frame >= len(frames_list):
print("video is shorter than the cut value")
n_frame = len(frames_list)
# 2. prepare frames result arrays
result_frames = []
print("set stop frames to: " + str(n_frame))
for idx, frame in enumerate(frames_list[0:int(n_frame)]):
if idx < len(frames_list) - 1:
next_frame = frames_list[idx+1]
            interpolated_frames = do_interpolation(frame, next_frame, interpolation)  # returns the path of a short clip holding the tween frames
break_interpolated_video = get_frames(interpolated_frames, "interpol", f"{idx}_", 0)
print(break_interpolated_video[0])
            # drop the last frame of each pair's clip: it is also the first
            # frame of the next pair, so keeping it would duplicate frames
            for j, img in enumerate(break_interpolated_video[0][:-1]):
                print(f"IMG:{img}")
                os.rename(img, f"{frame}_to_{next_frame}_{j}.jpg")
                result_frames.append(f"{frame}_to_{next_frame}_{j}.jpg")
print("frames " + str(idx) + " & " + str(idx+1) + "/" + str(n_frame) + ": done;")
#print(f"CURRENT FRAMES: {result_frames}")
    # close the sequence with the last original frame
    result_frames.append(frames_list[n_frame - 1])
final_vid = create_video(result_frames, fps_output, "interpolated")
files = final_vid
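    # 3. second pass over the interpolated clip ("depth_map" is a capture of
    # it): either visualize dense optical flow between consecutive frames
    # (o_flow) or average the grayscale frames; either way a long-exposure
    # average is accumulated in `res` and saved as opticalfb.png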
depth_map = cv2.VideoCapture(final_vid)
print("interpolated frames: " + str(len(frames_list)) + " -> " + str(len(result_frames)))
depth_frames = []
    ret, fr1 = depth_map.read()
    # VideoCapture decodes frames as BGR, so use COLOR_BGR2GRAY here
    prvs = cv2.cvtColor(fr1, cv2.COLOR_BGR2GRAY)
    hsv = np.zeros_like(fr1)
    hsv[..., 1] = 255  # full saturation for the flow visualization
    # accumulate the running average in float32; blending into a uint8
    # buffer with a small alpha would round most contributions to zero
    res = np.zeros(prvs.shape, dtype=np.float32)
i=0
    while depth_map.isOpened():
        ret, fr2 = depth_map.read()
        if not ret:
            if not o_flow:
                # blend the final frame into the running average as well
                cv2.accumulateWeighted(prvs, res, 1.0 / len(result_frames))
            break
        nxt = cv2.cvtColor(fr2, cv2.COLOR_BGR2GRAY)
        if o_flow:
            # dense Farneback optical flow between consecutive frames;
            # winsize comes from the "Motion detection window size" slider
            fl = cv2.calcOpticalFlowFarneback(prvs, nxt, None, 0.5, 3, int(winsize), 3, 5, 1.2, 0)
            mag, ang = cv2.cartToPolar(fl[..., 0], fl[..., 1])
            hsv[..., 0] = ang * 180 / np.pi / 2  # hue encodes flow direction
            hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)  # value encodes magnitude
            rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
            rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
        else:
            rgb = prvs
        # fold this frame into the long-exposure average
        cv2.accumulateWeighted(rgb, res, 1.0 / len(result_frames))
        rgb = cv2.cvtColor(rgb, cv2.COLOR_GRAY2RGB)
        cv2.imwrite(f"opticalfb{i}.jpg", rgb)
        depth_frames.append(f"opticalfb{i}.jpg")
        i += 1
        prvs = nxt
print("averaged frames: " + str(len(result_frames)))
depth_vid = create_video(depth_frames, fps_output, "depth_map")
    cv2.imwrite('opticalfb.png', np.clip(res, 0, 255).astype(np.uint8))
depth_map.release()
cv2.destroyAllWindows()
return final_vid, files, depth_vid
def logscale(linear):
    # map a linear slider position n to 2**n (1, 2, 4, 8, 16, 32)
    return int(math.pow(2, linear))
def linscale(value):
    # inverse of logscale: recover the slider position from a power of two
    return int(math.log2(value))
def loadurl(url):
    # mirror the uploaded/recorded video's path into the URL textbox
    return url
title="""
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
margin-bottom: 10px;
"
>
<h1 style="font-weight: 600; margin-bottom: 7px;">
Video interpolation with FILM
</h1>
</div>
<p> This space uses FILM to generate interpolated ('tween') frames between the frames of your video.<br />
Generation is limited to the first 15 seconds of your video input.<br />
<a style="display:inline-block" href="https://huggingface.co/spaces/freealise/video_frame_interpolation?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
</p>
</div>
"""
with gr.Blocks() as demo:
with gr.Column():
gr.HTML(title)
with gr.Row():
with gr.Column():
url_input = gr.Textbox(value="./examples/streetview.mp4", label="URL")
video_input = gr.Video()
video_input.change(fn=loadurl, inputs=[video_input], outputs=[url_input])
resize_num = gr.Slider(minimum=1, maximum=4096, step=1, value=256, label="Resize to width: ")
of_check = gr.Checkbox(value=True, label="Detect motion for depth map: ")
winsize_num = gr.Slider(minimum=1, maximum=256, step=1, value=15, label="Motion detection window size: ")
with gr.Row():
interpolation_slider = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Interpolation Steps: ")
interpolation = gr.Number(value=2, show_label=False, interactive=False)
interpolation_slider.change(fn=logscale, inputs=[interpolation_slider], outputs=[interpolation])
with gr.Row():
fps_output_slider = gr.Slider(minimum=0, maximum=5, step=1, value=0, label="FPS output: ")
fps_output = gr.Number(value=1, show_label=False, interactive=False)
fps_output_slider.change(fn=logscale, inputs=[fps_output_slider], outputs=[fps_output])
submit_btn = gr.Button("Submit")
with gr.Column():
video_output = gr.Video()
file_output = gr.File()
depth_output = gr.Video()
gr.Examples(
examples=[["./examples/streetview.mp4", 1, 0, 256, 15, True]],
fn=infer,
inputs=[url_input,interpolation_slider,fps_output_slider,resize_num,winsize_num,of_check],
outputs=[video_output,file_output,depth_output],
cache_examples=True
)
submit_btn.click(fn=infer, inputs=[url_input,interpolation_slider,fps_output_slider,resize_num,winsize_num,of_check], outputs=[video_output, file_output, depth_output])
demo.launch()