|
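"""Gradio demo that uses FILM to interpolate video frames between two sketches."""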
import os

# Clone the FILM repo on first launch (skipped if already present) so that its
# `eval` package can be imported below.
if not os.path.exists("frame-interpolation"):
    os.system("git clone https://github.com/google-research/frame-interpolation")

import sys

sys.path.append("frame-interpolation")
|
import cv2
import gradio as gr
import mediapy
import numpy as np
import tensorflow as tf
from huggingface_hub import snapshot_download
from image_tools.sizes import resize_and_crop
from PIL import Image

# `eval.interpolator` and `eval.util` come from the cloned frame-interpolation repo.
from eval import interpolator, util
|
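# Download the pretrained FILM "style" checkpoint from the Hugging Face Hub.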
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")

# Named `film_interpolator` so it does not shadow the imported `interpolator` module.
film_interpolator = interpolator.Interpolator(model, None)

# Point mediapy at the ffmpeg binary bundled with the FILM repo.
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)
|
|
def resize(width, img):
    """Resize an image file to `width` pixels wide, preserving its aspect ratio."""
    basewidth = width
    img = Image.open(img)
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same filter.
    img = img.resize((basewidth, hsize), Image.LANCZOS)
    return img
|
|
def resize_img(img1, img2):
    """Crop/resize the image at `img2` to the exact size of `img1` and save it."""
    img_target_size = Image.open(img1)
    img_to_resize = resize_and_crop(
        img2,
        (img_target_size.size[0], img_target_size.size[1]),
        crop_origin="middle",
    )
    img_to_resize.save("resized_img2.png")
|
|
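# Two drawable canvas components; each sketch reaches `predict` as a file path
# because the components use type="filepath".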
sketch1 = gr.Image(image_mode="RGB",
                   source="canvas",
                   tool="color-sketch",
                   type="filepath",
                   shape=None,
                   invert_colors=False)

sketch2 = gr.Image(image_mode="RGB",
                   source="canvas",
                   tool="color-sketch",
                   type="filepath",
                   shape=None,
                   invert_colors=False)

# `gr.inputs.Slider` is deprecated; `gr.Slider` is the current equivalent.
slider = gr.Slider(minimum=2, maximum=4, step=1, label="Times to interpolate")
|
|
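# Main Gradio callback: interpolate between the two sketches and return both
# the rendered video and its individual frames.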
def predict(frame1, frame2, times_to_interpolate):

    # Normalize both sketches to 512 px wide, then force the second one onto
    # the exact dimensions of the first, since the interpolator expects both
    # input frames to share the same shape.
    frame1 = resize(512, frame1)
    frame2 = resize(512, frame2)

    frame1.save("test1.png")
    frame2.save("test2.png")

    resize_img("test1.png", "test2.png")
    input_frames = ["test1.png", "resized_img2.png"]

    # Each recursive pass doubles the number of frames, so higher values of
    # `times_to_interpolate` give smoother (and longer) output videos.
    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, film_interpolator))

    mediapy.write_video("out.mp4", frames, fps=24)
|
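    # Read the rendered video back with OpenCV so the individual frames can
    # also be returned to the gallery output.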
    filename = "out.mp4"

    cap = cv2.VideoCapture(filename)
    cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 0)  # rewind to the first frame
    frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    videoFPS = int(cap.get(cv2.CAP_PROP_FPS))

    print(f"frameCount: {frameCount}")
    print(f"frameWidth: {frameWidth}")
    print(f"frameHeight: {frameHeight}")
    print(f"videoFPS: {videoFPS}")
|
    buf = np.empty((frameCount, frameHeight, frameWidth, 3), np.dtype("uint8"))

    fc = 0
    ret = True

    # Guard on `ret` so a failed read can never assign None into the buffer.
    while fc < frameCount and ret:
        ret, frame = cap.read()
        if ret:
            # OpenCV decodes frames as BGR; convert to RGB for the gallery.
            buf[fc] = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            fc += 1

    cap.release()
    videoArray = buf[:fc]  # drop any frames that failed to decode

    print(f"DURATION: {frameCount / videoFPS}")
    print(f"videoArray shape: {videoArray.shape}")

    return "out.mp4", videoArray
|
|
title = "sketch-frame-interpolation"
description = "<p style='text-align: center'>This is a fork of @akhaliq's Gradio demo for FILM: Frame Interpolation for Large Motion, but using sketches instead of images. This could be very useful for the animation industry :) <br /> To use it, simply draw your two sketches and set the times-to-interpolate value. Read more at the links below. <br /> <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.sketch_frame_interpolation' style='display: inline-block'/></p>"
article = "<p style='text-align: center'><a href='https://film-net.github.io/' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>"
custom_css = "style.css"

gr.Interface(predict,
             [sketch1, sketch2, slider],
             outputs=[gr.Video(label="video output"),
                      gr.Gallery(label="list of frames output").style(grid=5)],
             title=title,
             description=description,
             article=article,
             css=custom_css).launch(enable_queue=True)