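# app.py — Hugging Face Space: Gradio demo for FILM (Frame Interpolation for Large Motion).
# Clones the google-research/frame-interpolation repo at startup, downloads the pretrained
# FILM model from the Hub, and interpolates between two uploaded images to produce a video.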
import os

# Pull in the official FILM code so that `eval.interpolator` and `eval.util` can be imported.
os.system("git clone https://github.com/google-research/frame-interpolation")
import sys
sys.path.append("frame-interpolation")

import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image
from eval import interpolator, util
import gradio as gr
from huggingface_hub import snapshot_download

_UINT8_MAX_F = float(np.iinfo(np.uint8).max)

# Download the pretrained FILM (style) checkpoint and build the interpolator.
# Note: this rebinds `interpolator` from the imported module to the model instance.
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
interpolator = interpolator.Interpolator(model, None)
batch_dt = np.full(shape=(1,), fill_value=0.5, dtype=np.float32)  # mid-point timestep (unused below)
def predict(frame1, frame2, times_to_interpolate):
    img1, img2 = frame1, frame2

    # If the two images differ in size, crop both to their common (minimum) dimensions.
    if img1.size != img2.size:
        w = min(img1.size[0], img2.size[0])
        h = min(img1.size[1], img2.size[1])
        img1 = img1.crop((0, 0, w, h))
        img2 = img2.crop((0, 0, w, h))

    # The FILM utilities read frames from disk, so save the (possibly cropped) inputs.
    frame1 = 'new_frame1.png'
    frame2 = 'new_frame2.png'
    img1.save(frame1)
    img2.save(frame2)

    input_frames = [str(frame1), str(frame2)]

    # Recursive interpolation: each extra level roughly doubles the number of frames.
    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, interpolator))

    # Encode the frame sequence to an MP4 via mediapy/ffmpeg.
    ffmpeg_path = util.get_ffmpeg_path()
    mediapy.set_ffmpeg(ffmpeg_path)
    out_path = "out.mp4"
    mediapy.write_video(str(out_path), frames, fps=30)
    return out_path
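# Minimal local sanity check (a sketch only; assumes the example image cat1.jpeg exists):
#   video_path = predict(Image.open("cat1.jpeg"), Image.open("cat1.jpeg"), times_to_interpolate=2)
#   print(video_path)  # -> "out.mp4"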
title="frame-interpolation"
description="Gradio demo for FILM: Frame Interpolation for Large Scene Motion. To use it, simply upload your images and add the times to interpolate number or click on one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2202.04901' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>"
examples=[['cat1.jpeg','cat1.jpeg',2]]
gr.Interface(
    predict,
    [gr.inputs.Image(type='pil'), gr.inputs.Image(type='pil'), gr.inputs.Slider(minimum=2, maximum=5, step=1)],
    "playable_video",
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(enable_queue=True)