import os
import sys

# Fetch the FILM reference implementation and make its modules importable.
os.system("git clone https://github.com/google-research/frame-interpolation")
sys.path.append("frame-interpolation")

import gradio as gr
import mediapy
import numpy as np
import tensorflow as tf
from huggingface_hub import snapshot_download
from PIL import Image

from eval import interpolator as interpolator_lib, util

# Not used directly in this demo.
_UINT8_MAX_F = float(np.iinfo(np.uint8).max)
batch_dt = np.full(shape=(1,), fill_value=0.5, dtype=np.float32)

# Download the pretrained FILM (film-style) checkpoint and build the interpolator once at startup.
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
interpolator = interpolator_lib.Interpolator(model, None)


def predict(frame1, frame2, times_to_interpolate):
    img1, img2 = frame1, frame2

    # Crop both inputs to their common size so the model sees matching shapes.
    if img1.size != img2.size:
        width = min(img1.size[0], img2.size[0])
        height = min(img1.size[1], img2.size[1])
        img1 = img1.crop((0, 0, width, height))
        img2 = img2.crop((0, 0, width, height))

    frame1_path = "new_frame1.png"
    frame2_path = "new_frame2.png"
    img1.save(frame1_path)
    img2.save(frame2_path)

    # Recursively insert in-between frames; larger times_to_interpolate
    # values produce more intermediate frames.
    input_frames = [frame1_path, frame2_path]
    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, interpolator))

    # Encode the interpolated sequence as a 30 fps video.
    mediapy.set_ffmpeg(util.get_ffmpeg_path())
    out_path = "out.mp4"
    mediapy.write_video(out_path, frames, fps=30)
    return out_path


title = "frame-interpolation"
description = ("Gradio demo for FILM: Frame Interpolation for Large Scene Motion. "
               "To use it, upload two images and set the number of times to interpolate, "
               "or click one of the examples to load them. Read more at the link below.")
article = ("<p style='text-align: center'>"
           "<a href='https://github.com/google-research/frame-interpolation' target='_blank'>"
           "Github Repo</a></p>")
examples = [['cat1.jpeg', 'cat1.jpeg', 2]]

gr.Interface(
    predict,
    [gr.inputs.Image(type='pil'),
     gr.inputs.Image(type='pil'),
     gr.inputs.Slider(minimum=2, maximum=5, step=1)],
    "playable_video",
    title=title,
    description=description,
    article=article,
    examples=examples,
).launch(enable_queue=True)
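# -----------------------------------------------------------------------------
# Usage note (a sketch, not part of the demo UI): with two input frames,
# times_to_interpolate = t recursively inserts midpoints, so the clip written
# to out.mp4 contains roughly 2**t + 1 frames at 30 fps (t = 5 gives 33 frames,
# about 1.1 s of video). The call below is a hypothetical local test that
# bypasses Gradio; run it in place of .launch() and adjust the image paths.
#
#   video_path = predict(Image.open("cat1.jpeg"), Image.open("cat1.jpeg"), 3)
#   print(video_path)  # -> "out.mp4"
# -----------------------------------------------------------------------------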