image-to-video / app.py
Dao3's picture
Duplicate from Mishyface/image-to-video-film-3-kazuk-hugorowan-mishyface
0d81aae
import gradio as gr
from transformers import pipeline
import io, base64
from PIL import Image
import numpy as np
import tensorflow as tf
import mediapy
import os
import sys
from huggingface_hub import snapshot_download
from image_tools.sizes import resize_and_crop
os.system("git clone https://github.com/google-research/frame-interpolation")
sys.path.append("frame-interpolation")
from eval import interpolator, util
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)
model = snapshot_download(repo_id="akhaliq/frame-interpolation-film-style")
interpolator = interpolator.Interpolator(model, None)
def resize(width, img):
basewidth = width
img = Image.open(img)
wpercent = (basewidth / float(img.size[0]))
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((basewidth, hsize), Image.ANTIALIAS)
return img
def resize_img(img1, img2, output_name):
img_target_size = Image.open(img1)
img_to_resize = resize_and_crop(
img2,
(img_target_size.size[0], img_target_size.size[1]),
crop_origin="middle"
)
img_to_resize.save(output_name)
def generate_interpolation(frame1, frame2, frame3, frame4, frame5, frame6, times_to_interpolate, fps):
frame1 = resize(256, frame1)
frame2 = resize(256, frame2)
frame3 = resize(256, frame3)
frame4 = resize(256, frame4)
frame5 = resize(256, frame5)
frame6 = resize(256, frame6)
frame1.save("test1.png")
frame2.save("test2.png")
frame3.save("test3.png")
frame4.save("test4.png")
frame5.save("test5.png")
frame6.save("test6.png")
resize_img("test1.png", "test2.png", "resized_img2.png")
resize_img("test1.png", "test3.png", "resized_img3.png")
resize_img("test1.png", "test4.png", "resized_img4.png")
resize_img("test1.png", "test5.png", "resized_img5.png")
resize_img("test1.png", "test6.png", "resized_img6.png")
input_frames = ["test1.png", "resized_img2.png", "resized_img3.png", "resized_img4.png", "resized_img5.png", "resized_img6.png"]
frames = list(util.interpolate_recursively_from_files(input_frames, times_to_interpolate, interpolator))
mediapy.write_video("out.mp4", frames, fps=fps)
return "out.mp4"
demo = gr.Blocks()
with demo:
with gr.Row():
# Left column (inputs)
with gr.Column():
with gr.Row():
# upload images and get image strings
input_arr = [
gr.inputs.Image(type='filepath', label="Frame 1"),
gr.inputs.Image(type='filepath', label="Frame 2"),
gr.inputs.Image(type='filepath', label="Frame 3"),
gr.inputs.Image(type='filepath', label="Frame 4"),
gr.inputs.Image(type='filepath', label="Frame 5"),
gr.inputs.Image(type='filepath', label="Frame 6"),
]
with gr.Row():
input_arr.append(gr.inputs.Slider(minimum=2, maximum=10, step=1, label="Times to Interpolate"))
input_arr.append(gr.inputs.Slider(minimum=15, maximum=60, step=1, label="fps"))
# Rows of instructions & buttons
with gr.Row():
gr.Markdown("After uploading some images, hit the 'Generate Video' button to create a short video!")
button_gen_video = gr.Button("Generate Video")
# Right column (outputs)
with gr.Column():
output_interpolation = gr.Video(label="Generated Video")
# Bind functions to buttons
button_gen_video.click(fn=generate_interpolation, inputs=input_arr, outputs=output_interpolation)
demo.launch(debug=True, enable_queue=True)