import os

# Clone the official FILM repository at startup so that its `eval` package
# (interpolator, util) can be imported from the checkout added to sys.path below.
os.system("git clone https://github.com/google-research/frame-interpolation")
import sys

sys.path.append("frame-interpolation")
import numpy as np
import tensorflow as tf
import mediapy
from PIL import Image
from eval import interpolator, util
import gradio as gr

from huggingface_hub import snapshot_download

from image_tools.sizes import resize_and_crop


def load_model(model_name):
    """Download a FILM checkpoint from the Hugging Face Hub and wrap it in an Interpolator."""
    model = interpolator.Interpolator(snapshot_download(repo_id=model_name), None)
    return model


model_names = [
    "akhaliq/frame-interpolation-film-style",
    "akhaliq/frame-interpolation_film_l1",
    "akhaliq/frame_interpolation_film_vgg",
    "akhaliq/frame-interpolation-film-imagenet-vgg-verydeep-19"
]

# Download and instantiate every checkpoint up front so any model can be selected in the UI.
models = {model_name: load_model(model_name) for model_name in model_names}

# Tell mediapy which ffmpeg binary to use when writing the output video.
ffmpeg_path = util.get_ffmpeg_path()
mediapy.set_ffmpeg(ffmpeg_path)


def resize(width, img):
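    """Scale an image to `width` pixels wide, preserving its aspect ratio."""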
    basewidth = width
    img = Image.open(img)
    wpercent = (basewidth / float(img.size[0]))
    hsize = int((float(img.size[1]) * float(wpercent)))
    img = img.resize((basewidth, hsize), Image.ANTIALIAS)  # ANTIALIAS (alias of LANCZOS) requires Pillow < 10
    return img


def resize_img(img1, img2):
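    """Resize and center-crop img2 to img1's dimensions; writes the result to resized_img2.png."""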
    img_target_size = Image.open(img1)
    img_to_resize = resize_and_crop(
        img2,
        (img_target_size.size[0], img_target_size.size[1]),  # set width and height to match img1
        crop_origin="middle"
    )
    img_to_resize.save('resized_img2.png')


def predict(frame1, frame2, times_to_interpolate, model_name):
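    """Interpolate between two uploaded frames with the selected FILM model and return the rendered mp4 path."""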
    model = models[model_name]

    frame1 = resize(256, frame1)
    frame2 = resize(256, frame2)

    frame1.save("test1.png")
    frame2.save("test2.png")

    resize_img("test1.png", "test2.png")
    input_frames = ["test1.png", "resized_img2.png"]

    frames = list(
        util.interpolate_recursively_from_files(
            input_frames, times_to_interpolate, model))

    mediapy.write_video("out.mp4", frames, fps=30)
    return "out.mp4"
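# Example (hypothetical direct call, bypassing the Gradio UI):
#   video_path = predict("cat1.jpeg", "cat2.jpeg", 2, model_names[0])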


title = "frame-interpolation"
description = "Gradio demo for FILM: Frame Interpolation for Large Motion. To use it, upload two images and set the number of times to interpolate, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'><a href='https://film-net.github.io/' target='_blank'>FILM: Frame Interpolation for Large Motion</a> | <a href='https://github.com/google-research/frame-interpolation' target='_blank'>Github Repo</a></p>"
examples = [
    ['cat3.jpeg', 'cat4.jpeg', 2, model_names[0]],
    ['cat1.jpeg', 'cat2.jpeg', 2, model_names[1]],
]

# Two image uploads, a recursion-depth slider (times_to_interpolate), and a model
# dropdown are passed to predict(); the returned mp4 is rendered as a playable video.
gr.Interface(
    predict,
    [
        gr.inputs.Image(type='filepath'),
        gr.inputs.Image(type='filepath'),
        gr.inputs.Slider(minimum=2, maximum=4, step=1),
        gr.inputs.Dropdown(choices=model_names, default=model_names[0])
    ],
    "playable_video",
    title=title,
    description=description,
    article=article,
    examples=examples
).launch(enable_queue=True)
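# Note: this script targets the legacy Gradio API (`gr.inputs.*`, `launch(enable_queue=True)`);
# on Gradio 4+ the input components and the queueing call would need to be updated.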