"""Gradio demo: interpolate between two input images and render the result as a video."""
import mediapy
import gradio as gr

from utils import load_image
from interpolator import Interpolator, interpolate_recursively
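
# Note: `utils.load_image` and the `interpolator` module are local helpers in
# this repository; they appear to follow the TF Hub FILM frame-interpolation
# tutorial, but their exact APIs are defined here, not by an external library.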

# Output location for the rendered interpolation video.
path = "./smooth.mp4"

# Instantiate the interpolation model once at import time so every
# request reuses the same loaded weights.
interpolator = Interpolator()


def predict(image_a, image_b):
    """Interpolate between two images and return the path of the rendered video."""
    # Read both inputs from the file paths Gradio passes in.
    image1 = load_image(image_a)
    image2 = load_image(image_b)
    input_frames = [image1, image2]
    # Recursively synthesize the in-between frames.
    frames = list(interpolate_recursively(input_frames, interpolator))
    # Encode all frames as a 30 fps video and return its path for gr.Video.
    mediapy.write_video(path, frames, fps=30)
    return path
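
# A FILM-style recursive interpolator doubles the number of in-between frames
# at each recursion level, so two inputs grow to 2**k + 1 frames after k
# levels; the depth used here is whatever `interpolate_recursively` applies
# by default, since no recursion argument is passed.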


footer = r"""
<center>
<b>
Demo for <a href='https://www.tensorflow.org/hub/tutorials/tf_hub_film_example'>FILM: Frame Interpolation for Large Motion</a>
</b>
</center>
"""

coffee = r"""
<center>
<a href="https://www.buymeacoffee.com/leonelhs"><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=leonelhs&button_colour=FFDD00&font_colour=000000&font_family=Cookie&outline_colour=000000&coffee_colour=ffffff" /></a>
</center>
"""


with gr.Blocks(title="Frame Interpolation") as app:
    gr.HTML("<center><h1>Frame interpolation between two images</h1></center>")
    gr.HTML("<center><h3>Neural network and logic for synthesizing the in-between "
            "frames that turn a pair of images into a smooth video.</h3></center>")
    with gr.Row(equal_height=False):
        with gr.Column():
            input_img_a = gr.Image(type="filepath", label="Input image A")
            input_img_b = gr.Image(type="filepath", label="Input image B")
            run_btn = gr.Button("Run", variant="primary")
        with gr.Column():
            output_img = gr.Video(format="mp4", label="Interpolated video")
            gr.ClearButton(components=[input_img_a, input_img_b, output_img], variant="stop")

    run_btn.click(predict, [input_img_a, input_img_b], [output_img])

    # Example galleries: clicking a sample copies its path into the matching input.
    with gr.Row():
        blobs_a = [[f"examples/image_a/{x:02d}.jpg"] for x in range(1, 2)]
        examples_a = gr.Dataset(components=[input_img_a], samples=blobs_a)
        examples_a.click(lambda x: x[0], [examples_a], [input_img_a])
    with gr.Row():
        blobs_b = [[f"examples/image_b/{x:02d}.jpg"] for x in range(1, 2)]
        examples_b = gr.Dataset(components=[input_img_b], samples=blobs_b)
        examples_b.click(lambda x: x[0], [examples_b], [input_img_b])

    with gr.Row():
        gr.HTML(footer)
    with gr.Row():
        gr.HTML(coffee)

# Enable the queue before launching; with debug=True, launch() blocks, so a
# queue() call placed after it would never run, and queuing keeps
# long-running interpolation requests from timing out.
app.queue()
app.launch(share=False, debug=True, show_error=True)