# Gradio demo app for LETR line segment detection.
from PIL import Image, ImageDraw
import torch
from torchvision import transforms
import torch.nn.functional as F
import gradio as gr

# import sys
# sys.path.insert(0, './')
from test import create_letr, draw_fig
from models.preprocessing import *
from models.misc import nested_tensor_from_tensor_list
# Instantiate the LETR model via the helper in the repo's test.py.
model = create_letr()
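# Assumption: create_letr() may already return the model ready for inference;
# calling eval() again is harmless (idempotent) and makes sure dropout and
# batch-norm layers behave deterministically during prediction.
model.eval()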
# PREPARE PREPROCESSING
test_size = 256

# Preprocessing pipeline (from models.preprocessing): convert to tensor,
# normalize per channel, then resize to the test resolution.
normalize = Compose([
    ToTensor(),
    Normalize([0.538, 0.494, 0.453], [0.257, 0.263, 0.273]),
    Resize([test_size]),
])
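# Rough sketch of what the pipeline yields (an assumption based on DETR-style
# transforms, where Resize([test_size]) scales the shorter image side to
# test_size while keeping the aspect ratio):
#
#   >>> t = normalize(Image.new("RGB", (640, 480)))
#   >>> t.shape   # channels first, shorter side == 256
#   torch.Size([3, 256, ...])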
def predict(inp):
    # Gradio passes the image in as a numpy array; convert it back to PIL.
    image = Image.fromarray(inp.astype('uint8'), 'RGB')
    h, w = image.height, image.width
    orig_size = torch.as_tensor([int(h), int(w)])

    # Preprocess and wrap in a padded batch (NestedTensor) as the model expects.
    img = normalize(image)
    inputs = nested_tensor_from_tensor_list([img])

    with torch.no_grad():
        outputs = model(inputs)[0]

    # draw_fig overlays the detected line segments on the original image.
    draw_fig(image, outputs, orig_size)
    return image
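# Quick local sanity check without the web UI (a sketch; it assumes the
# "demo.png" example listed below sits next to this script):
#
#   import numpy as np
#   annotated = predict(np.array(Image.open("demo.png").convert("RGB")))
#   annotated.save("demo_with_lines.png")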
# Gradio UI. Note: gr.inputs/gr.outputs is the legacy component API; recent
# Gradio releases expose these components directly, e.g. gr.Image().
inputs = gr.inputs.Image()
outputs = gr.outputs.Image()
gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    examples=["demo.png", "tappeto-per-calibrazione.jpg"],
    title="LETR",
    description="Model for line detection...",
).launch()