# Gradio Spaces demo script for LETR (line segment detection).
from PIL import Image, ImageDraw
import torch
from torchvision import transforms
import torch.nn.functional as F
import gradio as gr
# import sys
# sys.path.insert(0, './')
from test import create_letr, draw_fig
from models.preprocessing import *
from models.misc import nested_tensor_from_tensor_list
# Load the pretrained LETR line-detection model once at import time.
model = create_letr()

# PREPARE PREPROCESSING
# Target size used by the Resize step of the preprocessing pipeline.
test_size = 1100

# Preprocessing pipeline (project-local transforms from models.preprocessing):
# PIL image -> tensor, normalized with the training-set mean/std, then resized.
# NOTE(review): Resize runs *after* Normalize here — presumably intentional to
# match LETR's training preprocessing; confirm against the upstream repo.
normalize = Compose([
    ToTensor(),
    Normalize([0.538, 0.494, 0.453], [0.257, 0.263, 0.273]),
    Resize([test_size]),
])
def predict(inp):
    """Run LETR on a numpy RGB image and return it with detected lines drawn.

    inp: numpy array (as delivered by the Gradio image input); cast to uint8.
    Returns the PIL image annotated in place by draw_fig.
    """
    pil_img = Image.fromarray(inp.astype('uint8'), 'RGB')
    # Keep the original (height, width) so detections can be mapped back.
    orig_size = torch.as_tensor([int(pil_img.height), int(pil_img.width)])
    tensor = normalize(pil_img)
    batch = nested_tensor_from_tensor_list([tensor])
    with torch.no_grad():
        outputs = model(batch)[0]
    # draw_fig annotates pil_img with the predicted line segments.
    draw_fig(pil_img, outputs, orig_size)
    return pil_img
# Gradio I/O components: raw image in, annotated image out.
inputs = gr.inputs.Image()
outputs = gr.outputs.Image()

# Build the demo UI and start serving it.
demo = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    examples=["demo.png", "tappeto-per-calibrazione.jpg"],
    title="LETR",
    description="Model for line detection...",
)
demo.launch()