from fastai.vision.all import *
import gradio as gr
import torch
import torchvision.transforms as transforms
import numpy as np
from pathlib import Path
from PIL import Image

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the scripted (TorchScript) U-Net onto the CPU, move it to the chosen
# device and switch it to inference mode.
model = torch.jit.load("unet.pth", map_location="cpu")
model = model.to(device)
model.eval()


def transform_image(image):
    # ImageNet normalisation, matching the statistics used during training.
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])

    # Gradio passes the input as a NumPy array: convert it to a PIL image,
    # resize it to the 480x640 resolution the model expects, normalise it,
    # add a batch dimension and move the tensor to the model's device.
    image = Image.fromarray(image)
    image = transforms.Resize((480, 640))(image)
    tensor = my_transforms(image).unsqueeze(0).to(device)
    # Run the network without tracking gradients and take, for every pixel,
    # the class with the highest score.
    with torch.no_grad():
        outputs = model(tensor)
    outputs = torch.argmax(outputs, 1)

    # Map each of the five class indices to a distinct grey level so the
    # predicted mask can be displayed as an image.
    mask = np.array(outputs.cpu())
    mask[mask == 0] = 255
    mask[mask == 1] = 150
    mask[mask == 2] = 76
    mask[mask == 3] = 25
    mask[mask == 4] = 0

    mask = np.reshape(mask, (480, 640))
    return Image.fromarray(mask.astype('uint8'))
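
# Optional sanity check, not part of the original app: run the segmentation
# once on one of the bundled example images (assumed to sit next to this
# script) so that loading or preprocessing errors surface before the UI
# starts. The output filename is purely illustrative.
if Path("color_188.jpg").exists():
    _preview = transform_image(np.array(Image.open("color_188.jpg").convert("RGB")))
    _preview.save("color_188_mask.png")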

# Build the Gradio demo: an image in, the predicted segmentation mask out.
gr.Interface(fn=transform_image,
             inputs=gr.Image(),
             outputs=gr.Image(),
             examples=['color_188.jpg', 'color_189.jpg']).launch(share=False)