|
from huggingface_hub import from_pretrained_fastai
import gradio as gr

from fastai.vision.all import *
from fastai.basics import *
from fastai.vision import models
from fastai.metrics import *
from fastai.data.all import *
from fastai.callback.all import *
from pathlib import Path

import torchvision.transforms as transforms

import random
import PIL
from PIL import Image
|
|
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def transform_image(image):
    # Convert a PIL image to a float tensor normalized with the usual ImageNet
    # channel statistics (mean / std), add a batch dimension and move it to `device`.
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    return my_transforms(image).unsqueeze(0).to(device)
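# Quick sanity check (illustrative; uses one of the example images shipped with this app):
#   t = transform_image(PIL.Image.open("color_184.jpg"))
#   t.shape  ->  torch.Size([1, 3, H, W])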
|
|
|
class TargetMaskConvertTransform(ItemTransform):
    def __init__(self):
        pass

    def encodes(self, x):
        img, mask = x

        # Remap the grayscale label values of the annotation masks to
        # consecutive class indices; every other value becomes background (0).
        mask = np.array(mask)
        mask[(mask != 255) & (mask != 150) & (mask != 76) & (mask != 74) & (mask != 29) & (mask != 25)] = 0
        mask[mask == 255] = 1
        mask[mask == 150] = 2
        mask[mask == 76] = 4
        mask[mask == 74] = 4
        mask[mask == 29] = 3
        mask[mask == 25] = 3

        mask = PILMask.create(mask)
        return img, mask
|
|
|
from albumentations import (
    Compose,
    OneOf,
    ElasticTransform,
    GridDistortion,
    OpticalDistortion,
    HorizontalFlip,
    Rotate,
    Transpose,
    CLAHE,
    ShiftScaleRotate,
)


def get_y_fn(x):
    return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
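# get_y_fn maps an image path to its ground-truth mask path,
# e.g.  .../Images/color_184.jpg  ->  .../Labels/gt_184.png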
|
|
|
class SegmentationAlbumentationsTransform(ItemTransform):
    # split_idx = 0 -> the augmentations are applied to the training set only.
    split_idx = 0

    def __init__(self, aug):
        self.aug = aug

    def encodes(self, x):
        img, mask = x
        aug = self.aug(image=np.array(img), mask=np.array(mask))
        return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
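# Note: get_y_fn, TargetMaskConvertTransform and SegmentationAlbumentationsTransform
# are not called directly below, but they must be defined in this module so that
# from_pretrained_fastai can unpickle the exported Learner, which references them.
#
# For reference, a training-time pipeline would wire them up roughly like this
# (illustrative sketch only; `codes` and the augmentation choices are assumptions):
#
#   transformPipeline = Compose([HorizontalFlip(p=0.5),
#                                ShiftScaleRotate(p=0.5),
#                                OneOf([ElasticTransform(p=1),
#                                       GridDistortion(p=1),
#                                       OpticalDistortion(p=1)], p=0.3)])
#   db = DataBlock(blocks=(ImageBlock, MaskBlock(codes)),
#                  get_items=get_image_files,
#                  get_y=get_y_fn,
#                  splitter=RandomSplitter(valid_pct=0.2),
#                  item_tfms=[Resize((480, 640)),
#                             TargetMaskConvertTransform(),
#                             SegmentationAlbumentationsTransform(transformPipeline)])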
|
|
|
|
|
|
|
repo_id = "jegilj/Practica3"

learn = from_pretrained_fastai(repo_id)
model = learn.model
model = model.cpu()
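# The model stays on the CPU here; predict() moves it to `device` before inference.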
|
|
|
|
|
|
|
def predict(img_ruta):
    # Gradio passes the input image as a NumPy array.
    img = Image.fromarray(img_ruta)
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)

    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)

    # Per-pixel class prediction, then map the class indices back to the
    # grayscale values used by the original annotations.
    outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask[mask == 1] = 255
    mask[mask == 2] = 150
    mask[mask == 3] = 29
    mask[mask == 4] = 74
    mask = np.reshape(mask, (480, 640))
    return Image.fromarray(mask.astype('uint8'))
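# Local smoke test (illustrative; uses one of the example images listed below):
#   predict(np.array(Image.open("color_184.jpg")))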
|
|
|
|
|
|
|
# The example images are referenced by relative path and must ship with the app;
# share=False keeps the demo local.
gr.Interface(fn=predict,
             inputs=gr.Image(),
             outputs=gr.Image(),
             examples=['color_184.jpg', 'color_189.jpg']).launch(share=False)
|
|