from fastai.vision.all import *
from PIL import Image
import numpy as np
import torch
import torchvision.transforms as transforms
import gradio as gr

# Load the model (the original fastai learner export is kept for reference)
#learn = load_learner('export.pkl')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = torch.jit.load("unet.pth")
model = model.to(device)  # keep the model and the input tensors on the same device
model.eval()

# Define the labels of our model
#labels = learn.dls.vocab

# ImageNet normalization applied to the input before it is fed to the network
def transform_image(image):
    my_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225])])
    return my_transforms(image).unsqueeze(0).to(device)

# Function in charge of producing the predictions
def predict(img):
    img = PILImage.create(img)
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)
    with torch.no_grad():
        outputs = model(tensor)
    outputs = torch.argmax(outputs, 1)
    # Map each predicted class index to a grayscale value for visualization
    mask = np.array(outputs.cpu())
    mask[mask == 0] = 255  # grape
    mask[mask == 1] = 150  # leaves
    mask[mask == 2] = 76   # pole
    mask[mask == 3] = 29   # wood
    mask = np.reshape(mask, (480, 640))
    return Image.fromarray(mask.astype('uint8'))
    #pred,pred_idx,probs = learn.predict(img)
    #return {labels[i]: float(probs[i]) for i in range(len(labels))}

# Create the interface and launch it (gr.Image replaces the deprecated
# gr.inputs.Image / gr.outputs.Image API)
gr.Interface(fn=predict,
             inputs=gr.Image(),
             outputs=gr.Image(),
             examples=['color_154.jpg', 'color_155.jpg']).launch(share=False)
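
# --- Optional quick check of predict() (a sketch, not part of the app) --------
# Assumes 'unet.pth' and the example image 'color_154.jpg' are in the working
# directory. With the launch() call above temporarily commented out, this would
# save the predicted mask to disk so it can be inspected without the Gradio UI.
#
# mask_img = predict('color_154.jpg')   # hypothetical local test
# print(mask_img.size)                  # expected (width, height): (640, 480)
# mask_img.save('mask_154.png')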