from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *

# repo_id = "YOUR_USERNAME/YOUR_LEARNER_NAME"
# repo_id = "igmarco/grapes-semanticsegmentation"

# learner = from_pretrained_fastai(repo_id)

import torchvision.transforms as transforms
import PIL

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the TorchScript segmentation model.
model = torch.jit.load("Pr1.pth")
model = model.cpu()

# Convert a PIL image to a normalized tensor (ImageNet statistics) with a batch dimension.
def transform_image(image):
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    image_aux = image
    return my_transforms(image_aux).unsqueeze(0).to(device)

# Function that runs the predictions.
def predict(img):
    img_pil = PIL.Image.fromarray(img, 'RGB')
    image = transforms.Resize((480, 640))(img_pil)
    tensor = transform_image(image=image)

    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)

    # Per-pixel argmax over the class dimension, then map each class id to a gray level.
    outputs = torch.argmax(outputs, 1)
    mask = np.array(outputs.cpu())
    mask[mask == 0] = 0
    mask[mask == 1] = 150
    mask[mask == 2] = 25
    mask[mask == 3] = 74
    mask[mask == 4] = 255
    mask = np.reshape(mask, (480, 640))

    return PIL.Image.fromarray(mask.astype('uint8'))

# Create the interface and launch it.
# gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(480, 640)), outputs=gr.outputs.Image(type="pil")).launch(share=False)
gr.Interface(fn=predict,
             inputs=gr.inputs.Image(shape=(480, 640)),
             outputs=gr.outputs.Image(type="pil"),
             examples=['grapes1.jpg', 'grapes2.jpg']).launch(share=False)
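
# Note: on Gradio 3.x+ the gr.inputs / gr.outputs namespaces were removed. A rough
# equivalent for newer releases would look like the sketch below (kept commented out;
# the fixed input shape is dropped because predict() already resizes to 480x640):
# gr.Interface(fn=predict,
#              inputs=gr.Image(),
#              outputs=gr.Image(type="pil"),
#              examples=['grapes1.jpg', 'grapes2.jpg']).launch(share=False)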