# YOLOP / app.py — Hugging Face Space by akhaliq (commit 76c24b2)
import os
from PIL import Image
import torch
from torchvision import transforms
import gradio as gr
# load model
# Pretrained YOLOP (panoptic driving perception) pulled via torch.hub;
# needs network access on first run, weights are cached afterwards.
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)
# ImageNet mean/std normalization — defined but deliberately NOT applied
# (it is commented out of the Compose below); kept here for reference.
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
# Preprocessing pipeline: PIL image -> float tensor scaled to [0, 1].
# NOTE(review): normalization is intentionally disabled — presumably the
# checkpoint expects raw [0, 1] input; confirm against the YOLOP repo.
transform=transforms.Compose([
transforms.ToTensor(),
# normalize
])
def inference(img):
    """Run YOLOP on a PIL image and return its two segmentation maps.

    Args:
        img: input ``PIL.Image`` of any size and mode.

    Returns:
        Tuple ``(da_out, ll_out)`` of 2-D numpy arrays of shape
        (640, 640): the first channel of the drivable-area and the
        lane-line segmentation heads for the single batch element.
    """
    # The model expects a 3-channel 640x640 input; convert first so that
    # grayscale / RGBA / palette uploads don't break ToTensor's shape.
    img = img.convert("RGB").resize((640, 640))
    batch = torch.unsqueeze(transform(img), dim=0)
    # Inference only — skip autograd bookkeeping to save memory and time.
    with torch.no_grad():
        det_out, da_seg_out, ll_seg_out = model(batch)
    # Keep the original output order: drivable area first, lanes second.
    da_out = da_seg_out[0][0, :, :].numpy()
    ll_out = ll_seg_out[0][0, :, :].numpy()
    return da_out, ll_out
# --- Gradio UI wiring ---
title = "YOLOP"
description = "Gradio demo for YOLOP: YOLOP pretrained on the BDD100K dataset. To use it, simply upload your image or click on one of the examples to load them. Read more at the links below"
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2108.11250' target='_blank'>YOLOP: You Only Look Once for Panoptic Driving Perception</a> | <a href='https://github.com/hustvl/YOLOP' target='_blank'>Github Repo</a></p>"
examples = [['frame_00_delay-0.13s.jpg']]

# Build the interface with explicit keyword arguments, then launch it.
demo = gr.Interface(
    fn=inference,
    inputs=gr.inputs.Image(type="pil"),
    outputs=[
        gr.outputs.Image(label='Drivable Area Segmentation'),
        gr.outputs.Image(label='Lane Detection'),
    ],
    title=title,
    description=description,
    article=article,
    examples=examples,
)
demo.launch()