import os

from PIL import Image
import torch
from torchvision import transforms
import gradio as gr

# Load the pretrained YOLOP model from the hustvl/yolop hub repo.
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)
model.eval()

# ImageNet normalization (left available; currently disabled in the pipeline below).
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406],
    std=[0.229, 0.224, 0.225]
)

transform = transforms.Compose([
    transforms.ToTensor(),
    # normalize
])

def inference(img):
    # Resize to the 640x640 resolution the model expects, then add a batch dim.
    img = img.resize((640, 640))
    img = torch.unsqueeze(transform(img), dim=0)
    with torch.no_grad():
        det_out, da_seg_out, ll_seg_out = model(img)
    # Return channel 0 of each segmentation head as a numpy image.
    ll_out = ll_seg_out[0][0, :, :].detach().numpy()
    da_out = da_seg_out[0][0, :, :].detach().numpy()
    return da_out, ll_out
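# The demo above displays raw channel-0 activations from each segmentation
# head. A minimal alternative sketch, assuming YOLOP's heads emit two-channel
# (background/foreground) maps of shape (1, 2, H, W): a per-pixel argmax
# yields a clean binary mask. `seg_to_mask` is a hypothetical helper shown
# for illustration; it could replace the raw arrays returned by inference().
import numpy as np

def seg_to_mask(seg_out):
    # seg_out: (1, 2, H, W) tensor from a YOLOP segmentation head
    mask = torch.argmax(seg_out, dim=1)  # (1, H, W) with values in {0, 1}
    return (mask[0].cpu().numpy() * 255).astype(np.uint8)  # 0/255 grayscale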

title = "YOLOP"
description = ("Gradio demo for YOLOP, a panoptic driving perception model "
               "pretrained on the BDD100K dataset. To use it, upload your own "
               "image or click one of the examples to load it. Read more at "
               "the links below.")
article = ("<p style='text-align: center'>"
           "<a href='https://arxiv.org/abs/2108.11250'>YOLOP: You Only Look Once "
           "for Panoptic Driving Perception</a> | "
           "<a href='https://github.com/hustvl/YOLOP'>Github Repo</a></p>")
examples = [['frame_00_delay-0.13s.jpg']]

gr.Interface(
    inference,
    gr.inputs.Image(type="pil"),
    [gr.outputs.Image(label='Drivable Area Segmentation'),
     gr.outputs.Image(label='Lane Detection')],
    title=title,
    description=description,
    article=article,
    examples=examples
).launch()