import os

import gradio as gr
import requests
import yolov5  # pip-packaged implementation of the original YOLOv5 models
file_urls = [
    'https://c8.alamy.com/zooms/9/382c1e254fe14207998df9ec56354291/wce5tj.jpg',
    'https://c8.alamy.com/comp/W20YYR/freight-container-on-the-back-of-a-truck-stuck-in-traffic-on-the-interstate-in-georgia-usa-W20YYR.jpg',
    'https://www.shutterstock.com/shutterstock/photos/318604739/display_1500/stock-photo-highway-and-container-truck-at-china-318604739.jpg',
]
def download_file(url, save_name):
    # Download a remote file once; skip it if it already exists on disk.
    if not os.path.exists(save_name):
        response = requests.get(url)
        with open(save_name, 'wb') as f:
            f.write(response.content)

# Fetch the sample images as image_0.jpg, image_1.jpg, image_2.jpg.
for i, url in enumerate(file_urls):
    download_file(url, f"image_{i}.jpg")
def yolov5_inference(
    image=None,
    model_path=None,
    image_size=640,
    conf_threshold=0.25,
    iou_threshold=0.45,
):
    # Load the selected weights on CPU and apply the UI thresholds.
    model = yolov5.load(model_path, device="cpu")
    model.conf = conf_threshold
    model.iou = iou_threshold
    results = model([image], size=image_size)
    # Collect per-detection crops (BGR -> RGB) for the gallery output.
    crops = results.crop(save=False)
    img_crops = [crop["im"][..., ::-1] for crop in crops]
    # Return the rendered detection image plus the individual crops.
    return results.render()[0], img_crops
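
# --- Optional sanity check (sketch) --------------------------------------
# A minimal, hedged example of calling the inference path directly, without
# the Gradio UI. It assumes Crime_Y5.pt is present in the working directory
# (as the model dropdown below does) and that the sample images downloaded
# above exist; RUN_SANITY_CHECK is a hypothetical opt-in environment variable.
if os.environ.get("RUN_SANITY_CHECK"):
    from PIL import Image
    rendered, crops = yolov5_inference(
        image=Image.open("image_0.jpg"),
        model_path="Crime_Y5.pt",
        image_size=640,
        conf_threshold=0.35,
        iou_threshold=0.45,
    )
    print(f"rendered shape: {rendered.shape}, crops: {len(crops)}")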
# Gradio components (positional order must match yolov5_inference arguments).
inputs = [
    gr.Image(type="pil", label="Input Image"),
    gr.Dropdown(
        ["Crime_Y5.pt", "yolov5s.pt", "yolov5m.pt", "yolov5l.pt", "yolov5x.pt"],
        value="Crime_Y5.pt",
        label="Model",
    ),
    gr.Slider(minimum=320, maximum=1280, value=640, step=32, label="Image Size"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold"),
    gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
]
outputs = gr.Image(type="numpy", label="Output Image")
outputs_crops = gr.Gallery(label="Object crop")
title = "Container code detection - YOLO V5" | |
description = "YOLOv5 is a family of object detection models pretrained on COCO dataset. This model is a pip implementation of the original YOLOv5 model." | |
examples = [['1.jpg', 'Crime_Y5.pt', 640, 0.35, 0.45] | |
,['2.jpg', 'Crime_Y5.pt', 640, 0.35, 0.45] | |
,['4.jpg', 'Crime_Y5.pt', 640, 0.35, 0.45]] | |
demo_app = gr.Interface(
    fn=yolov5_inference,
    inputs=inputs,
    outputs=[outputs, outputs_crops],
    title=title,
    description=description,
    examples=examples,
    cache_examples=True,
    live=True,
)
# Queue requests and launch; debug=True surfaces errors in the Space logs.
demo_app.queue().launch(debug=True)