import gradio as gr
import torch
from PIL import Image
# Images
torch.hub.download_url_to_file('https://storage.googleapis.com/kili-datasets-public/plastic-in-river/samples/ckze0btj10ejf0lyy1imtdy7o.jpg', 'bottles1.jpg')
torch.hub.download_url_to_file('https://storage.googleapis.com/kili-datasets-public/plastic-in-river/samples/ckze0btj10ejd0lyyfzm85k9u.jpg', 'bottles2.jpg')
# Model
model = torch.hub.load('PierreLeveau/yolov5', 'custom', 'https://storage.googleapis.com/kili-datasets-public/plastic-in-river/model/v3/best.pt') # force_reload=True to update
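# Optional tuning (sketch): YOLOv5 models loaded through torch.hub usually return an
# AutoShape wrapper that exposes NMS attributes. The values below are illustrative
# defaults, not settings from the original space; uncomment to experiment.
# model.conf = 0.25  # NMS confidence threshold
# model.iou = 0.45   # NMS IoU threshold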
def yolo(im, size=640):
    g = size / max(im.size)  # gain to fit the longest side to `size`
    im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize (LANCZOS replaces the deprecated ANTIALIAS)
    results = model(im)  # inference
    results.render()  # updates results.imgs with boxes and labels
    return Image.fromarray(results.imgs[0])
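
# Quick local check (sketch): assumes the example images above downloaded successfully.
# Image.open() returns a PIL image, so it can be passed straight to yolo().
# annotated = yolo(Image.open('bottles1.jpg'))
# annotated.save('bottles1_pred.jpg')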
inputs = gr.inputs.Image(type='pil', label="Original Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")
title = "YOLOv5 - Plastic in river detection"
description = "This space demonstrates a YOLOv5 model fine-tuned on a dataset of annotated photos of plastic waste in rivers. Upload an image or click one of the example images below."
article = "<p style='text-align: center'>YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset. We fine-tuned models trained by Ultralytics with the help of their awesome <a href='https://github.com/ultralytics/yolov5'>code repository</a>. The data comes from a community challenge organized by <a href='https://kili-technology.com/blog/kili-s-community-challenge-plastic-in-river-dataset'>Kili</a>, and this demo is heavily inspired by the original <a href='https://huggingface.co/spaces/akhaliq/YOLOv5'>YOLOv5 space</a>. We will update the model during the challenge.</p>"
examples = [['bottles1.jpg'], ['bottles2.jpg']]
gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(debug=True)