# -*- coding: utf-8 -*-
"""Deploy Barcelo demo.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1FxaL8DcYgvjPrWfWruSA5hvk3J81zLY9

![ ](https://www.vicentelopez.gov.ar/assets/images/logo-mvl.png)

# Model

YOLO is a family of compound-scaled object detection models trained on the COCO dataset. It includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite.

## Gradio Inference

![](https://i.ibb.co/982NS6m/header.png)

This notebook can optionally be accelerated with a GPU runtime.

----------------------------------------------------------------------

YOLOv5 Gradio demo

*Author: Ultralytics LLC and Gradio*

# Code
"""
#!pip install -qr https://raw.githubusercontent.com/ultralytics/yolov5/master/requirements.txt gradio # install dependencies
import os
import re
import json
import numpy as np
import pandas as pd
import gradio as gr
import torch
from PIL import Image
from ultralytics import YOLO
#from ultralyticsplus import render_result
# Images
torch.hub.download_url_to_file('https://huggingface.co/spaces/Municipalidad-de-Vicente-Lopez/Trampas_Barcelo/resolve/main/2024-03-11T10-50-27.jpg', 'ejemplo1.jpg')
torch.hub.download_url_to_file('https://i.pinimg.com/originals/c2/ce/e0/c2cee05624d5477ffcf2d34ca77b47d1.jpg', 'ejemplo2.jpg')
# Model
class YOLODetect():
    def __init__(self, modelo):
        self.modelo = modelo

    def predecir(self, url, conf=0.5, iou=0.40):
        # conf: object confidence threshold for detection (library default 0.25)
        # iou: intersection over union (IoU) threshold for NMS (library default 0.7)
        self.source = url
        self.results = self.modelo.predict(source=self.source, save=True, imgsz=640, conf=conf, iou=iou)
        return self.results

    def show(self):
        results = self.results[0]
        # render_result (ultralyticsplus) is unavailable here, so load the
        # annotated image that predict(save=True) wrote to disk instead
        render = Image.open(f"runs/detect/predict/{results.path}")
        return render
    def to_json(self):
        results = self.results[0]
        img_size = results.orig_shape
        img_name = results.path
        array_numpy = results.boxes.cls.cpu().numpy().astype(np.int32)
        # Detected class ids and their display names
        clases = {
            0: "Aedes",
            1: "Mosquitos",
            2: "Moscas"
        }
        # Count detections per class
        conteo_clases = np.bincount(array_numpy)
        self.json_result = [{'Especie': clases[i],
                             'Cantidad': str(conteo_clases[i]) if i < len(conteo_clases) else str(0)}
                            for i in range(len(clases))]
        # Assemble the result dictionary
        result_dict = {
            "image": str(img_name),
            "size": str(img_size),
            "detail": self.json_result
        }
        # Round-trip through JSON to guarantee the result is serializable
        result_dict = json.loads(json.dumps(result_dict))
        return result_dict

    def to_dataframe(self):
        # Tabular view of the per-class counts produced by to_json()
        return pd.DataFrame(self.json_result)
modelo_yolo = YOLO('best.pt')
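
# A hedged usage sketch of the wrapper class above, assuming 'best.pt' and the
# downloaded 'ejemplo1.jpg' are present (kept commented so the Space does not
# run an extra prediction at startup):
#
#   detector = YOLODetect(modelo_yolo)
#   detector.predecir('ejemplo1.jpg', conf=0.5, iou=0.40)
#   print(detector.to_json())        # {'image': ..., 'size': ..., 'detail': [...]}
#   print(detector.to_dataframe())   # per-class counts as a DataFrame
#   detector.show().save('annotated.jpg')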
def yolo(size, iou, conf, im):
    '''Wrapper fn for gradio'''
    g = int(size) / max(im.size)  # gain to scale the longest side to `size`
    im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize with antialiasing
    model = YOLODetect(modelo_yolo)
    model.predecir(im, conf=conf, iou=iou)  # thresholds come from the UI sliders
    result_json = model.to_json()
    result_df = model.to_dataframe()
    result_img = model.show()
    return result_img, result_df, result_json
# ------------ Interface -------------
in1 = gr.Radio(['640', '1280'], label="Image size", type='value')
in2 = gr.Slider(minimum=0, maximum=1, step=0.05, label='NMS IoU threshold')
in3 = gr.Slider(minimum=0, maximum=1, step=0.05, label='Confidence threshold')
in4 = gr.Image(type='pil', label="Original Image")
out2 = gr.Image(type="pil", label="YOLOv5")
out3 = gr.Dataframe(label="Species counts", headers=['Especie', 'Cantidad'], type="pandas")
out4 = gr.JSON(label="JSON")
# -------------- Text --------------
title = 'Trampas Barceló'
description = """
<p>
 <center>
 System developed by the Subsecretaría de Modernización of the Municipio de Vicente López. Warning: only use photos taken by Barceló traps, not phone pictures or images from the internet.
 <img src="https://www.vicentelopez.gov.ar/assets/images/logo-mvl.png" alt="logo" width="250"/>
 </center>
</p>
"""
article = "<p style='text-align: center'><a href='https://docs.google.com/presentation/d/1T5CdcLSzgRe8cQpoi_sPB4U170551NGOrZNykcJD0xU/edit?usp=sharing' target='_blank'>For more info, click to open the white paper</a></p><p style='text-align: center'><a href='https://drive.google.com/drive/folders/1owACN3HGIMo4zm2GQ_jf-OhGNeBVRS7l?usp=sharing' target='_blank'>Google Colab Demo</a></p><p style='text-align: center'><a href='https://github.com/Municipalidad-de-Vicente-Lopez/Trampa_Barcelo' target='_blank'>Repo Github</a></p>"
examples = [['640',0.45, 0.75,'ejemplo1.jpg'], ['640',0.45, 0.75,'ejemplo2.jpg']]
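
# A hedged sketch of the flagging callback hinted at by the commented-out
# `hf_writer` below; the HF_TOKEN environment variable and dataset name are
# assumptions, not part of the original app:
#
#   hf_writer = gr.HuggingFaceDatasetSaver(os.getenv("HF_TOKEN"), "trampas-barcelo-flags")
#   # ...then pass flagging_callback=hf_writer to gr.Interface below.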
iface = gr.Interface(
    yolo,
    inputs=[in1, in2, in3, in4],
    outputs=[out2, out3, out4],
    title=title,
    description=description,
    article=article,
    examples=examples,
    analytics_enabled=False,
    allow_flagging="manual",
    flagging_options=["Correct", "Incorrect", "Almost correct", "Error", "Other"],
    # flagging_callback=hf_writer  # see the sketch above
)
iface.queue()
iface.launch(debug=True)
"""For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36).
## Citation
[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686)
""" |