Spaces:
Sleeping
Sleeping
MargaritaMawyin
committed on
Commit
•
4ab64fd
1
Parent(s):
9e923b9
detector de cuys
Browse files
app.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import cv2

from ultralytics import YOLO

# Load the trained detection model from the local weights file.
# NOTE(review): 'best.pt' appears to be the LFS-tracked YOLO checkpoint
# added in this same commit — confirm it is present at app startup.
model = YOLO('best.pt')
def show_results(loaded_image):
    """Run the detector on one image and annotate the detections.

    This is the single handler wired into the Gradio interface; it takes
    one parameter because the interface declares exactly one input (the
    image path produced by the file-path image uploader).

    Args:
        loaded_image: filesystem path of the uploaded image.

    Returns:
        A 2-tuple matching the interface's declared outputs: the
        annotated image converted to RGB, and the number of boxes found.
    """
    # Ask the model to predict on the input image; keep only the first
    # (and only) result, moved to CPU as plain numpy data.
    detections = model.predict(source=loaded_image)[0].cpu().numpy()

    # Re-open the image with OpenCV so it can be drawn on.
    canvas = cv2.imread(loaded_image)

    # For every detected bounding box, draw a rectangle and write an id.
    for i, box in enumerate(detections.boxes.xyxy):
        x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
        cv2.rectangle(
            canvas,
            (x1, y1),
            (x2, y2),
            color=(0, 0, 255),
            thickness=2,
            lineType=cv2.LINE_AA,
        )
        # org is the box's top-left corner, same anchor as the rectangle.
        cv2.putText(
            canvas,
            text=f"id:{i}",
            org=(x1, y1),
            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
            fontScale=1,
            color=(0, 0, 255),
            thickness=1,
            lineType=cv2.LINE_AA,
        )

    # Return the two declared outputs: RGB image and total box count.
    return cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB), len(detections.boxes)
# One input component: the uploaded image, handed to the handler as a path.
inputs = [
    gr.components.Image(type="filepath", label="Input Image"),
]
# Two output components: the annotated image and the detection total.
outputs = [
    gr.components.Image(type="numpy", label="Output Image"),
    gr.Textbox(label="Total:"),
]
#examples = [['demo1.png'], ['demo2.jpg'], ['demo3.jpg'], ['demo4.png']]

# The interface may also list example inputs. Since the input here is an
# image, the examples would be image files that must be committed to the
# repository and referenced by a correct path.
interface = gr.Interface(
    fn=show_results,
    inputs=inputs,
    outputs=outputs,
    title="Object Detection",
    #examples=examples,
)
interface.launch()
best.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d6d1085e3ecf1d498e291fc0ad5931af8c437dafc7c7299d5fd6da0c3fd77afd
|
3 |
+
size 6241326
|
.gitignore
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
flagged/
|
2 |
+
*.pt
|
3 |
+
*.png
|
4 |
+
*.jpg
|
5 |
+
*.mp4
|
6 |
+
*.mkv
|
7 |
+
gradio_cached_examples/
|
requirements.txt
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Ultralytics requirements
|
2 |
+
# Usage: pip install -r requirements.txt
|
3 |
+
|
4 |
+
# Base ----------------------------------------
|
5 |
+
hydra-core>=1.2.0
|
6 |
+
matplotlib>=3.2.2
|
7 |
+
numpy>=1.18.5
|
8 |
+
opencv-python>=4.1.1
|
9 |
+
Pillow>=7.1.2
|
10 |
+
PyYAML>=5.3.1
|
11 |
+
requests>=2.23.0
|
12 |
+
scipy>=1.4.1
|
13 |
+
torch>=1.7.0
|
14 |
+
torchvision>=0.8.1
|
15 |
+
tqdm>=4.64.0
|
16 |
+
ultralytics
|
17 |
+
|
18 |
+
# Logging -------------------------------------
|
19 |
+
tensorboard>=2.4.1
|
20 |
+
# clearml
|
21 |
+
# comet
|
22 |
+
|
23 |
+
# Plotting ------------------------------------
|
24 |
+
pandas>=1.1.4
|
25 |
+
seaborn>=0.11.0
|
26 |
+
|
27 |
+
# Export --------------------------------------
|
28 |
+
# coremltools>=6.0 # CoreML export
|
29 |
+
# onnx>=1.12.0 # ONNX export
|
30 |
+
# onnx-simplifier>=0.4.1 # ONNX simplifier
|
31 |
+
# nvidia-pyindex # TensorRT export
|
32 |
+
# nvidia-tensorrt # TensorRT export
|
33 |
+
# scikit-learn==0.19.2 # CoreML quantization
|
34 |
+
# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos)
|
35 |
+
# tensorflowjs>=3.9.0 # TF.js export
|
36 |
+
# openvino-dev # OpenVINO export
|
37 |
+
|
38 |
+
# Extras --------------------------------------
|
39 |
+
ipython # interactive notebook
|
40 |
+
psutil # system utilization
|
41 |
+
thop>=0.1.1 # FLOPs computation
|
42 |
+
# albumentations>=1.0.3
|
43 |
+
# pycocotools>=2.0.6 # COCO mAP
|
44 |
+
# roboflow
|
45 |
+
|
46 |
+
# HUB -----------------------------------------
|
47 |
+
GitPython>=3.1.24
|