Francisco Cerna Fukuzaki committed on
Commit
e1763bc
1 Parent(s): 69a62ef
2022_01_14_21_38_49_Barcelo_YOLOv5_Guia_Colaboratory.jpg ADDED
README.md CHANGED
@@ -1,12 +1,37 @@
 ---
 title: DemoIAZIKA
-emoji: 🦀
-colorFrom: pink
-colorTo: green
+emoji: 🦋
+colorFrom: blue
+colorTo: pink
 sdk: gradio
-sdk_version: 3.4.1
 app_file: app.py
 pinned: false
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# Configuration
+
+`title`: _string_
+Display title for the Space
+
+`emoji`: _string_
+Space emoji (emoji-only character allowed)
+
+`colorFrom`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`colorTo`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`sdk`: _string_
+Can be either `gradio` or `streamlit`
+
+`sdk_version`: _string_
+Only applicable for `streamlit` SDK.
+See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+`app_file`: _string_
+Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+Path is relative to the root of the repository.
+
+`pinned`: _boolean_
+Whether the Space stays on top of your list.
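Note that `sdk_version` is the one documented key this commit drops from the front matter, since it only applies to the `streamlit` SDK. For reference, a hypothetical front matter for a Streamlit Space exercising every documented key might look like this (the title and version number are illustrative, not from this repo):

```yaml
---
title: MyStreamlitDemo      # hypothetical Space name
emoji: 🦋
colorFrom: blue
colorTo: pink
sdk: streamlit
sdk_version: 1.10.0         # only meaningful when sdk is streamlit
app_file: app.py
pinned: false
---
```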
app.py ADDED
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+"""Deploy Barcelo demo.ipynb
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/1FxaL8DcYgvjPrWfWruSA5hvk3J81zLY9
+
+![ ](https://www.vicentelopez.gov.ar/assets/images/logo-mvl.png)
+
+# Model
+
+YOLO is a family of compound-scaled object detection models trained on the COCO dataset, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite.
+
+
+## Gradio Inference
+
+![](https://i.ibb.co/982NS6m/header.png)
+
+This notebook is optionally accelerated with a GPU runtime.
+
+
+----------------------------------------------------------------------
+
+YOLOv5 Gradio demo
+
+*Author: Ultralytics LLC and Gradio*
+
+# Code
+"""
+
+#!pip install -qr https://raw.githubusercontent.com/ultralytics/yolov5/master/requirements.txt gradio  # install dependencies
+
+import os
+import gradio as gr
+import torch
+from PIL import Image
+
+HF_TOKEN = os.getenv("HF_TOKEN")
+hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "demoIAZIKA-flags")
+
+# Example images
+torch.hub.download_url_to_file('https://i.pinimg.com/originals/7f/5e/96/7f5e9657c08aae4bcd8bc8b0dcff720e.jpg', 'ejemplo1.jpg')
+torch.hub.download_url_to_file('https://i.pinimg.com/originals/c2/ce/e0/c2cee05624d5477ffcf2d34ca77b47d1.jpg', 'ejemplo2.jpg')
+
+# Model
+# model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # force_reload=True to update
+# model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt')  # local weights (Google Colab or Space)
+model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', force_reload=True, autoshape=True)  # custom weights
+# model = torch.hub.load('path/to/yolov5', 'custom', path='/content/yolov56.pt', source='local')  # local repo
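Because `autoshape=True` wraps the model so it accepts file paths, PIL images, or NumPy arrays directly, it can be smoke-tested before any Gradio wiring. A minimal sketch, assuming the script above has run and the example downloads succeeded:

```python
# quick sanity check on the freshly loaded hub model
test_results = model('ejemplo1.jpg')   # AutoShape accepts a path, PIL image, or ndarray
print(test_results.pandas().xyxy[0])   # detections as a DataFrame (xmin, ymin, xmax, ymax, confidence, class)
```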
+
+
+def yolo(size, iou, conf, im):
+    '''Wrapper fn for Gradio: resize the input, run inference, and render the detections.'''
+    g = int(size) / max(im.size)  # gain
+    im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize needs a tuple; ANTIALIAS was removed in Pillow 10
+
+    model.iou = iou    # NMS IoU threshold
+    model.conf = conf  # confidence threshold
+
+    results = model(im)  # inference
+    results.render()  # updates results.ims with boxes and labels
+    return Image.fromarray(results.ims[0])
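To make the gain step concrete: with `size='640'` and a 1024×768 input, `g = 640 / 1024 = 0.625`, so the image is resized to 640×480 before inference. A usage sketch for the wrapper (the input file assumes the earlier downloads; the output path is hypothetical):

```python
img = Image.open('ejemplo1.jpg')
out = yolo('640', iou=0.45, conf=0.50, im=img)  # size, NMS IoU, confidence, PIL image
out.save('resultado.jpg')                       # hypothetical output path
```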
+
+# ------------ Interface -------------
+
+in1 = gr.inputs.Radio(['640', '1280'], label="Image size", default='640', type='value')
+in2 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.45, label='NMS IoU threshold')
+in3 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.50, label='Confidence threshold')
+in4 = gr.inputs.Image(type='pil', label="Original Image")
+
+out2 = gr.outputs.Image(type="pil", label="YOLOv5")
+
+# ------------- Text -------------
+title = 'Trampas Barceló'
+description = """
+<p>
+<center>
+System developed by the Undersecretariat of Innovation of the Municipality of Vicente López. Warning: only use photos coming from the Barceló traps, not cell-phone pictures or images from the internet.
+<img src="https://www.vicentelopez.gov.ar/assets/images/logo-mvl.png" alt="logo" width="250"/>
+</center>
+</p>
+"""
+article = "<p style='text-align: center'><a href='https://docs.google.com/presentation/d/1T5CdcLSzgRe8cQpoi_sPB4U170551NGOrZNykcJD0xU/edit?usp=sharing' target='_blank'>For more info, click here for the white paper</a></p><p style='text-align: center'><a href='https://drive.google.com/drive/folders/1owACN3HGIMo4zm2GQ_jf-OhGNeBVRS7l?usp=sharing' target='_blank'>Google Colab Demo</a></p><p style='text-align: center'><a href='https://github.com/Municipalidad-de-Vicente-Lopez/Trampa_Barcelo' target='_blank'>GitHub repo</a></p>"
+
+examples = [['640', 0.45, 0.75, 'ejemplo1.jpg'], ['640', 0.45, 0.75, 'ejemplo2.jpg']]
+
+iface = gr.Interface(yolo,
+                     inputs=[in1, in2, in3, in4],
+                     outputs=out2,
+                     title=title,
+                     description=description,
+                     article=article,
+                     examples=examples,
+                     theme="huggingface",
+                     analytics_enabled=False,
+                     allow_flagging="manual",
+                     flagging_options=["correct", "incorrect", "almost correct", "error", "other"],
+                     flagging_callback=hf_writer)
+
+iface.launch(enable_queue=True, debug=True)
+
+"""For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36).
+
+## Citation
+
+[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686)
+"""
best.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3306079ebeedfbc36f56650a8c9c4a73d97a394113e26a1117371bc0233cd439
+size 14348789
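The three lines above are a git-lfs pointer stub, not the weights themselves; the real ~14 MB file is fetched by git-lfs on checkout (e.g. `git lfs pull`). A minimal sketch of how such a pointer can be read, which only works while `best.pt` is still the text stub rather than the binary:

```python
def parse_lfs_pointer(path):
    """Read a git-lfs pointer file and return its recorded oid and size."""
    with open(path, encoding="utf-8") as f:
        fields = dict(line.split(" ", 1) for line in f.read().splitlines() if line)
    return fields["oid"].strip(), int(fields["size"])

oid, size = parse_lfs_pointer("best.pt")  # e.g. ('sha256:3306...', 14348789)
```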
requirements.txt ADDED
@@ -0,0 +1,26 @@
+# pip install -r requirements.txt
+# base ----------------------------------------
+matplotlib>=3.2.2
+numpy>=1.18.5
+opencv-python-headless
+Pillow
+PyYAML>=5.3.1
+scipy>=1.4.1
+torch>=1.7.0
+torchvision>=0.8.1
+tqdm>=4.41.0
+# logging -------------------------------------
+tensorboard>=2.4.1
+# wandb
+# plotting ------------------------------------
+seaborn>=0.11.0
+pandas
+# export --------------------------------------
+# coremltools>=4.1
+# onnx>=1.9.0
+# scikit-learn==0.19.2  # for coreml quantization
+# extras --------------------------------------
+# Cython  # for pycocotools https://github.com/cocodataset/cocoapi/issues/172
+# pycocotools>=2.0  # COCO mAP
+# albumentations>=1.0.3
+thop  # FLOPs computation