# -*- coding: utf-8 -*-
"""Deploy OceanApp demo.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1j0T8gdLIa0X8fzkIgFpXDoU27BF49RUz?usp=sharing

![   ](https://i.pinimg.com/564x/3e/b8/f7/3eb8f7c348dffd7b3dffcafe81fbf2a6.jpg)

# Model

YOLO is a family of compound-scaled object detection models trained on the COCO dataset, with simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite.
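
As a minimal, illustrative sketch (not part of this app's code path; the `yolov5s` checkpoint name and the sample image URL are placeholders), a hub-loaded YOLOv5 model can run inference with TTA enabled:

```python
import torch

# Load a pretrained YOLOv5 checkpoint from PyTorch Hub (placeholder model name)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Inference with Test Time Augmentation enabled
results = model('https://ultralytics.com/images/zidane.jpg', augment=True)
results.print()
```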


## Gradio Inference

![](https://i.ibb.co/982NS6m/header.png)

This notebook can optionally be accelerated with a GPU runtime.
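
To confirm which runtime is actually in use, a quick device probe (not part of the original notebook) can be run first:

```python
import torch

# Report whether a CUDA-capable GPU is visible to PyTorch; otherwise the CPU is used
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print(f'Running on {device}')
```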

----------------------------------------------------------------------

 YOLOv5 Gradio demo

*Author: Ultralytics LLC and Gradio*

# Code
"""
#!pip install -qr https://raw.githubusercontent.com/ultralytics/yolov5/master/requirements.txt gradio # install dependencies

import gradio as gr
import pandas as pd
import torch
import logging
import json
import re
import os 
import boto3
from botocore.exceptions import NoCredentialsError
import tempfile
import io
from PIL import Image
# Images
torch.hub.download_url_to_file('https://i.pinimg.com/564x/18/0b/00/180b00e454362ff5caabe87d9a763a6f.jpg', 'ejemplo1.jpg')
torch.hub.download_url_to_file('https://i.pinimg.com/564x/3b/2f/d4/3b2fd4b6881b64429f208c5f32e5e4be.jpg', 'ejemplo2.jpg')

# AWS credentials and region, read from environment variables
aws_access_key_id = os.environ['aws_access_key_id']
aws_secret_access_key = os.environ['aws_secret_access_key']
region = os.environ['region']

# Upload images to S3
def upload_file(file_name, bucket=None, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: JSON fragment string if the file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name (with a .jpg extension)
    if object_name is None:
        object_name = os.path.basename(file_name + ".jpg")
    if bucket is None:
        bucket = 'oceanapp'
    s3_client = boto3.client('s3', aws_access_key_id=aws_access_key_id,
                             aws_secret_access_key=aws_secret_access_key,
                             region_name=region)
    # Upload the file and build the public object URL
    try:
        with open(file_name, "rb") as f:
            s3_client.upload_fileobj(f, bucket, object_name)
        s3_url = f"https://{bucket}.s3.amazonaws.com/{object_name}"
        status = '"url_details":[{"statusCode":200, "s3_url":"' + s3_url + '"}]'
        print(s3_url)
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError as e:
        logging.error(e)
        return False
    return status
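
# Minimal usage sketch (hypothetical values; assumes valid AWS credentials and an
# existing 'oceanapp' bucket): a temporary file such as '/tmp/tmpab12cd34' would be
# uploaded as 'tmpab12cd34.jpg', and the function returns a JSON fragment like
#   '"url_details":[{"statusCode":200, "s3_url":"https://oceanapp.s3.amazonaws.com/tmpab12cd34.jpg"}]'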

# Write the image bytes to a temporary file and upload it to S3 with upload_file
def tempFileJSON(img_file):
    temp = tempfile.NamedTemporaryFile(mode="wb")
    with temp as jpg:
        jpg.write(img_file)
        jpg.flush()  # make sure the bytes are on disk before upload_file re-opens the file
        print(jpg.name)
        uf = upload_file(jpg.name)
        return uf

# Strip all spaces from a string
def removeStr(string):
    return string.replace(" ", "")

# Model: custom YOLOv5 weights (best.pt), loaded through PyTorch Hub
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', force_reload=True, autoshape=True)  # local model or Google Colab

def listJSON(a, b, c, d, e, f, resImg):
    # a: image label, b: image size, c/e: detection counts, d/f: class-name fragments
    # sliced out of the YOLOv5 results string; resImg: S3 "url_details" fragment.
    x = re.findall("obo mar", d)
    y = re.findall("elica", d)
    z = re.findall("elica", f)
    if x:
        d = 'Lobo marino'
    if y:
        d = 'Pelicano'
    if z:
        f = 'Pelicano'
    if d == 'Lobo marino' or d == 'Pelicano':
        if d == 'Pelicano\nSp' or d == 'Pelicano\nS':
            d = 'Pelicano'
        if f != 'Pelicano':
            strlista = '"detail":[{"quantity":"' + str(removeStr(c)) + '","description":"' + str(d) + '"}]'
        else:
            strlista = '"detail":[{"quantity":"' + str(removeStr(c)) + '","description":"' + str(d) + '"},{"quantity":"' + str(removeStr(e)) + '","description":"' + str(f) + '"}]'
        strlist = '{"image":"' + str(removeStr(a)) + '","size":"' + str(removeStr(b)) + '",' + strlista + ',' + resImg + '}'
        json_string = json.loads(strlist)
        return json_string
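
# For reference, the JSON assembled above has roughly this shape (the values are
# hypothetical and depend on the detections and on the S3 upload result):
#   {"image": "image1/1:", "size": "640x640",
#    "detail": [{"quantity": "2", "description": "Lobo marino"}],
#    "url_details": [{"statusCode": 200, "s3_url": "https://oceanapp.s3.amazonaws.com/<object>.jpg"}]}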

def arrayLista(a, b, c, d):
    # Build the rows of the results DataFrame from the sliced detection counts
    # (a, c) and class-name fragments (b, d).
    x = re.findall("obo mar", b)
    y = re.findall("elica", b)
    z = re.findall("elica", d)
    if x:
        b = 'Lobo marino'
    if y:
        b = 'Pelicano'
    if z:
        d = 'Pelicano'
    if b == 'Lobo marino' or b == 'Pelicano':
        strlist = []
        strlist2 = []
        strlist.append(removeStr(a))
        strlist.append(b)
        if d == 'Pelicano':
            strlist2.append(removeStr(c))
            strlist2.append(d)
        strlista = [strlist, strlist2]
        df = pd.DataFrame(strlista, columns=['Cantidad', 'Especie'])
        return df

def yolo(size, iou, conf, im):
    '''Wrapper fn for gradio'''
    try:
        g = (int(size) / max(im.size))  # gain
        im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize

        model.iou = iou
        model.conf = conf
        results2 = model(im)  # inference
        results2.render()  # updates results.ims with boxes and labels
        # str(results2) is sliced at fixed offsets below; this assumes the usual
        # "image 1/1: <size> <count> <class>, ..." summary layout of YOLOv5 results.
        results3 = str(results2)
        # Convert the rendered image to bytes and upload it to S3
        pil_im = Image.fromarray(results2.ims[0])
        b = io.BytesIO()
        pil_im.save(b, 'jpeg')
        im_bytes = b.getvalue()
        fileImg = tempFileJSON(im_bytes)
        lista = listJSON(results3[0:9], results3[11:18], results3[19:21], results3[22:32], results3[34:36], results3[36:45], fileImg)
        lista2 = arrayLista(results3[19:21], results3[22:32], results3[34:36], results3[37:45])
        return Image.fromarray(results2.ims[0]), lista2, lista
    except Exception as e:
        logging.error(e, exc_info=True)

#------------ Interface-------------

in1 = gr.inputs.Radio(['640', '1280'], label="Tamaño de la imagen", default='640', type='value')
in2 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.45, label='NMS IoU threshold')
in3 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.50, label='Umbral de confianza (confidence threshold)')
in4 = gr.inputs.Image(type='pil', label="Original Image")

out2 = gr.outputs.Image(type="pil", label="Identificación con YOLOv5")
out3 = gr.outputs.Dataframe(label="Descripción", headers=['Cantidad','Especie'])
out4 = gr.outputs.JSON(label="JSON")
#-------------- Text-----
title = 'OceanApp'
description = """
<p>
<center>
<p>Sistema para el reconocimiento de las especies en la pesca acompañante de cerco, utilizando redes neuronales convolucionales, para una empresa del sector pesquero en los puertos del Callao y Paracas.</p>
<p><b>Nota</b>: Este modelo solo acepta imágenes de <b>lobos marinos</b> o <b>pelícanos</b> proporcionadas por empresas peruanas.</p>
<center>
<img src="https://i.pinimg.com/564x/3e/b8/f7/3eb8f7c348dffd7b3dffcafe81fbf2a6.jpg" alt="logo" width="250"/>
</center>
</center>
</p>
"""
article = "<p style='text-align: center'><a href='' target='_blank'>Para más información, haz clic para ir al white paper</a></p><p style='text-align: center'><a href='https://colab.research.google.com/drive/1j0T8gdLIa0X8fzkIgFpXDoU27BF49RUz?usp=sharing' target='_blank'>Google Colab Demo</a></p><p style='text-align: center'><a href='https://github.com/MssLune/OceanApp-Model' target='_blank'>Repo Github</a></p>"
          
examples = [['640',0.45, 0.75,'ejemplo1.jpg'], ['640',0.45, 0.75,'ejemplo2.jpg']]

iface = gr.Interface(yolo, inputs=[in1, in2, in3, in4], outputs=[out2, out3, out4], title=title,
                     description=description, article=article, examples=examples,
                     theme="huggingface", analytics_enabled=False)

iface.launch(debug=True)

"""For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36).


## Citation

[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686)

"""