# -*- coding: utf-8 -*-
"""Deploy OceanApp demo.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1j0T8gdLIa0X8fzkIgFpXDoU27BF49RUz?usp=sharing

![ ](https://i.pinimg.com/564x/3e/b8/f7/3eb8f7c348dffd7b3dffcafe81fbf2a6.jpg)

# Model

YOLO is a family of compound-scaled object detection models trained on the COCO
dataset. It includes simple functionality for Test Time Augmentation (TTA),
model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite.

## Gradio Inference

![](https://i.ibb.co/982NS6m/header.png)

This notebook is optionally accelerated with a GPU runtime.

----------------------------------------------------------------------

YOLOv5 Gradio demo

*Author: Ultralytics LLC and Gradio*

# Code
"""

#!pip install -qr https://raw.githubusercontent.com/ultralytics/yolov5/master/requirements.txt gradio  # install dependencies

import gradio as gr
import pandas as pd
import torch
import logging
import json
import re
import os
import boto3
from botocore.exceptions import NoCredentialsError
import tempfile
import io
from PIL import Image

# Example images
torch.hub.download_url_to_file('https://i.pinimg.com/564x/18/0b/00/180b00e454362ff5caabe87d9a763a6f.jpg', 'ejemplo1.jpg')
torch.hub.download_url_to_file('https://i.pinimg.com/564x/3b/2f/d4/3b2fd4b6881b64429f208c5f32e5e4be.jpg', 'ejemplo2.jpg')

# AWS credentials are read from the environment, never hard-coded
aws_access_key_id = os.environ['aws_access_key_id']
aws_secret_access_key = os.environ['aws_secret_access_key']
region = os.environ['region']

# JSON and DataFrame list helpers
def removeStr(string):
    """Strip all spaces from a string."""
    return string.replace(" ", "")

def listJSON(a, b, c, d, e, f, resImg):
    """Assemble image name, size, per-species counts and the S3 upload status
    (resImg) into a JSON object."""
    x = re.findall("obo Mar", d)
    y = re.findall("elica", d)
    z = re.findall("elica", f)
    if x:
        d = 'Lobo marino'
    if y:
        d = 'Pelicano'
    if z:
        f = 'Pelicano'
    if d == 'Lobo marino' or d == 'Pelicano':
        if d == 'Pelicano\nSp' or d == 'Pelicano\nS':
            d = 'Pelicano'
        if f != 'Pelicano':
            strlista = '"detail":[{"quantity":"' + str(removeStr(c)) + '","description":"' + str(d) + '"}]'
        else:
            strlista = ('"detail":[{"quantity":"' + str(removeStr(c)) + '","description":"' + str(d)
                        + '"},{"quantity":"' + str(removeStr(e)) + '","description":"' + str(f) + '"}]')
        strlist = ('{"image":"' + str(removeStr(a)) + '","size":"' + str(removeStr(b)) + '",'
                   + strlista + ',' + resImg + '}')
        json_string = json.loads(strlist)
        return json_string

def arrayLista(a, b, c, d):
    """Build a DataFrame with one row per detected species (quantity, species)."""
    x = re.findall("obo Mar", b)
    y = re.findall("elica", b)
    z = re.findall("elica", d)
    if x:
        b = 'Lobo marino'
    if y:
        b = 'Pelicano'
    if z:
        d = 'Pelicano'
    if b == 'Lobo marino' or b == 'Pelicano':
        strlist = []
        strlist2 = []
        strlist.append(removeStr(a))
        strlist.append(b)
        if d == 'Pelicano':
            strlist2.append(removeStr(c))
            strlist2.append(d)
        strlista = [strlist, strlist2]
        df = pd.DataFrame(strlista, columns=['Quantity', 'Species'])
        return df

# Temporary image saved locally and pushed to S3 via upload_file
def tempFileJSON(img_file):
    temp = tempfile.NamedTemporaryFile(mode="wb")
    with temp as jpg:
        jpg.write(img_file)
        jpg.flush()  # make sure the bytes hit disk before uploading by name
        print(jpg.name)
        uf = upload_file(jpg.name)
    return uf

# Uploading images to S3
def upload_file(file_name, bucket=None, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: status JSON fragment if the file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = os.path.basename(file_name + ".jpg")
    if bucket is None:
        bucket = 'oceanapp'

    s3_client = boto3.client('s3',
                             aws_access_key_id=aws_access_key_id,
                             aws_secret_access_key=aws_secret_access_key,
                             region_name=region)  # region from the environment

    # Upload the file
    try:
        with open(file_name, "rb") as f:
            s3_client.upload_fileobj(f, bucket, object_name)
        s3_url = f"https://{bucket}.s3.amazonaws.com/{object_name}"
        status = '"url_details":[{"statusCode":200, "s3_url":"' + s3_url + '"}]'
        print(s3_url)
    except FileNotFoundError:
        print("The file was not found")
        return False
    except NoCredentialsError as e:
        logging.error(e)
        return False
    return status

# Model
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt',
                       force_reload=True, autoshape=True)  # local model or Google Colab

def yolo(size, iou, conf, im):
    """Wrapper fn for Gradio."""
    try:
        g = (int(size) / max(im.size))  # gain
        # Downscale very large images more aggressively before inference
        if max(im.size) > 900 and g > 0.4:
            g = 0.3
        if max(im.size) > 2000 and g > 0.4:
            g = 0.1
        im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10
        model.iou = iou
        model.conf = conf
        results2 = model(im)  # inference
        results2.render()  # updates results2.ims with boxes and labels
        results3 = str(results2)

        # Convert the rendered image to JPEG bytes
        pil_im = Image.fromarray(results2.ims[0])
        b = io.BytesIO()
        pil_im.save(b, 'jpeg')
        im_bytes = b.getvalue()
        fileImg = tempFileJSON(im_bytes)

        # NOTE: fixed character slices of the results summary string; fragile,
        # and only valid for this model's one/two-class output format
        lista = listJSON(results3[0:9], results3[11:18], results3[19:21], results3[22:32],
                         results3[34:36], results3[36:45], fileImg)
        lista2 = arrayLista(results3[19:21], results3[22:32], results3[34:36], results3[37:45])
        return Image.fromarray(results2.ims[0]), lista2, lista
    except Exception as e:
        logging.error(e, exc_info=True)

# ------------ Interface -------------
in1 = gr.inputs.Radio(['640', '1280'], label="Image size", default='640', type='value')
in2 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.45, label='NMS IoU threshold')
in3 = gr.inputs.Slider(minimum=0, maximum=1, step=0.05, default=0.50, label='Confidence threshold')
in4 = gr.inputs.Image(type='pil', label="Original Image")

out2 = gr.outputs.Image(type="pil", label="YOLOv5 identification")
out3 = gr.outputs.Dataframe(label="Description", headers=['Quantity', 'Species'])
out4 = gr.outputs.JSON(label="JSON")

# -------------- Text -----
title = 'OceanApp'
description = """
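# The listJSON/arrayLista helpers above parse fixed character slices of YOLOv5's
# results summary string, i.e. str(results), which reads roughly like
# "image 1/1: 720x1280 2 Lobo Marinos, 1 Pelicano" followed by timing info.
# A minimal sketch of the payload listJSON assembles from such slices
# (illustrative values; the "url_details" blob normally comes from upload_file):
#
# >>> listJSON('image 1/1', '720x1280', ' 2', 'Lobo Marin', ' 1', 'Pelicano',
# ...          '"url_details":[{"statusCode":200, "s3_url":"https://oceanapp.s3.amazonaws.com/tmp123.jpg"}]')
# {'image': 'image1/1', 'size': '720x1280',
#  'detail': [{'quantity': '2', 'description': 'Lobo marino'},
#             {'quantity': '1', 'description': 'Pelicano'}],
#  'url_details': [{'statusCode': 200, 's3_url': 'https://oceanapp.s3.amazonaws.com/tmp123.jpg'}]}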

A system for recognizing bycatch species in purse-seine fishing, using convolutional neural networks, built for a company in the fishing industry at the ports of Callao and Paracas.

Note: this model only accepts images of sea lions (Lobo marino) or pelicans (Pelicano) provided by Peruvian companies.


""" article ="

For more info, click to go to the white paper

Google Colab Demo

Repo Github

" examples = [['640',0.45, 0.75,'ejemplo1.jpg'], ['640',0.45, 0.75,'ejemplo2.jpg']] iface = gr.Interface(yolo, inputs=[in1, in2, in3, in4], outputs=[out2,out3,out4], title=title, description=description, article=article, examples=examples,theme="huggingface", analytics_enabled=False).launch( debug=True) iface.launch() """For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36). ## Citation [![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686) """