# Detection-video / app.py
# Uploaded by carlosalonso — commit 369a97a ("update")
import os

# Hugging Face Spaces does not preinstall detectron2 (it has no PyPI
# wheels), so install it from GitHub on the first run, then import the
# rest of the dependencies.
try:
    import detectron2
except ImportError:
    # Only a failed import should trigger the install; a bare `except:`
    # would also hide KeyboardInterrupt/SystemExit and real bugs.
    os.system('pip install git+https://github.com/facebookresearch/detectron2.git')
    os.system('pip install altair')
import altair
import gradio as gr
import uuid
import torch
import numpy as np
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
# Static UI copy for the Gradio interface (HTML snippets, Spanish text).
title = '<center><img src = "https://images.squarespace-cdn.com/content/v1/5573469fe4b0061829d437e6/1591631182400-7DJR03RV6ZOCN0TBPRD7/white-deloitte-logo1.jpg" width="130" height="20"></center><p>Detectron2 Video Detection</p>'
description = 'Implementación de Detectron2 en la detección de vídeo. Sube un vídeo, dale a submit y espera unos minutos a ver el output de la imagen con los objetos detectados. Puedes descargar el vídeo haciendo click derecho en el vídeo y clickando en "Save As"'
article = '<p>Conoce más en: <a href="https://www2.deloitte.com/es/es/pages/strategy-operations/solutions/analytics-and-cognitive.html">Visita Deloitte AI&Data</a></p><p>Desarrollado por Carlos y Lucía</p>'
# Debug: list the app directory at startup. The path only exists on
# Hugging Face Spaces; skip silently elsewhere instead of crashing.
try:
    print(os.listdir('/home/user/app'))
except FileNotFoundError:
    pass
def inference(video):
    """Run the Detectron2 panoptic-segmentation demo on an uploaded video.

    Renames the uploaded file so its path contains no spaces (the path is
    interpolated into a shell command), shells out to the detectron2 demo
    script, and returns the name of the generated output video.

    Args:
        video: Filesystem path of the uploaded input video.

    Returns:
        Name of the output .mp4 file (random UUID-based, to avoid
        collisions between concurrent requests).
    """
    import shlex  # local import: only needed to quote the shell command

    print('Input video:\n')
    print(video)
    # Spaces in the path would split the shell command below, so rename
    # the file on disk to a space-free name. Skip the no-op rename.
    new_name = video.replace(" ", "_")
    if new_name != video:
        os.rename(video, new_name)
    print(video, new_name)
    output = str(uuid.uuid4()) + '.mp4'
    print('Output video:\n')
    print(output)
    print('\n\nAntes de lanzar el modelo\n\n')
    # shlex.quote guards against shell metacharacters in the file names;
    # raw concatenation would break (or allow injection) on odd paths.
    orden = (
        'python carpeta_deteccion/demo/demo.py'
        ' --config-file carpeta_deteccion/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml'
        ' --video-input ' + shlex.quote(new_name) +
        ' --confidence-threshold 0.6'
        ' --output ' + shlex.quote(output) +
        ' --opts MODEL.WEIGHTS detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_101_3x/139514519/model_final_cafdb1.pkl'
    )
    os.system(orden)
    print(orden)
    # Debug listing of the Spaces app directory; skip silently when the
    # hard-coded path does not exist (e.g. running locally).
    try:
        print(os.listdir('/home/user/app'))
    except FileNotFoundError:
        pass
    print('\n\nTras lanzar el modelo\n\n')
    return output
# Build and serve the Gradio demo: one uploaded video in, one annotated
# video out, with the branded title/description/article copy.
demo = gr.Interface(
    fn=inference,
    inputs=[gr.inputs.Video(source="upload", type="mp4", label="Original Video")],
    outputs=gr.outputs.Video(type="mp4", label="Detect Object - Video"),
    title=title,
    description=description,
    article=article,
    examples=[],
)
demo.launch()