import gradio as gr
import torch
import numpy as np
import matplotlib.pyplot as plt  # used for the pose plots below; missing from the original imports
from PIL import Image

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # DeepLabCut-Live exported models run on the TF1-style graph API

from dlclive import DLCLive, Processor

# Load the MegaDetector v5a weights once at start-up so every request reuses the same model.
model = torch.hub.load('ultralytics/yolov5', 'custom', "model_weights/md_v5a.0.0.pt")

def yolo(im, size=640):
    """Run MegaDetector on a PIL image and return it with detections rendered."""
    g = size / max(im.size)  # gain to scale the longest side to `size`
    im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)  # resize() needs a tuple; ANTIALIAS was removed in Pillow 10
    results = model(im)  # reuse the module-level model instead of reloading the weights on every call
    results.render()  # draw bounding boxes and labels onto the image in place
    return Image.fromarray(results.imgs[0])  # note: `imgs` was renamed `ims` in later YOLOv5 releases

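# Quick local smoke test for the detector (assumes the bundled example images
# exist); not part of the app flow:
#   yolo(Image.open('data/owl.jpg')).save('owl_detections.png')
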
def dlclive_pose(model, crop_np, crop, fname, index, dlc_proc):
    """Run DeepLabCut-Live pose estimation on a detector crop, saving the
    keypoints to CSV and an annotated figure to PNG.

    `model` here is the path to an exported DeepLabCut model directory,
    not the MegaDetector network.
    """
    dlc_live = DLCLive(model, processor=dlc_proc)
    dlc_live.init_inference(crop_np)
    keypts = dlc_live.get_pose(crop_np)  # one row per bodypart: (x, y, confidence)
    np.savetxt(f"{fname}_{index}.csv", keypts, delimiter=',')

    # Slicing the x and y columns directly replaces the original np.where()
    # lookup over confidence values, which duplicated points on ties.
    xpose = keypts[:, 0]
    ypose = keypts[:, 1]
    plt.imshow(crop)
    plt.scatter(xpose, ypose, 40, color='cyan')
    plt.savefig(f"{fname}_{index}.png")
    plt.show()
    plt.clf()

dlc_proc = Processor()  # default pass-through processor for DLCLive

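# A minimal sketch, not wired into the Gradio interface, of how the two models
# could be chained: crop each MegaDetector detection and run DeepLabCut-Live on
# the crop. `dlc_model_path` is a hypothetical path to an exported DeepLabCut
# model directory; substitute the export actually shipped with this app.
def detect_and_pose(im, dlc_model_path, fname="example", conf_thres=0.5):
    results = model(im)
    boxes = results.xyxy[0].cpu().numpy()  # one row per detection: x1, y1, x2, y2, conf, cls
    for index, (x1, y1, x2, y2, conf, cls) in enumerate(boxes):
        if conf < conf_thres:
            continue  # skip low-confidence detections
        crop = im.crop((int(x1), int(y1), int(x2), int(y2)))  # PIL crop of the detection
        dlclive_pose(dlc_model_path, np.asarray(crop), crop, fname, index, dlc_proc)
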
title = "MegaDetector and DeepLabcutLive" |
|
description = "Interact with MegaDetector and DeeplabCutLive for pose estimation" |
|
article = "<p style='text-align: center'>This app uses MegaDetector YOLOv5x6 model that was trained to detect animals, humans, and vehicles in camera trap images; find out more about the project on <a href='https://github.com/microsoft/CameraTraps'>GitHub</a>. We have also integrated DeepLabCut Live for pose estimation <a href='https://github.com/DeepLabCut/DeepLabCut-live'></a>.</p>" |
|
|
|
|
|
# gr.inputs / gr.outputs are the legacy namespaces; they were removed in Gradio 3.0.
inputs = gr.inputs.Image(type="pil", label="Input Image")
outputs = gr.outputs.Image(type="pil", label="Output Image")

examples = [['data/owl.jpg'], ['data/snake.jpg'], ['data/beluga.jpg'], ['data/rhino.jpg']]

gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True)
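# If this app is later upgraded to Gradio 3.x+, note that gr.inputs/gr.outputs,
# theme="huggingface", and launch(enable_queue=True) are all gone. A rough,
# untested equivalent under the newer API would be:
#
#   demo = gr.Interface(fn=yolo,
#                       inputs=gr.Image(type="pil", label="Input Image"),
#                       outputs=gr.Image(type="pil", label="Output Image"),
#                       title=title, description=description,
#                       article=article, examples=examples)
#   demo.queue().launch()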