# Copied megadetector section from https://huggingface.co/spaces/hlydecker/MegaDetector_v5
# Copied from https://huggingface.co/spaces/Neslihan/megadetector_dlcmodels/blob/main/app.py
print("before import")
import gradio as gr
import json
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from dlclive import DLCLive, Processor
from numpy import savetxt
import PIL
print("after import")
# Run a DLC-Live model on a cropped image and plot the predicted keypoints onto the crop.
def dlclive_pose(model, crop_np, crop, index, dlc_proc):
    dlc_live = DLCLive(model, processor=dlc_proc)
    dlc_live.init_inference(crop_np)
    keypts = dlc_live.get_pose(crop_np)  # array of shape (n_keypoints, 3): x, y, confidence
    xpose = []
    ypose = []
    for x, y, conf in keypts:
        # if conf > 0.05:  # which confidence threshold do we need here?
        xpose.append(x)
        ypose.append(y)
    plt.imshow(crop)
    plt.scatter(xpose, ypose, 40, color='cyan')
    # Render the matplotlib figure to a PIL image so gradio can display it
    canvas = plt.gca().figure.canvas
    canvas.draw()
    image = PIL.Image.frombytes('RGB', canvas.get_width_height(), canvas.tostring_rgb())
    plt.clf()
    return image
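# The uploaded output.json is expected to follow the MegaDetector batch-output format;
# a hypothetical minimal example, showing only the keys read below:
#
#   {"images": [{"file": "example.jpg",
#                "detections": [{"category": "1",                      # "1" = animal, "2" = person
#                                "conf": 0.97,                         # detector confidence
#                                "bbox": [0.1, 0.2, 0.3, 0.4]}]}]}     # normalized x, y, width, height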
def classify_image(img, file):
    primate_face_model = 'model_weights/DLC_FacialLandmarks_resnet_50_iteration-1_shuffle-1'
    human_model = 'model_weights/DLC_human_dancing_resnet_101_iteration-0_shuffle-1'
    with open(file.name, 'r') as f:
        detection_results = json.load(f)
    dlc_proc = Processor()
    # Assume the MegaDetector output contains only one image entry
    img_data = detection_results["images"][0]
    output_images = []
    for index, detections_dict in enumerate(img_data["detections"]):
        if detections_dict["conf"] > 0.8:
            x1, y1, w_box, h_box = detections_dict["bbox"]
            ymin, xmin, ymax, xmax = y1, x1, y1 + h_box, x1 + w_box
            image_width, image_height = img.size
            # MegaDetector bboxes are normalized, so scale them to pixel coordinates
            area = (xmin * image_width, ymin * image_height,
                    xmax * image_width, ymax * image_height)
            crop = img.crop(area)
            crop_np = np.asarray(crop)
            if detections_dict["category"] == "1":
                selected_model = primate_face_model
            elif detections_dict["category"] == "2":
                selected_model = human_model
            else:
                continue  # skip any other detection category
            # Until we know how to dynamically add output elements to gradio, just collect the images
            output_images.append(dlclive_pose(selected_model, crop_np, crop, index, dlc_proc))
    return output_images[0], output_images[1]  # hard-coded: our example has exactly 2 detections
input_image = gr.inputs.Image(type="pil", label="Input Image")
input_file = gr.inputs.File(label="output.json")
# Fake it till we make it: we know our example produces exactly 2 outputs
outputs = [gr.outputs.Image(type="pil", label="Output Image 1"), gr.outputs.Image(type="pil", label="Output Image 2")]
gr.Interface(fn=classify_image, inputs=[input_image, input_file], outputs=outputs, theme="huggingface").launch()
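# To try this locally (assuming gradio, dlclive and their dependencies are installed and the
# model_weights/ exports referenced above are present), run:  python app.py
# then open the printed local URL and upload an image plus its matching MegaDetector output.json.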