import os

import cv2
import gradio as gr
import uvicorn
from fastapi import FastAPI

# from cvut.logger import Logger
# from db.libs.config_json import LOG_ROOT
from libs import (HumanAG_ONNX, PersonDetectorAttrib, PoseDetector,
                  display_frame_pose)

# FastAPI app; only used by the optional mount path at the bottom of the file
app = FastAPI()
# ------------------------------------------------------------------------------
# Utils
# ------------------------------------------------------------------------------
GRADIO_PORT = int(os.environ.get("GRADIO_PORT", 7860))
GRADIO_USERNAME = os.environ.get("GRADIO_USERNAME", '')
GRADIO_PASSWORD = os.environ.get("GRADIO_PASSWORD", '')
# build logger (disabled here; swap in a cvut Logger to enable file logging)
# logger = Logger('gradio', LOG_ROOT)
logger = None
# build detector
person_detector = PersonDetectorAttrib(
labels={0: 'person'},
onnx_file="./model_ort.onnx",
infer_wh=(800, 640),
logger=logger,
)
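# Assumed shape of person_detector.infer() results, judging from how `objs`
# is consumed below: a list of dicts with integer corner points and a score,
# e.g. [{'points': [x1, y1, x2, y2], 'confidence': 0.87}, ...]. This is an
# inference from usage, not a documented contract of PersonDetectorAttrib.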
# build pose detector
pose_detector = PoseDetector(
onnx_file="./rtmpose_s.onnx",
logger=logger
)
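# Assumed shape of pose_detector.infer() results, judging from the loops
# below: a list of keypoint dicts such as [{'point': [x, y]}, ...], with
# coordinates relative to the image passed in (here, the person crop).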
# build age/gender classifier
ages_genders_detector = HumanAG_ONNX(
    model_path="./pa100k_repvggB0_multihead_v3.onnx",
    # logger=logger
)
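# Assumed shape of ages_genders_detector.infer([crop]) results, judging from
# the indexing below: a dict of parallel lists, one entry per input crop,
# e.g. {'ages': ['adult', ...], 'genders': ['female', ...]} (labels assumed).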
def detect_objects(img, thr):
    # run the person detector on the input image
    objs = person_detector.infer(img, threshold=thr)
    # logger.info(f"Number of objs: {len(objs)}")
    # draw the detections and collect one label string per detected person
    img, class_names = PersonDetectorAttrib.visualize(img, objs)
    output_text = ""
    for class_name in class_names:
        output_text += class_name + "\n"
    return img, output_text
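# Minimal usage sketch for detect_objects (hypothetical image path; not run
# at import time):
#   frame = cv2.imread("test.jpg")
#   vis, info = detect_objects(frame, 0.3)
#   cv2.imwrite("test_out.jpg", vis)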
def detect_poses(img, thr):
    # run the person detector, then estimate a pose inside each person box
    objs = person_detector.infer(img, threshold=thr)
    # keep confident detections only (enforces a 0.5 floor on top of the slider)
    objs = [obj for obj in objs if obj['confidence'] > 0.5]
    output_text = ""
    for i, obj in enumerate(objs):
        bbox = obj['points']
        # crop the person region and run the pose model on it
        img_crop = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
        objs_pose = pose_detector.infer(img_crop, threshold=0.0)
        # keypoints are crop-relative; shift them back to full-frame coordinates
        for each_point in objs_pose:
            each_point['point'][0] += bbox[0]
            each_point['point'][1] += bbox[1]
        # flatten the pose dicts into a plain list of [x, y] points
        objs_pose = [each_point['point'] for each_point in objs_pose]
        display_frame_pose(img, [bbox], [objs_pose])
        # draw the person bbox
        img = cv2.rectangle(
            img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), 2)
        # append the bbox ID and its keypoints to the text output
        output_text += f"ID-{i}: " + "\t".join(
            [f"point-{j}-[x-{int(point[0])},y-{int(point[1])}]"
             for j, point in enumerate(objs_pose)]) + "\n"
        # draw a filled background at the top-left of the bbox so the
        # ID label stays readable
        text_size = cv2.getTextSize(
            f"ID-{i}", cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
        img = cv2.rectangle(img, (bbox[0], bbox[1] - text_size[0][1] - 10),
                            (bbox[0] + text_size[0][0], bbox[1]), (0, 0, 0), cv2.FILLED)
        img = cv2.putText(img, f"ID-{i}", (bbox[0], bbox[1] - 10),
                          cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
    return img, output_text
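# Coordinate note: pose keypoints come back relative to the person crop, so
# the loop above shifts each one by the crop origin. For example, a crop-local
# keypoint at (10, 20) inside a bbox whose top-left corner is (100, 200) lands
# at (110, 220) in the full frame.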
def detect_ages_gender(img, thr):
    # run the person detector, then classify age/gender on each person crop
    objs = person_detector.infer(img, threshold=thr)
    # keep confident detections only (enforces a 0.5 floor on top of the slider)
    objs = [obj for obj in objs if obj['confidence'] > 0.5]
    # scale font and line thickness relative to a 1284-px-wide reference frame
    width = img.shape[1]
    font_size = width / 1284
    thickness = int((width / 1284) * 4)
    output_text = ""
    for i, obj in enumerate(objs):
        bbox = obj['points']
        # crop the person region and run the age/gender classifier on it
        img_crop = img[bbox[1]:bbox[3], bbox[0]:bbox[2]]
        objs_ag = ages_genders_detector.infer([img_crop])
        # compose the "ID-i: age_gender" label for this person
        ages_genders = f"ID-{i}: " + \
            objs_ag['ages'][0] + "_" + objs_ag['genders'][0]
        # measure the label so its background box fits the text
        (text_width, text_height) = cv2.getTextSize(
            ages_genders, cv2.FONT_HERSHEY_SIMPLEX, font_size, thickness=thickness)[0]
        # anchor the label at the top-left corner of the bbox
        text_offset_x = bbox[0]
        text_offset_y = bbox[1]
        # background box coords with a small padding of two pixels
        box_coords = (
            (text_offset_x, text_offset_y),
            (text_offset_x + text_width + 2, text_offset_y - text_height - 2))
        cv2.rectangle(img, box_coords[0],
                      box_coords[1], (0, 0, 0), cv2.FILLED)
        # draw bbox = [x1, y1, x2, y2]
        img = cv2.rectangle(
            img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 0, 255), thickness)
        # draw the label text just above the bbox
        cv2.putText(img, ages_genders, (bbox[0], bbox[1] - 2),
                    cv2.FONT_HERSHEY_SIMPLEX, font_size, (0, 0, 255), thickness, cv2.LINE_AA)
        output_text += ages_genders + "\n"
    return img, output_text
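# Scaling note: font_size = width / 1284 normalizes the overlay text to a
# 1284-px-wide reference frame, so a 642-px-wide image gets
# font_size = 0.5 and thickness = int(0.5 * 4) = 2.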
# ------------------------------------------------------------------------------
# Main execution
# ------------------------------------------------------------------------------
if __name__ == "__main__":
person_detector_gr = gr.Interface(
fn=detect_objects,
inputs=[
gr.Image(type="filepath", label="Drop Image",
width=800, height=480),
gr.Slider(label="Detection Threshold",
minimum=0, maximum=1, step=0.01, value=0.3),
],
outputs=[
gr.Image(type="numpy", label="Processed Image",
width=800, height=480),
gr.Textbox(label="Person Detection info:")
],
title="Model Testing",
description="Drag an image onto the box",
allow_flagging="never",
# examples=image_list,
)
pose_detector_gr = gr.Interface(
fn=detect_poses,
inputs=[
gr.Image(type="numpy", label="Drop Image",
width=800, height=480),
gr.Slider(label="Detection Threshold",
minimum=0, maximum=1, step=0.01, value=0.3),
],
outputs=[
gr.Image(type="numpy", label="Processed Image",
width=800, height=480),
gr.Textbox(label="Human pose detection info:"),
],
title="Model Testing",
description="Drag an image onto the box",
allow_flagging="never",
# examples=image_list,
)
ages_genders_gr = gr.Interface(
fn=detect_ages_gender,
inputs=[
gr.Image(type="numpy", label="Drop Image",
width=800, height=480),
gr.Slider(label="Detection Threshold",
minimum=0, maximum=1, step=0.01, value=0.3),
],
outputs=[
gr.Image(type="numpy", label="Processed Image",
width=800, height=480),
gr.Textbox(label="Age Gender detection info:"),
],
title="Model Testing",
description="Drag an image onto the box",
allow_flagging="never",
# examples=image_list,
)
    # demo = gr.Blocks(title="Hinobus Models")
    # with demo.clear():
    demo = gr.TabbedInterface(
        [person_detector_gr, pose_detector_gr, ages_genders_gr],
        ["Person Detector", "Pose Detector", "Ages Genders"]
    )
    # enable authentication only when both credentials are configured;
    # passing auth=('', '') to launch() would otherwise demand a login
    # with empty credentials
    auth = None
    if GRADIO_USERNAME != '' and GRADIO_PASSWORD != '':
        auth = (GRADIO_USERNAME, GRADIO_PASSWORD)
    # run app
    # Alternative: mount the demo inside the FastAPI app instead:
    # app = gr.mount_gradio_app(app, demo, path="/")
    # uvicorn.run(app, host="0.0.0.0", port=GRADIO_PORT)
    demo.launch(server_name="0.0.0.0", server_port=GRADIO_PORT, auth=auth,
                auth_message="Please enter username and password for login")
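# Example invocation (env-var names match the ones read at the top of this
# file; credentials here are placeholders):
#   GRADIO_USERNAME=admin GRADIO_PASSWORD=secret GRADIO_PORT=7860 python app.py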