# gender-age/app.py
import warnings
from dataclasses import dataclass

import cv2
import gradio as gr
import numpy as np

from mivolo.predictor import Predictor
from utils import download_file, get_jpg_files

# Suppress Python warnings to keep the Space logs readable
warnings.filterwarnings("ignore")
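
# download_file and get_jpg_files come from the local utils module (not shown
# here); based on how they are called below, download_file(url, path) fetches a
# remote file to a local path and get_jpg_files(dir) lists .jpg images for the
# Gradio examples gallery.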


@dataclass
class Cfg:
    # Configuration object consumed by mivolo.predictor.Predictor
    detector_weights: str
    checkpoint: str
    device: str = "cpu"
    with_persons: bool = True
    disable_faces: bool = False
    draw: bool = True


class ValidImgDetector:
    predictor = None

    def __init__(self):
        # Fetch the YOLOv8 person/face detector and the MiVOLO age/gender
        # checkpoint from the Hugging Face Hub, then build the predictor.
        detector_file = "yolov8x_person_face.pt"
        age_gender_file = "model_imdb_cross_person_4.22_99.46.pth.tar"
        detector_path = f"./model/{detector_file}"
        age_gender_path = f"./model/{age_gender_file}"
        domain = "https://huggingface.co/monet-joe/human-detector/resolve/main/"
        download_file(f"{domain}{detector_file}", detector_path)
        download_file(f"{domain}{age_gender_file}", age_gender_path)
        predictor_cfg = Cfg(detector_path, age_gender_path)
        self.predictor = Predictor(predictor_cfg)

    def _detect(
        self,
        image: np.ndarray,
        score_threshold: float,
        iou_threshold: float,
        mode: str,
        predictor: Predictor,
    ) -> tuple:
        # Forward the confidence / IoU thresholds to the underlying YOLO detector
        predictor.detector.detector_kwargs["conf"] = score_threshold
        predictor.detector.detector_kwargs["iou"] = iou_threshold
        if mode == "Use persons and faces":
            use_persons = True
            disable_faces = False
        elif mode == "Use persons only":
            use_persons = True
            disable_faces = True
        elif mode == "Use faces only":
            use_persons = False
            disable_faces = False
        else:
            raise ValueError(f"Unknown detection mode: {mode}")

        predictor.age_gender_model.meta.use_persons = use_persons
        predictor.age_gender_model.meta.disable_faces = disable_faces
        detected_objects, out_im = predictor.recognize(image)
        has_child, has_female, has_male = False, False, False
        if len(detected_objects.ages) > 0:
            has_child = min(detected_objects.ages) < 18
            has_female = "female" in detected_objects.genders
            has_male = "male" in detected_objects.genders
        # out_im is drawn in OpenCV's BGR order; flip to RGB for Gradio display
        return out_im[:, :, ::-1], has_child, has_female, has_male

    def valid_img(self, img_path):
        # cv2.imread returns the image in BGR channel order
        image = cv2.imread(img_path)
        return self._detect(image, 0.4, 0.7, "Use persons and faces", self.predictor)
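

# Minimal standalone usage sketch (no web UI); "examples/photo.jpg" is a
# hypothetical path, shown only to illustrate what valid_img returns:
#
#   detector = ValidImgDetector()
#   annotated_rgb, has_child, has_female, has_male = detector.valid_img("examples/photo.jpg")
#   print(has_child, has_female, has_male)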


# Build the detector once at startup so the weights are downloaded and loaded
# a single time instead of on every request.
detector = ValidImgDetector()


def inference(photo):
    return detector.valid_img(photo)


iface = gr.Interface(
    fn=inference,
    inputs=gr.Image(label="Upload photo", type="filepath"),
    outputs=[
        gr.Image(label="Detection result", type="numpy"),
        gr.Textbox(label="Has child"),
        gr.Textbox(label="Has female"),
        gr.Textbox(label="Has male"),
    ],
    examples=get_jpg_files("./examples"),
)

iface.launch()
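
# Note: for local debugging, the launch() call above also accepts optional
# arguments such as share=True (temporary public link) or
# server_name="0.0.0.0" (bind to all interfaces); the bare call keeps Gradio's
# defaults, which works on Hugging Face Spaces.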