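# Ultraface (version-RFB-320) face detection demo: downloads the ONNX model,
# runs it with onnxruntime, and serves a Gradio interface that draws the
# detected face boxes on an uploaded image.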
import os
import argparse

import cv2
import numpy as np
import onnxruntime as ort
import gradio as gr

from dependencies.box_utils import predict

# Download the pretrained Ultraface (version-RFB-320) face detector in ONNX format.
os.system("wget https://github.com/AK391/models/raw/main/vision/body_analysis/ultraface/models/version-RFB-320.onnx")
face_detector_onnx = "version-RFB-320.onnx"

face_detector = ort.InferenceSession(face_detector_onnx)
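

# Expand a bounding box to a square by padding its shorter side equally on both ends.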
def scale(box):
    width = box[2] - box[0]
    height = box[3] - box[1]
    maximum = max(width, height)
    dx = int((maximum - width) / 2)
    dy = int((maximum - height) / 2)

    bboxes = [box[0] - dx, box[1] - dy, box[2] + dx, box[3] + dy]
    return bboxes
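

# Crop the region of the image inside the given box.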
def cropImage(image, box):
    num = image[box[1]:box[3], box[0]:box[2]]
    return num
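

# Run the Ultraface ONNX model on a BGR image and return face boxes, labels,
# and confidence scores scaled back to the original image size.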
def faceDetector(orig_image, threshold=0.7):
    # Preprocess: BGR -> RGB, resize to the model's 320x240 input,
    # normalize to roughly [-1, 1], and reorder to NCHW with a batch axis.
    image = cv2.cvtColor(orig_image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (320, 240))
    image_mean = np.array([127, 127, 127])
    image = (image - image_mean) / 128
    image = np.transpose(image, [2, 0, 1])
    image = np.expand_dims(image, axis=0)
    image = image.astype(np.float32)

    # Run the model and decode its raw outputs into thresholded detections.
    input_name = face_detector.get_inputs()[0].name
    confidences, boxes = face_detector.run(None, {input_name: image})
    boxes, labels, probs = predict(orig_image.shape[1], orig_image.shape[0], confidences, boxes, threshold)
    return boxes, labels, probs
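

# Gradio handler: read the uploaded image, draw a rectangle around each detected
# face, and return the path of the annotated image.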
def inference(img):
    color = (255, 128, 0)

    orig_image = cv2.imread(img)
    boxes, labels, probs = faceDetector(orig_image)

    for i in range(boxes.shape[0]):
        box = scale(boxes[i, :])
        cv2.rectangle(orig_image, (box[0], box[1]), (box[2], box[3]), color, 4)
    cv2.imwrite("out.png", orig_image)
    return "out.png"
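

# Note: this uses the legacy Gradio API (gr.inputs / gr.outputs), which was removed
# in Gradio 4.x; on recent releases, gr.Image(type="filepath") serves as both the
# input and output component.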
gr.Interface(inference, gr.inputs.Image(type="filepath"), gr.outputs.Image(type="file")).launch()