#!/usr/bin/env python

from __future__ import annotations

import cv2
import gradio as gr
import numpy as np
import onnxruntime as ort

DESCRIPTION = "# [atksh/onnx-facial-lmk-detector](https://github.com/atksh/onnx-facial-lmk-detector)"

options = ort.SessionOptions()
options.intra_op_num_threads = 8
options.inter_op_num_threads = 8
sess = ort.InferenceSession(
    "onnx-facial-lmk-detector/model.onnx", sess_options=options, providers=["CPUExecutionProvider"]
)


def run(image: np.ndarray) -> tuple[np.ndarray, list[np.ndarray]]:
    # Model outputs (dtype, shape):
    #   scores:          float32, (N,)
    #   bboxes:          int,     (N, 4)
    #   keypoints:       int,     (N, 5, 2)
    #   aligned_images:  uint8,   (N, 224, 224, 3)
    #   landmarks:       int,     (N, 106, 2)
    #   affine_matrices: float32, (N, 2, 3)
    scores, bboxes, keypoints, aligned_images, landmarks, affine_matrices = sess.run(
        None, {"input": image[:, :, ::-1].copy()}  # Gradio gives RGB; the model expects BGR.
    )

    res = image[:, :, ::-1].copy()  # draw on a BGR copy so OpenCV colors are correct
    for box in bboxes:
        cv2.rectangle(res, tuple(box[:2]), tuple(box[2:]), (0, 255, 0), 1)
    for pts in landmarks:
        for pt in pts:
            cv2.circle(res, tuple(pt), 1, (255, 255, 0), cv2.FILLED)
    # Convert back to RGB for display; aligned faces come out of the model in BGR too.
    return res[:, :, ::-1], [face[:, :, ::-1] for face in aligned_images]


examples = ["onnx-facial-lmk-detector/input.jpg", "images/pexels-ksenia-chernaya-8535230.jpg"]

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Input", type="numpy")
            run_button = gr.Button()
        with gr.Column():
            result = gr.Image(label="Output")
            gallery = gr.Gallery(label="Aligned Faces")
    gr.Examples(
        examples=examples,
        inputs=image,
        outputs=[result, gallery],
        fn=run,
    )
    run_button.click(
        fn=run,
        inputs=image,
        outputs=[result, gallery],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=10).launch()
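
# Since the click handler above registers api_name="run", the demo can also be
# driven programmatically. A minimal sketch with gradio_client, assuming the
# demo is served locally on Gradio's default port (the URL and image path are
# hypothetical placeholders):
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   annotated, faces = client.predict("path/to/photo.jpg", api_name="/run")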