#!/usr/bin/env python

from __future__ import annotations

import cv2
import gradio as gr
import numpy as np
import onnxruntime as ort

DESCRIPTION = "# [atksh/onnx-facial-lmk-detector](https://github.com/atksh/onnx-facial-lmk-detector)"

options = ort.SessionOptions()
options.intra_op_num_threads = 8
options.inter_op_num_threads = 8
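# The single ONNX model performs face detection, alignment, and 106-point
# landmark prediction in one pass; run it on CPU with the thread limits above.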
sess = ort.InferenceSession(
    "onnx-facial-lmk-detector/model.onnx", sess_options=options, providers=["CPUExecutionProvider"]
)


def run(image: np.ndarray) -> tuple[np.ndarray, list[np.ndarray]]:
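    # Gradio supplies RGB images; flip to BGR for the model input and the OpenCV
    # drawing below, and flip back to RGB before returning.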
    # Model outputs (N = number of detected faces):
    #   scores: float32 (N,), bboxes: int (N, 4), keypoints: int (N, 5, 2),
    #   aligned_images: uint8 (N, 224, 224, 3), landmarks: int (N, 106, 2),
    #   affine_matrices: float32 (N, 2, 3)
    scores, bboxes, keypoints, aligned_images, landmarks, affine_matrices = sess.run(
        None, {"input": image[:, :, ::-1].copy()}
    )

    res = image[:, :, ::-1].copy()
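    # Draw detected bounding boxes and the 106 landmark points for each face.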
    for box in bboxes:
        cv2.rectangle(res, tuple(box[:2]), tuple(box[2:]), (0, 255, 0), 1)
    for pts in landmarks:
        for pt in pts:
            cv2.circle(res, tuple(pt), 1, (255, 255, 0), cv2.FILLED)

    return res[:, :, ::-1], [face[:, :, ::-1] for face in aligned_images]


examples = ["onnx-facial-lmk-detector/input.jpg", "images/pexels-ksenia-chernaya-8535230.jpg"]

with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Input", type="numpy")
            run_button = gr.Button()
        with gr.Column():
            result = gr.Image(label="Output")
            gallery = gr.Gallery(label="Aligned Faces")
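    # Both the example images and the Run button route the input through run();
    # the click handler is also exposed under api_name="run".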
    gr.Examples(
        examples=examples,
        inputs=image,
        outputs=[result, gallery],
        fn=run,
    )
    run_button.click(
        fn=run,
        inputs=image,
        outputs=[result, gallery],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=10).launch()