# Shell commands (run once before starting the app; not part of the script):
#   pip install insightface onnxruntime
#   pip install "gradio<4" tensorflow opencv-python matplotlib
# Note: the component API used below (source=..., gr.update) is Gradio 3.x.

import glob
import os

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image



# Load your trained model (path left as a placeholder; predict_gender will
# fail until this line is uncommented and pointed at a real file).
# model = tf.keras.models.load_model('path_to_your_model.h5')

def predict_gender(image):
    # Convert the RGB input to the BGR layout OpenCV expects, then preprocess.
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    img = cv2.resize(img, (224, 224))  # Adjust to your model's input size
    img = img / 255.0  # Scale pixel values to [0, 1]
    img = np.expand_dims(img, axis=0)  # Add the batch dimension

    prediction = model.predict(img)

    # Assuming binary classification with a single sigmoid output neuron;
    # swap the labels if your training data used the opposite encoding.
    return "Male" if prediction[0][0] < 0.5 else "Female"


def predict(video_in=None, image_in_video=None, image_in_img=None):
    # The two Run buttons below wire up different subsets of these inputs,
    # so every parameter must be optional.
    if video_in is None and image_in_video is None and image_in_img is None:
        raise gr.Error("Please upload a video or image.")
    image = image_in_video or image_in_img
    if image:
        return image

    return video_in
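

# predict() currently passes the media through untouched; neither gender
# function is wired into the UI. A minimal sketch of per-frame video
# processing is shown below, assuming the Keras model has been loaded. The
# output path, frame stride, and label placement are illustrative
# assumptions, not part of the original app.
def annotate_video(video_path, out_path="annotated.mp4", stride=10):
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 25
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*"mp4v"),
                             fps, (width, height))
    label, idx = "", 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if idx % stride == 0:  # classify every `stride`-th frame only
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            label = predict_gender(rgb)
        cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (0, 255, 0), 2)
        writer.write(frame)
        idx += 1
    cap.release()
    writer.release()
    return out_path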


def toggle(choice):
    # Show the webcam component and hide the upload one (or vice versa),
    # clearing any stale value in both.
    if choice == "webcam":
        return gr.update(visible=True, value=None), gr.update(visible=False, value=None)
    else:
        return gr.update(visible=False, value=None), gr.update(visible=True, value=None)


with gr.Blocks() as blocks:
    gr.Markdown("### Video or Image? WebCam or Upload?")
    with gr.Tab("Video") as tab:
        with gr.Row():
            with gr.Column():
                video_or_file_opt = gr.Radio(["webcam", "upload"], value="webcam",
                                             label="How would you like to upload your video?")
                video_in = gr.Video(source="webcam", include_audio=False)
                video_or_file_opt.change(fn=lambda s: gr.update(source=s, value=None),
                                         inputs=video_or_file_opt, outputs=video_in,
                                         queue=False, show_progress=False)
            with gr.Column():
                video_out = gr.Video()
        run_btn = gr.Button("Run")
        run_btn.click(fn=predict, inputs=[video_in], outputs=[video_out])
        gr.Examples(fn=predict, examples=[], inputs=[video_in], outputs=[video_out])

    with gr.Tab("Image"):
        with gr.Row():
            with gr.Column():
                image_or_file_opt = gr.Radio(["webcam", "file"], value="webcam",
                                             label="How would you like to upload your image?")
                image_in_video = gr.Image(source="webcam", type="filepath")
                image_in_img = gr.Image(source="upload", visible=False, type="filepath")

                image_or_file_opt.change(fn=toggle, inputs=[image_or_file_opt],
                                         outputs=[image_in_video, image_in_img],
                                         queue=False, show_progress=False)
            with gr.Column():
                image_out = gr.Image()
        run_btn = gr.Button("Run")
        # Bind the components to predict's parameters by name; a bare
        # inputs=[image_in_img, image_in_video] would land the uploaded
        # file in the video_in parameter.
        run_btn.click(fn=lambda cam, img: predict(image_in_video=cam, image_in_img=img),
                      inputs=[image_in_video, image_in_img], outputs=[image_out])
        gr.Examples(fn=predict, examples=[],
                    inputs=[image_in_img, image_in_video], outputs=[image_out])

blocks.queue()
blocks.launch()