# Hugging Face Spaces page chrome captured during export (not source code):
# AVCER / app.py — ElenaRyumina, commit 47aeb66 (raw / history / blame, 4.2 kB).
# Kept as a comment so the module remains importable.
"""
File: app.py
Author: Elena Ryumina and Dmitry Ryumin
Description: Description: Main application file for Facial_Expression_Recognition.
The file defines the Gradio interface, sets up the main blocks,
and includes event handlers for various components.
License: MIT License
"""
import gradio as gr
# Importing necessary components for the Gradio app
from app.description import DESCRIPTION_DYNAMIC
from app.authors import AUTHORS
from app.app_utils import preprocess_video_and_predict
def clear_static_info():
    """Return fresh (empty) replacement widgets for the static-image view.

    NOTE(review): this helper is not wired to any event handler in this
    file — presumably left over from a removed static-photo tab; confirm
    before deleting.
    """
    blank_source = gr.Image(value=None, type="pil")
    blank_overlay = gr.Image(value=None, scale=1, elem_classes="dl5")
    blank_face = gr.Image(value=None, scale=1, elem_classes="dl2")
    blank_label = gr.Label(value=None, num_top_classes=3, scale=1, elem_classes="dl3")
    return (blank_source, blank_overlay, blank_face, blank_label)
def clear_dynamic_info():
    """Return fresh (empty) replacement widgets for the video (dynamic) view.

    The tuple order matches the ``outputs`` list of the Clear button's
    click handler: video input, three plots, result textbox, and the two
    downloadable files.
    """
    cleared = (
        gr.Video(value=None),      # uploaded video
        gr.Plot(value=None),       # face images
        gr.Plot(value=None),       # waveform
        gr.Plot(value=None),       # emotion statistics
        gr.Textbox(value=None),    # textual result
        # gr.HTML(value=None),
        gr.File(value=None),       # original video download
        gr.File(value=None),       # prediction CSV download
    )
    return cleared
# Build the Gradio UI: one tab for the AVCER demo, one for author credits.
# Component creation order inside each context manager defines the layout.
with gr.Blocks(css="app.css") as demo:
    with gr.Tab("AVCER App"):
        gr.Markdown(value=DESCRIPTION_DYNAMIC)
        with gr.Row():
            with gr.Column(scale=2):
                # Left column: video upload, Clear/Submit buttons, result text.
                input_video = gr.Video(elem_classes="video1")
                with gr.Row():
                    clear_btn_dynamic = gr.Button(
                        value="Clear", interactive=True, scale=1
                    )
                    submit_dynamic = gr.Button(
                        value="Submit", interactive=True, scale=1, elem_classes="submit"
                    )
                # NOTE(review): the info string lists "Surprise" under both the
                # positive and the negative state — confirm which is intended.
                text = gr.Textbox(label="Result", info='Positive state includes Happiness, Surprise, Happily Surprised, and Happily Disgusted emotions. Negative state includes other emotions and Surprise.')
                # question_mark = gr.HTML(tooltip_html)
            with gr.Column(scale=2, elem_classes="dl4"):
                # Right column: plots produced by preprocess_video_and_predict.
                output_face = gr.Plot(label="Face images", elem_classes="img")
                # NOTE(review): variable says "heatmaps" but label/CSS say
                # waveform/audio — verify what the pipeline returns here.
                output_heatmaps = gr.Plot(label="Waveform", elem_classes="audio")
                output_statistics = gr.Plot(label="Statistics of emotions", elem_classes="stat")
                with gr.Row():
                    # Downloadable artifacts: the source video and a CSV of predictions.
                    output_video = gr.File(label="Original video",
                                           file_count="single",
                                           file_types=[".mp4"],
                                           show_label=True,
                                           interactive=False,
                                           visible=True,
                                           elem_classes="video")
                    prediction_file = gr.File(label="Prediction file",
                                              file_count="single",
                                              file_types=[".csv"],
                                              show_label=True,
                                              interactive=False,
                                              visible=True,
                                              elem_classes="pred")
        # One-click sample clips that populate the video input.
        gr.Examples(
            ["videos/video1.mp4",
             "videos/video2.mp4",
             "videos/video3.mp4",
             "videos/video4.mp4",
            ],
            [input_video],
        )
    with gr.Tab("Authors"):
        gr.Markdown(value=AUTHORS)
    # Submit: run the recognition pipeline on the uploaded video and fill
    # every output slot (plots, text, downloadable files).
    submit_dynamic.click(
        fn=preprocess_video_and_predict,
        inputs=input_video,
        outputs=[
            output_face,
            output_heatmaps,
            output_statistics,
            text,
            # question_mark,
            output_video,
            prediction_file,
        ],
        queue=True,
    )
    # Clear: reset the input and all outputs to their empty state; the
    # outputs list must stay in the same order as clear_dynamic_info's tuple.
    clear_btn_dynamic.click(
        fn=clear_dynamic_info,
        inputs=[],
        outputs=[
            input_video,
            output_face,
            output_heatmaps,
            output_statistics,
            text,
            # question_mark,
            output_video,
            prediction_file,
        ],
        queue=True,
    )
if __name__ == "__main__":
demo.queue(api_open=False).launch(share=False)