File size: 2,049 Bytes
d5e9efc 031ec86 d5e9efc 031ec86 b0005f4 031ec86 b0005f4 031ec86 b0005f4 d5e9efc 031ec86 d5e9efc 031ec86 b0005f4 031ec86 d5e9efc 031ec86 b0005f4 031ec86 d5e9efc 031ec86 b0005f4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 |
"""
File: app.py
Author: Elena Ryumina and Dmitry Ryumin
Description: Main application file for Facial_Expression_Recognition.
The file defines the Gradio interface, sets up the main blocks,
and includes event handlers for various components.
License: MIT License
"""
import gradio as gr
# Importing necessary components for the Gradio app
from app.description import DESCRIPTION
from app.app_utils import preprocess_and_predict
def clear():
    """Return blank replacements for the input image, output image, and output label.

    Used as the Clear button's callback to reset all three components at once.
    """
    blank_input = gr.Image(value=None, type="pil")
    blank_output = gr.Image(value=None, scale=1, elem_classes="dl2")
    blank_label = gr.Label(value=None, num_top_classes=3, scale=1, elem_classes="dl3")
    return blank_input, blank_output, blank_label
# Build the Gradio UI: two-column layout — image input and control buttons on
# the left, model outputs (annotated image + class probabilities) on the right.
with gr.Blocks(css="app.css") as demo:
    # Intro/description text rendered at the top of the page.
    gr.Markdown(value=DESCRIPTION)
    with gr.Row():
        with gr.Column(scale=2, elem_classes="dl1"):
            # Image uploaded by the user for expression recognition.
            input_image = gr.Image(type="pil")
            with gr.Row():
                clear_btn = gr.Button(
                    value="Clear", interactive=True, scale=1, elem_classes="clear"
                )
                submit = gr.Button(
                    value="Submit", interactive=True, scale=1, elem_classes="submit"
                )
        with gr.Column(scale=1, elem_classes="dl4"):
            # Prediction outputs: processed image and the top-3 class scores.
            output_image = gr.Image(scale=1, elem_classes="dl2")
            output_label = gr.Label(num_top_classes=3, scale=1, elem_classes="dl3")
    # Clickable example images; selecting one populates input_image.
    gr.Examples(
        [
            "images/fig7.jpg",
            "images/fig1.jpg",
            "images/fig2.jpg",
            "images/fig3.jpg",
            "images/fig4.jpg",
            "images/fig5.jpg",
            "images/fig6.jpg",
        ],
        [input_image],
    )
    # Submit: run preprocessing + inference and fill both output components.
    submit.click(
        fn=preprocess_and_predict,
        inputs=[input_image],
        outputs=[output_image, output_label],
        queue=True,
    )
    # Clear: reset input and both outputs via the clear() callback above.
    clear_btn.click(
        fn=clear,
        inputs=[],
        outputs=[input_image, output_image, output_label],
        queue=True,
    )

if __name__ == "__main__":
    # Launch locally with the HTTP API closed and no public share link.
    demo.queue(api_open=False).launch(share=False)
|