ElenaRyumina committed
Commit 6be1573 • Parent: 813d818
Files changed (2):
  1. app.py +52 -27
  2. app/description.py +1 -1
app.py CHANGED
@@ -21,35 +21,60 @@ def clear():
         gr.Label(value=None, num_top_classes=3, scale=1, elem_classes="dl3"),
     )
 
+md = """
+App developers: ``Elena Ryumina`` and ``Dmitry Ryumin``
 
-with gr.Blocks(css="app.css") as demo:
-    gr.Markdown(value=DESCRIPTION)
+Methodology developers: ``Elena Ryumina``, ``Denis Dresvyanskiy`` and ``Alexey Karpov``
 
-    with gr.Row():
-        with gr.Column(scale=2, elem_classes="dl1"):
-            input_image = gr.Image(type="pil")
-            with gr.Row():
-                clear_btn = gr.Button(
-                    value="Clear", interactive=True, scale=1, elem_classes="clear"
-                )
-                submit = gr.Button(
-                    value="Submit", interactive=True, scale=1, elem_classes="submit"
-                )
-        with gr.Column(scale=1, elem_classes="dl4"):
-            output_image = gr.Image(scale=1, elem_classes="dl2")
-            output_label = gr.Label(num_top_classes=3, scale=1, elem_classes="dl3")
-    gr.Examples(
-        [
-            "images/fig7.jpg",
-            "images/fig1.jpg",
-            "images/fig2.jpg",
-            "images/fig3.jpg",
-            "images/fig4.jpg",
-            "images/fig5.jpg",
-            "images/fig6.jpg",
-        ],
-        [input_image],
-    )
+Model developer: ``Elena Ryumina``
+
+TensorFlow to PyTorch model converter: ``Maxim Markitantov`` and ``Elena Ryumina``
+
+Citation
+
+If you are using EMO-AffectNetModel in your research, please consider to cite research [paper](https://www.sciencedirect.com/science/article/pii/S0925231222012656). Here is an example of BibTeX entry:
+
+<div class="highlight highlight-text-bibtex notranslate position-relative overflow-auto" dir="auto"><pre><span class="pl-k">@article</span>{<span class="pl-en">RYUMINA2022</span>,
+<span class="pl-s">title</span> = <span class="pl-s"><span class="pl-pds">{</span>In Search of a Robust Facial Expressions Recognition Model: A Large-Scale Visual Cross-Corpus Study<span class="pl-pds">}</span></span>,
+<span class="pl-s">author</span> = <span class="pl-s"><span class="pl-pds">{</span>Elena Ryumina and Denis Dresvyanskiy and Alexey Karpov<span class="pl-pds">}</span></span>,
+<span class="pl-s">journal</span> = <span class="pl-s"><span class="pl-pds">{</span>Neurocomputing<span class="pl-pds">}</span></span>,
+<span class="pl-s">year</span> = <span class="pl-s"><span class="pl-pds">{</span>2022<span class="pl-pds">}</span></span>,
+<span class="pl-s">doi</span> = <span class="pl-s"><span class="pl-pds">{</span>10.1016/j.neucom.2022.10.013<span class="pl-pds">}</span></span>,
+<span class="pl-s">url</span> = <span class="pl-s"><span class="pl-pds">{</span>https://www.sciencedirect.com/science/article/pii/S0925231222012656<span class="pl-pds">}</span></span>,
+}</div>
+"""
+
+
+with gr.Blocks(css="app.css") as demo:
+    with gr.Tab("App"):
+        gr.Markdown(value=DESCRIPTION)
+        with gr.Row():
+            with gr.Column(scale=2, elem_classes="dl1"):
+                input_image = gr.Image(type="pil")
+                with gr.Row():
+                    clear_btn = gr.Button(
+                        value="Clear", interactive=True, scale=1, elem_classes="clear"
+                    )
+                    submit = gr.Button(
+                        value="Submit", interactive=True, scale=1, elem_classes="submit"
+                    )
+            with gr.Column(scale=1, elem_classes="dl4"):
+                output_image = gr.Image(scale=1, elem_classes="dl2")
+                output_label = gr.Label(num_top_classes=3, scale=1, elem_classes="dl3")
+        gr.Examples(
+            [
+                "images/fig7.jpg",
+                "images/fig1.jpg",
+                "images/fig2.jpg",
+                "images/fig3.jpg",
+                "images/fig4.jpg",
+                "images/fig5.jpg",
+                "images/fig6.jpg",
+            ],
+            [input_image],
+        )
+    with gr.Tab("Authors"):
+        gr.Markdown(value=md)
 
     submit.click(
         fn=preprocess_and_predict,
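
The core of this change is wrapping the existing interface in a gr.Tab("App") block and rendering the new md credits string in a second gr.Tab("Authors"). Below is a minimal, self-contained sketch of that structure (assumptions: trimmed component arguments, a placeholder AUTHORS_MD string, and a stubbed preprocess_and_predict; the real function lives elsewhere in app.py):

```python
# Minimal sketch of the tabbed Blocks layout introduced by this commit.
# NOTE: preprocess_and_predict is a placeholder stub for illustration only.
import gradio as gr

DESCRIPTION = "# Static Facial Expression Recognition"
AUTHORS_MD = "App developers: ``Elena Ryumina`` and ``Dmitry Ryumin``"


def preprocess_and_predict(image):
    # Stub: echo the input image and a dummy label distribution.
    return image, {"Neutral": 0.7, "Happiness": 0.2, "Surprise": 0.1}


with gr.Blocks() as demo:
    with gr.Tab("App"):
        gr.Markdown(value=DESCRIPTION)
        with gr.Row():
            with gr.Column(scale=2):
                input_image = gr.Image(type="pil")
                submit = gr.Button(value="Submit")
            with gr.Column(scale=1):
                output_image = gr.Image(scale=1)
                output_label = gr.Label(num_top_classes=3)
    with gr.Tab("Authors"):
        gr.Markdown(value=AUTHORS_MD)

    # Event wiring can stay at the Blocks level even though the
    # components now live inside a tab.
    submit.click(
        fn=preprocess_and_predict,
        inputs=[input_image],
        outputs=[output_image, output_label],
    )

if __name__ == "__main__":
    demo.launch()
```

Registering submit.click at the Blocks level keeps the handler wiring unchanged; Gradio resolves the component references regardless of which tab they sit in.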
app/description.py CHANGED
@@ -9,7 +9,7 @@ License: MIT License
 from app.config import config_data
 
 DESCRIPTION = f"""\
-# Facial_Expression_Recognition
+# Static Facial Expression Recognition
 <div class="app-flex-container">
    <img src="https://img.shields.io/badge/version-v{config_data.APP_VERSION}-rc0" alt="Version">
    <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FElenaRyumina%2FFacial_Expression_Recognition"><img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2FElenaRyumina%2FFacial_Expression_Recognition&countColor=%23263759&style=flat" /></a>
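
For reference, a small sketch of how the retitled DESCRIPTION f-string resolves; config_data is stubbed with a hypothetical SimpleNamespace here, since the real value comes from app.config:

```python
# Sketch only: config_data is stubbed; in the app it is imported from app.config.
from types import SimpleNamespace

config_data = SimpleNamespace(APP_VERSION="1.0")  # hypothetical version string

DESCRIPTION = f"""\
# Static Facial Expression Recognition
<div class="app-flex-container">
    <img src="https://img.shields.io/badge/version-v{config_data.APP_VERSION}-rc0" alt="Version">
</div>
"""

print(DESCRIPTION)  # new heading plus a shields.io badge reading "version v1.0-rc0"
```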