Feat: Add instructions page

app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
import pandas as pd
import os
import gradio as gr
-
+from pathlib import Path

css = """#myProgress {
    width: 100%;
@@ -39,7 +39,8 @@ css = """#myProgress {

h4 {
    margin: 0px;
-    color: var(--
+    color: var(--block-title-background-fill);
+    background: var(--block-title-text-color);
}

#instructions {
@@ -53,8 +54,12 @@ css = """#myProgress {
    background: var(--block-background-fill);
    padding: var(--block-label-padding);
}
+
+
"""

+
+
js_progress_bar = """
function move(n_ann, total_ann) {

@@ -106,7 +111,6 @@ js_progress_bar = """


intro_html = """
-<h1>Emotionality in Speech</h1>

<div class="content-box">
    <p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>
@@ -118,15 +122,15 @@ intro_html = """
    <li><h4>Neutral</h4></li>
</ul>

-<p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection—303 hours of interviews! (That’s 13 days of nonstop listening!)</p>
+<p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection—<strong>303 hours of interviews! (That’s 13 days of nonstop listening! 😮)</strong></p>
</div>

-<h2>
+<h2>The ACT-UP Oral History Project</h2>

<div class="content-box">
-<p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project. This archive features interviews with individuals who were part of ACT UP during the late 1980s and early 1990s, amidst the AIDS epidemic
-
-
+<p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project, developed by Sarah Schulman and Jim Hubbard. This archive features interviews with individuals who were part of ACT UP during the late 1980s and early 1990s, amidst the AIDS epidemic. In each video, the subjects talk about their life before the epidemic, how they were affected by AIDS, and their work in ACT UP. The project comprises 187 interviews with members of ACT UP New York during the epidemic of the late 1980s and early 1990s.</p>
+<p>Schulman sought to document the group’s public activism and capture the atmosphere among its members at the height of the crisis:</p><h4>"I wanted to show how crazy and desperate everyone was at that point, organizing political funerals and riding around in vans with the bodies of their dead friends. I wanted to convey what the suffering was like at that point."</h4>
+<p>Sullivan describes the archive as a space that embodies challenging emotions, such as the pervasive fear of death, grief, and what Jim Hubbard refers to as the activists' "righteous anger."</p>
</div>

<h2>What will you be annotating?</h2>
@@ -136,27 +140,74 @@ intro_html = """

<ul>
    <li>
-        <
+        <h4>Predominant Emotion:</h4> The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
    </li>

    <li>
-        <
+        <h4>Perceived Emotion at the Time of Recording:</h4> In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
    </li>

    <li>
-        <
+        <h4>Speech Emotionality:</h4> Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
    </li>
</ul>

-<p>Further, you will be asked to fill "How confident you are that the annotated emotion is present in the recording?" from a scale of 0 to 10, with 0 being "not at all confident" and 1 being "certain, completely confident".</p>
+<p>Further, you will be asked to answer <strong>"How confident are you that the annotated emotion is present in the recording?"</strong> on a scale of 0 to 10, with 0 being "not at all confident" and 10 being "certain, completely confident". There will be a <strong>"Comment/Feedback"</strong> section where you can make notes. Below the audio, there will be an option to view the transcribed sentence. Please use this only if you are struggling to understand the audio.</p>
+</div>
+"""

-
+examples_explanation = """<h3>Audio examples</h3>
+<div class="content-box">
+<p>Let's check out examples for the four emotions to annotate. Note that all these examples use the same sentence and are acted out, making the emotionality in speech more apparent. In a real-world setting, emotionality is more complex, so you will find a list of additional emotions within each of the three emotion categories (Happy, Sad, and Angry) to assist you during annotation.</p>
+</div>"""
+side_bar_html = """
+<h3>The task</h3>
+<div class="content-box">
+<p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
+<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
+    <span>✅</span>
+    <h4 style="margin: 0;">Predominant Emotion</h4>
+</div>
+
+<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
+    <span>✅</span>
+    <h4 style="margin: 0;">Perceived Emotion at the Time of Recording</h4>
+</div>
+<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
+    <span>✅</span>
+    <h4 style="margin: 0;">Speech Emotionality</h4>
+</div>
+
+</div>
+<h3>Major subclasses</h3>

-
+<div class="content-box">
+<table border="1">
+    <thead>
+        <tr>
+            <th>Emotion Label</th>
+            <th>Major Subclasses</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td>Happiness</td>
+            <td>Affection, Goodwill, Joy, Satisfaction, Zest, Acceptance, Pride, Hope, Excitement, Relief, Passion, Caring</td>
+        </tr>
+        <tr>
+            <td>Sadness</td>
+            <td>Suffering, Regret, Displeasure, Embarrassment, Sympathy, Depression</td>
+        </tr>
+        <tr>
+            <td>Anger</td>
+            <td>Irritability, Torment, Jealousy, Disgust, Rage, Frustration</td>
+        </tr>
+    </tbody>
+</table>
</div>
"""

-
+persistent_storage = Path('/data')
# List of all audio files to annotate
file_list = pd.read_excel(os.path.join('combined_annotations.xlsx'))
total_annotations = len(file_list)
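A note on the new `persistent_storage = Path('/data')` line: on Hugging Face Spaces, `/data` is where the optional persistent-storage volume is mounted, so files written there survive restarts and rebuilds. The sketch below only illustrates that idea; the `ensure_storage` fallback and the `save_annotations_csv` helper are assumptions added here, not part of this commit.

```python
from pathlib import Path

import pandas as pd


def ensure_storage() -> Path:
    """Return the persistent /data volume if it is mounted, else the working dir."""
    data = Path("/data")
    return data if data.is_dir() else Path(".")


persistent_storage = ensure_storage()


def save_annotations_csv(annotations: pd.DataFrame, participant_id: str) -> Path:
    """Write one participant's annotations into persistent storage (illustrative)."""
    out_path = persistent_storage / f"{participant_id}_annotations.csv"
    annotations.to_csv(out_path, index=False)
    return out_path
```

Without persistent storage enabled, anything written to the container filesystem disappears when the Space restarts, which is presumably why the CSVs and example WAVs were moved under `/data` in this commit.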
@@ -196,13 +247,13 @@ def save_annotation(emotions, confidence, comments, participant_id):
    else:
        annotations.loc[len(annotations)] = [sample_id, sentence, emotions, confidence, comments]
        ann_completed.value += 1
-        annotations.to_csv(f"{participant_id}_annotations.csv", index=False) # Save to a CSV file
+        annotations.to_csv(f"{persistent_storage}/{participant_id}_annotations.csv", index=False) # Save to a CSV file


def next_example(emotions, confidence, comments, participant_id):
    """Move to the next example."""
    if emotions == "Blank":
-        gr.Warning("Please fill out the emotion section")
+        gr.Warning("Please fill out the emotion section. 'Blank' is not a valid emotion.")
    else:

        save_annotation(emotions, confidence, comments, participant_id)
@@ -227,7 +278,7 @@ def deactivate_participant_id(participant_id, lets_go):
def activate_elements(emotions, confidence, comments, next_button, previous_button):

    emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = "Blank", visible = True)
-    confidence = gr.Slider(label="
+    confidence = gr.Slider(label="How confident are you that the annotated emotion is present in the recording? (%)", minimum=0, maximum=100, step=10, visible = True)
    comments = gr.Textbox(label="Comments", visible =True)
    previous_button = gr.Button("Previous Example", visible = True)
    next_button = gr.Button("Next Example",visible = True)
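`activate_elements` reveals the hidden widgets by returning freshly constructed components, which Gradio accepts as updates. An equivalent, lighter-weight pattern (not what this commit does) is to return `gr.update(...)` objects so only the changed properties are sent to the front end; a sketch assuming Gradio 4.x:

```python
import gradio as gr


def activate_elements():
    """Flip the annotation widgets to visible without rebuilding them."""
    return (
        gr.update(visible=True),                                   # emotions radio
        gr.update(visible=True, minimum=0, maximum=100, step=10),  # confidence slider
        gr.update(visible=True),                                   # comments textbox
        gr.update(visible=True),                                   # previous button
        gr.update(visible=True),                                   # next button
    )
```

The outputs list wired to `lets_go.click` would stay the same; only the callback body changes.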
@@ -243,14 +294,32 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
    with gr.Sidebar():
        participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
        lets_go = gr.Button("Let's go!")
+        cheat_sheet = gr.HTML(side_bar_html, padding = False)
        #happy_words = gr.Textbox(label = "Happy")
-

    with gr.Tab("Instructions", elem_id = 'instructions'):
        instructions = gr.HTML(intro_html, padding = False)

-
-
+        with gr.Blocks("Audio examples"):
+            description = gr.HTML(examples_explanation, padding = False)
+
+            with gr.Accordion(label = "Neutral", open= False):
+                neutral_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/neutral.wav', label = "Neutral")
+
+            with gr.Accordion(label = "Happy", open = False):
+                happy_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_low.wav', label = "Happy (Low Intensity)")
+                happy_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/happy_intense.wav', label = "Happy (High Intensity)")
+
+            with gr.Accordion(label = "Sad", open = False):
+                sad_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_low.wav', label = "Sad (Low Intensity)")
+                sad_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/sad_intense.wav', label = "Sad (High Intensity)")
+
+            with gr.Accordion(label = "Anger", open = False):
+                angry_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_low.wav', label = "Anger (Low Intensity)")
+                angry_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_intense.wav', label = "Anger (High Intensity)")
+
+            agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)
+


    with gr.Tab("Annotation Interface"):
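The new "Audio examples" section assumes the example WAVs already exist under `{persistent_storage}/emotion_examples/`. Purely as an illustration (the `example_audio` helper is not in this commit), a small guard that keeps the page from erroring if a clip is missing from the volume:

```python
from pathlib import Path

import gradio as gr

persistent_storage = Path("/data")


def example_audio(filename: str, label: str) -> gr.Audio:
    """Audio player for a bundled example clip; empty player if the file is absent."""
    clip = persistent_storage / "emotion_examples" / filename
    return gr.Audio(value=str(clip) if clip.exists() else None, label=label)


# Usage mirroring the accordions above:
# with gr.Accordion(label="Happy", open=False):
#     example_audio("happy_low.wav", "Happy (Low Intensity)")
#     example_audio("happy_intense.wav", "Happy (High Intensity)")
```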
@@ -310,9 +379,9 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):


    lets_go.click(None, [], [ann_completed, total], js = js_progress_bar)
-
-
-
+    lets_go.click(deactivate_participant_id, [participant_id, lets_go], [participant_id, lets_go])
+    lets_go.click(activate_elements, [emotions, confidence, comments, next_button, previous_button], [emotions, confidence, comments, next_button, previous_button])
+    lets_go.click(load_example, inputs = [gr.Number(current_index["index"], visible = False)], outputs = [sentence_text, audio_player, emotions, confidence, ann_completed, comments])


demo.launch()
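One detail worth flagging in the event wiring above: the four separate `lets_go.click(...)` listeners are independent, and Gradio does not guarantee the order in which they run. If loading the first example should only happen after the participant ID is locked and the widgets are activated, the listeners could be chained with `.then(...)`. A toy, self-contained sketch of that pattern (not the app's actual handlers):

```python
import gradio as gr

with gr.Blocks() as demo:
    lets_go = gr.Button("Let's go!")
    status = gr.Textbox(label="Status")

    def lock_participant():
        return "participant locked"

    def load_first_example(current):
        return current + " -> first example loaded"

    # .then() schedules the second callback only after the first one finishes,
    # unlike two independent .click() listeners, which are not ordered.
    lets_go.click(lock_participant, inputs=[], outputs=[status]).then(
        load_first_example, inputs=[status], outputs=[status]
    )

demo.launch()
```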