fpessanha commited on
Commit
e65f31d
·
1 Parent(s): 61ec2c0

Feat: Update instructions

Browse files
Files changed (1) hide show
  1. app.py +77 -54
app.py CHANGED
@@ -6,6 +6,8 @@ from pathlib import Path
6
  from huggingface_hub import login
7
  from mutagen.mp3 import MP3
8
  from mutagen.wave import WAVE
 
 
9
  css = """#myProgress {
10
  width: 100%;
11
  background-color: var(--block-border-color);
@@ -116,7 +118,9 @@ js_progress_bar = """
116
 
117
  intro_html = """
118
 
 
119
  <div class="content-box">
 
120
  <p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>
121
 
122
  <ul>
@@ -138,25 +142,29 @@ intro_html = """
138
  </div>
139
 
140
  <h2>What will you be annotating?</h2>
141
-
142
  <div class="content-box">
143
  <p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
144
 
145
  <ul>
146
- <li>
147
- <h4>Predominant Emotion:</h4> The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
148
- </li>
149
-
150
- <li>
151
- <h4>Perceived Emotion at the Time of Recording:</h4> In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
152
- </li>
153
-
154
- <li>
155
- <h4>Speech Emotionality:</h4> Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
156
- </li>
 
 
 
157
  </ul>
158
 
159
- <p>Further, you will be asked to answer <strong>"How confident are you that the annotated emotion is present in the recording?"</strong> on a scale of 0 to 10, with 0 being "not at all confident" and 10 being "certain, completely confident". There will be a <strong>"Comment/Feedback"</strong> section where you can make notes. Below the audio, there will be an option to view the transcribed sentence. Please use this only if you are struggling to understand the audio.</p>
 
 
160
  </div>
161
  """
162
 
@@ -165,63 +173,75 @@ examples_explanation = """<h3>Audio examples</h3>
165
  <p>Let's check out examples for the four emotions to annotate. Note that all these examples use the same sentence and are acted out, making the emotionality in speech more apparent. In a real-world setting, emotionality is more complex, so you will find a list of additional emotions within each of the three emotion categories (Happy, Sad, and Angry) to assist you during annotation.</p>
166
  </div>"""
167
  side_bar_html = """
168
- <h3>The task</h3>
169
  <div class="content-box">
170
- <p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
171
- <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
172
- <span>&#9989;</span>
173
- <h4 style="margin: 0;">Predominant Emotion</h4>
 
 
 
 
174
  </div>
175
 
176
- <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
177
- <span>&#9989;</span>
178
- <h4 style="margin: 0;">Perceived Emotion at the Time of Recording</h4>
179
  </div>
180
- <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 20px;">
181
- <span>&#9989;</span>
182
- <h4 style="margin: 0;">Speech Emotionality</h4>
 
 
 
 
 
 
 
 
183
  </div>
184
-
185
  </div>
186
- <h3>Major subclasses</h3>
187
 
188
- <div class="content-box">
189
- <table border="1">
190
- <thead>
191
- <tr>
192
- <th>Emotion Label</th>
193
- <th>Major Subclasses</th>
194
- </tr>
195
- </thead>
196
- <tbody>
197
- <tr>
198
- <td>Happiness</td>
199
- <td>Affection, Goodwill, Joy, Satisfaction, Zest, Acceptance, Pride, Hope, Excitement, Relief, Passion, Caring</td>
200
- </tr>
201
- <tr>
202
- <td>Sadness</td>
203
- <td>Suffering, Regret, Displeasure, Embarrassment, Sympathy, Depression</td>
204
- </tr>
205
- <tr>
206
- <td>Anger</td>
207
- <td>Irritability, Torment, Jealousy, Disgust, Rage, Frustration</td>
208
- </tr>
209
- </tbody>
210
- </table>
211
- </div>
212
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
213
  global file_list
214
  persistent_storage = Path('/data')
215
 
216
  password_files = os.getenv("password_files")
217
 
 
218
  possible_ids = {'Tiger-001': 0, 'Panda-002': 0,
219
  'Falcon-003': 1, 'Wolf-004': 1,
220
  'Dolphin-005': 2, 'Eagle-006': 2,
221
  'Jaguar-007': 3, 'Rhino-008': 3,
222
  'Zebra-009': 4, 'Lion-010': 4,
223
  'Cheetah-011': 5, 'Bear-012': 5}
224
-
225
 
226
 
227
  def get_audio_duration(file_path):
@@ -353,7 +373,7 @@ def deactivate_participant_id(participant_id, lets_go, total, previous_button, n
353
  lets_go = gr.Button("Participant selected!", interactive = False)
354
 
355
  sentence_text = gr.Textbox(label="Transcription", interactive=False, value = sentence)
356
- emotions = gr.Radio(["Blank", "Joy", "Sad", "Angry", "Neutral"], label="Predominant Emotion", value = emotions, visible = True)
357
  confidence = gr.Radio(["Blank","Very Uncertain", "Somewhat Uncertain", "Neutral", "Somewhat confident", "Very confident"], label="How confident are you that the annotated emotion is present in the recording?", visible = True, value = confidence)
358
  comments = gr.Textbox(label="Comments", visible =True, value = comments)
359
  previous_button = gr.Button("Previous Example", visible = True)
@@ -380,7 +400,7 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
380
  annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments', 'n_clicks'])
381
 
382
  # Instructions for emotion annotation
383
- with gr.Sidebar():
384
  participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
385
  lets_go = gr.Button("Let's go!")
386
  cheat_sheet = gr.HTML(side_bar_html, padding = False)
@@ -409,6 +429,9 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
409
 
410
  #agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)
411
 
 
 
 
412
 
413
  with gr.Tab("Annotation Interface"):
414
  ann_completed = gr.Number(0, visible=False)
@@ -483,5 +506,5 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
483
  get_files_button.click(get_storage, inputs= [password], outputs=[files, storage], postprocess=False)
484
 
485
 
486
- demo.launch(allowed_paths = ['/data/files_to_annotate_2round'])
487
 
 
6
  from huggingface_hub import login
7
  from mutagen.mp3 import MP3
8
  from mutagen.wave import WAVE
9
+ import json
10
+
11
  css = """#myProgress {
12
  width: 100%;
13
  background-color: var(--block-border-color);
 
118
 
119
  intro_html = """
120
 
121
+ <h1>Emotionality in Speech</h1>
122
  <div class="content-box">
123
+
124
  <p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>
125
 
126
  <ul>
 
142
  </div>
143
 
144
  <h2>What will you be annotating?</h2>
 
145
  <div class="content-box">
146
  <p>You will annotate one emotion per short audio clip, based on the following criteria:</p>
147
 
148
  <ul>
149
+ <li>
150
+ <h4>Predominant Emotion:</h4>
151
+ The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
152
+ </li>
153
+
154
+ <li>
155
+ <h4>Perceived Emotion at the Time of Recording:</h4>
156
+ In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
157
+ </li>
158
+
159
+ <li>
160
+ <h4>Speech Emotionality:</h4>
161
+ Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
162
+ </li>
163
  </ul>
164
 
165
+ <div style="text-align: center; padding: 1.5em 0;">
166
+ <strong>If you're uncertain about which emotion you are hearing, open the sidebar by clicking the arrow in the upper left corner. There, you'll find a list of major emotions grouped under each category!</strong>
167
+ </div>
168
  </div>
169
  """
170
 
 
173
  <p>Let's check out examples for the four emotions to annotate. Note that all these examples use the same sentence and are acted out, making the emotionality in speech more apparent. In a real-world setting, emotionality is more complex, so you will find a list of additional emotions within each of the three emotion categories (Happy, Sad, and Angry) to assist you during annotation.</p>
174
  </div>"""
175
  side_bar_html = """
176
+ <h3>Major subclasses</h3>
177
  <div class="content-box">
178
+ <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
179
+ <span>&#128578;</span>
180
+ <h4 style="margin: 0;">Happiness</h4>
181
+
182
+ </div>
183
+
184
+ <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
185
+ <p>Affection, Goodwill, Joy, Satisfaction, Zest, Acceptance, Pride, Hope, Excitement, Relief, Passion, Caring</p>
186
  </div>
187
 
188
+ <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
189
+ <span>&#128577;</span>
190
+ <h4 style="margin: 0;">Sadness</h4>
191
  </div>
192
+ <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
193
+ <p>Suffering, Regret, Displeasure, Embarrassment, Sympathy, Depression</p>
194
+ </div>
195
+ <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
196
+ <span>&#128545;</span>
197
+ <h4 style="margin: 0;">Anger</h4>
198
+ </div>
199
+ <div>
200
+ <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
201
+ <p>Irritability, Torment, Jealousy, Disgust, Rage, Frustration</p>
202
+ <p></p>
203
  </div>
 
204
  </div>
 
205
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
206
  """
207
+
208
+ start_annotating = """<h2>How to use the annotation interface?</h2>
209
+ <div class="content-box">
210
+ <ol>
211
+ <li>
212
+ Open the sidebar by clicking the arrow in the upper left corner.
213
+ </li>
214
+ <li>
215
+ Enter the participant ID you received via email.
216
+ </li>
217
+ <li>
218
+ Click <strong>"Let's go!"</strong> — this will lock your participant ID.
219
+ </li>
220
+ <li>
221
+ You’ll be directed to the annotation interface. The task will resume where you left off (on the last example you annotated), or start from the first audio if this is your first session.
222
+ </li>
223
+ <li>
224
+ When you finish all annotations, please send an email to <a href="mailto:f.pessanha@uu.nl">f.pessanha@uu.nl</a>.
225
+ </li>
226
+ </ol>
227
+ <div style="text-align: center; padding: 1.5em 0;">
228
+ <p><strong>Below you can find an overview of the annotation interface.</strong></p>
229
+ </div>
230
+
231
+ </div>"""
232
  global file_list
233
  persistent_storage = Path('/data')
234
 
235
  password_files = os.getenv("password_files")
236
 
237
+
238
  possible_ids = {'Tiger-001': 0, 'Panda-002': 0,
239
  'Falcon-003': 1, 'Wolf-004': 1,
240
  'Dolphin-005': 2, 'Eagle-006': 2,
241
  'Jaguar-007': 3, 'Rhino-008': 3,
242
  'Zebra-009': 4, 'Lion-010': 4,
243
  'Cheetah-011': 5, 'Bear-012': 5}
244
+ #possible_ids = json.load(os.getenv("possible_ids"))
245
 
246
 
247
  def get_audio_duration(file_path):
 
373
  lets_go = gr.Button("Participant selected!", interactive = False)
374
 
375
  sentence_text = gr.Textbox(label="Transcription", interactive=False, value = sentence)
376
+ emotions = gr.Radio(["Blank", "Happy", "Sad", "Angry", "Neutral"], label="Predominant Emotion (Check the sidebar for major subclasses)", value = emotions, visible = True)
377
  confidence = gr.Radio(["Blank","Very Uncertain", "Somewhat Uncertain", "Neutral", "Somewhat confident", "Very confident"], label="How confident are you that the annotated emotion is present in the recording?", visible = True, value = confidence)
378
  comments = gr.Textbox(label="Comments", visible =True, value = comments)
379
  previous_button = gr.Button("Previous Example", visible = True)
 
400
  annotations = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments', 'n_clicks'])
401
 
402
  # Instructions for emotion annotation
403
+ with gr.Sidebar(open = False):
404
  participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
405
  lets_go = gr.Button("Let's go!")
406
  cheat_sheet = gr.HTML(side_bar_html, padding = False)
 
429
 
430
  #agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)
431
 
432
+ instructions = gr.HTML(start_annotating, padding = False)
433
+ image = gr.Image(label = "Annotation Interface", value = f"{persistent_storage}/instructions_annotation.png", container = False, type = "filepath", show_label = False, show_download_button = False, show_fullscreen_button = False,show_share_button = False)
434
+
435
 
436
  with gr.Tab("Annotation Interface"):
437
  ann_completed = gr.Number(0, visible=False)
 
506
  get_files_button.click(get_storage, inputs= [password], outputs=[files, storage], postprocess=False)
507
 
508
 
509
+ demo.launch(allowed_paths = ['/data'])
510