Fix: state errors - allow multiple users to annotate at the same time
Files changed:
- __pycache__/load_and_save.cpython-310.pyc +0 -0
- __pycache__/text_explanations.cpython-310.pyc +0 -0
- __pycache__/utils.cpython-310.pyc +0 -0
- app.py +47 -394
- load_and_save.py +233 -0
- text_explanations.py +223 -0
- utils.py +45 -0
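
The commit title points at Gradio's session model: plain module-level variables (and components created at import time) are shared by every visitor to a Space, while gr.State is stored per browser session. The refactor below moves the per-annotator values into gr.State. A minimal, hypothetical sketch of the difference, not code from this repo:

    import gradio as gr

    shared_index = 0  # module level: one value shared by every visitor to the Space

    def advance_shared():
        # Mutates the module global: annotator A's click moves annotator B too.
        global shared_index
        shared_index += 1
        return shared_index

    def advance_session(i):
        # Pure function over this session's State: other sessions are untouched.
        return i + 1, i + 1

    with gr.Blocks() as demo:
        current_index = gr.State(0)          # stored per browser session
        shown = gr.Number(label="My index")
        bad = gr.Button("Shared counter")
        good = gr.Button("Per-session counter")

        bad.click(advance_shared, inputs=[], outputs=[shown])
        good.click(advance_session, inputs=[current_index], outputs=[current_index, shown])

    demo.launch()

With the shared counter, two simultaneous annotators would advance the same index; with gr.State, each session carries its own.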
__pycache__/load_and_save.cpython-310.pyc ADDED
Binary file (7.83 kB)

__pycache__/text_explanations.cpython-310.pyc ADDED
Binary file (8.77 kB)

__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.59 kB)
app.py CHANGED

@@ -7,396 +7,24 @@ from huggingface_hub import login
 from mutagen.mp3 import MP3
 from mutagen.wave import WAVE
 import json
+from text_explanations import *
+from utils import *
+from load_and_save import *

The block removed by this hunk (old lines 11-394) held the css and js_progress_bar strings, the instruction HTML (intro_html, examples_explanation, side_bar_html, start_annotating), the possible_ids lookup table, the persistent_storage and password_files settings, and the helpers get_audio_duration, get_storage, load_first_example, load_example, save_annotation, next_example, previous_example, deactivate_participant_id, and count_clicks. All of it moves into the three new modules shown in full below: the static strings into text_explanations.py, the annotation workflow into load_and_save.py, and the small utilities into utils.py. The only code added in its place is the state_to_number helper:

+def state_to_number(*state_obj_list):
+    list_numbers = []
+    for state_obj in state_obj_list:
+        number_obj = gr.Number(state_obj, visible = False)
+        list_numbers.append(number_obj)
+
+    return list_numbers

 # ===================
 # Gradio Interface
 # ===================
 with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
     # List of all audio files to annotate

@@ -407,8 +35,7 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
 participant_id = gr.Textbox(label='What is your participant ID?', interactive = True)
 lets_go = gr.Button("Let's go!")
 cheat_sheet = gr.HTML(side_bar_html, padding = False)
-
-
+
 with gr.Tab("Instructions", elem_id = 'instructions'):
     instructions = gr.HTML(intro_html, padding = False)

@@ -430,20 +57,28 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
 angry_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_low.wav', label = "Anger (Low Intensity)")
 angry_int_audio = gr.Audio(value=f'{persistent_storage}/emotion_examples/angry_intense.wav', label = "Anger (High Intensity)")

-#agreement = gr.Checkbox(value = False, label = "I agree", info = "I agree to have my annotations, comments, and questionnaire answers used for research purposes. I understand that any personal information will be anonymized.", interactive = True)
-
 instructions = gr.HTML(start_annotating, padding = False)
 image = gr.Image(label = "Annotation Interface", value = f"{persistent_storage}/instructions_annotation.png", container = False, type = "filepath", show_label = False, show_download_button = False, show_fullscreen_button = False, show_share_button = False)


 with gr.Tab("Annotation Interface"):
     ann_completed = gr.State(0)
+    ann_completed_temp = gr.Number(0, visible = False)
     total = gr.State(0)
+    total_temp = gr.Number(0, visible = False)
     current_index = gr.State(0)
-
-
-
+    current_index_temp = gr.Number(0, visible = False)
+    start = gr.State(0.0)
+    start_temp = gr.Number(0, visible = False)
+    end = gr.State(0.0)
+    end_temp = gr.Number(0, visible = False)
+    duration = gr.State(0.0)
+    duration_temp = gr.Number(0, visible = False)
     n_clicks = gr.State(0)
+
+    part_id = gr.State('')
+
+
     annotations = gr.State(pd.DataFrame())
     file_list = gr.State(pd.DataFrame())

@@ -486,21 +121,39 @@ with (gr.Blocks(theme=gr.themes.Soft(), css = css) as demo):
 # Go back
 previous_button.click(
     previous_example,
     inputs=[annotations, file_list, emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index],
-    outputs=[annotations, sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration,
-             ann_completed, current_index],)
+    outputs=[annotations, sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration,
+             ann_completed, current_index],).then(state_to_number, [start, end, duration, current_index, ann_completed, total],
+             [start_temp, end_temp, duration_temp, current_index_temp, ann_completed_temp, total_temp]).then(None, [],
+             [start_temp, end_temp, duration_temp, current_index_temp, ann_completed_temp, total_temp], js = js_progress_bar)

 # Go to the next example
 next_button.click(
     next_example,
     inputs=[annotations, file_list, emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index],
-    outputs=[annotations, sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed,
-             current_index],)
+    outputs=[annotations, sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed,
+             current_index],).then(state_to_number, start, []).then(state_to_number, [start, end, duration, current_index, ann_completed, total],
+             [start_temp, end_temp, duration_temp, current_index_temp, ann_completed_temp, total_temp]).then(None, [],
+             [start_temp, end_temp, duration_temp, current_index_temp, ann_completed_temp, total_temp], js = js_progress_bar)


 buttons = [previous_button, next_button]
 data = [sentence_text, audio_player, emotions, confidence, comments]
-
+
+lets_go.click(deactivate_participant_id, [annotations, file_list, total, participant_id,
+              lets_go, *buttons, *data, n_clicks, ann_completed, current_index],
+              [annotations, file_list, participant_id, part_id, lets_go, total, *buttons,
+               *data, n_clicks, start, end, duration, ann_completed, current_index]).then(state_to_number, [start, end, duration, current_index, ann_completed, total],
+              [start_temp, end_temp, duration_temp, current_index_temp, ann_completed_temp, total_temp]).then(None, [],
+              [start_temp, end_temp, duration_temp, current_index_temp, ann_completed_temp, total_temp], js = js_progress_bar)
+
 audio_player.play(count_clicks, [n_clicks], [n_clicks])

-sidebar.collapse(None, [], [start, end, duration, current_index,ann_completed, total], js = js_progress_bar)
+sidebar.collapse(None, [], [start, end, duration, current_index, ann_completed, total], js = js_progress_bar)
 sidebar.expand(None, [], [start, end, duration, current_index,ann_completed, total], js = js_progress_bar)
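
The chained wiring above is the core of the fix: each navigation event first runs its Python handler against per-session gr.State values, then state_to_number mirrors the fresh State values into hidden gr.Number components, and a final fn=None step hands those component values to js_progress_bar. A stripped-down sketch of the same three-step chain, assuming (as the diff itself does) that a js-only .then() can read the values of the components listed as its outputs; the names here are illustrative, not from the repo:

    import gradio as gr

    def state_to_number(*values):
        # Mirror per-session State values into hidden Numbers the js can read.
        return [gr.Number(v, visible=False) for v in values]

    with gr.Blocks() as demo:
        progress = gr.State(0)
        progress_temp = gr.Number(0, visible=False)
        bump = gr.Button("Annotate one")

        bump.click(lambda p: p + 1, [progress], [progress]).then(
            state_to_number, [progress], [progress_temp]
        ).then(
            None, [], [progress_temp],
            js="(p) => { console.log('completed:', p); return p; }"
        )

    demo.launch()

Keeping the js bridge in hidden per-event Numbers, rather than in one shared component, is what lets several annotators drive their own progress bars at once.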
load_and_save.py ADDED

@@ -0,0 +1,233 @@
import gradio as gr
import pandas as pd
import os
from pathlib import Path
from huggingface_hub import login
from mutagen.mp3 import MP3
from mutagen.wave import WAVE
import json
from text_explanations import *
from utils import *

possible_ids = {'Tiger-001': 0, 'Falcon-002': 0,
                'Elephant-003': 1, 'Panther-004': 1,
                'Zebra-005': 2, 'Wolf-006': 2,
                'Koala-007': 3, 'Otter-008': 3,
                'Leopard-009': 4, 'Panda-010': 4,
                'Cheetah-011': 5, 'Gorilla-012': 5,
                'Dolphin-013': 6, 'Lynx-014': 6,
                'Moose-015': 7, 'Raccoon-016': 7}

persistent_storage = Path('/data')
password_files = os.getenv("password_files")


def load_first_example(annotations_df, file_list_df, id, completed, index):
    """Loads the first example and updates the index.

    Parameters:
    * annotations_df: annotation file
    * file_list_df: files to annotate
    * id: participant ID
    * completed: number of examples annotated
    * index: current index (in the files to annotate list)

    Returns:
    * annotations_df: dataframe with current annotations
    * load_example: current example to annotate
    * completed: updated number of completed annotations
    * index: updated current index
    """
    path_ann = f'{persistent_storage}/{id}_annotations.csv'

    if os.path.exists(path_ann):
        annotations_df = pd.read_csv(path_ann, keep_default_na=False)
        index = len(annotations_df)
        completed = len(annotations_df) - 1  # update how many examples were completed
    else:
        # Initialize an empty DataFrame to store annotations
        annotations_df = pd.DataFrame(columns=['sample_id', 'sentence', 'emotion', 'confidence', 'comments', 'n_clicks'])

    return annotations_df, *load_example(annotations_df, file_list_df, index), completed, index


def load_example(annotations_df, file_list_df, index):
    """Loads the example in row #index from dataframe file_list.
    If there are any annotations, it will give those values to the annotation dataframe.

    Parameters:
    * annotations_df: dataframe with current annotations
    * file_list_df: files to annotate
    * index: current index

    Returns:
    * sentence: current sentence
    * audio_path: current audio path
    * ann['emotion']: current emotion
    * ann['confidence']: current confidence
    * ann['comments']: current comments
    * ann['n_clicks']: current number of clicks
    * start: current start
    * end: current end
    * duration: current sentence duration
    """
    row = file_list_df.iloc[index]
    audio_path = os.path.join(persistent_storage, 'files_to_annotate_2round', row["sample_id"].split('-')[0], row["sample_id"] + '.wav')
    sentence = row["sentence"]

    # If the user already made an annotation for this example, gradio will return said annotation
    ann = (
        annotations_df.iloc[index].to_dict() if index < len(annotations_df)
        else {"sample_id": row["sample_id"], "emotion": 'Blank', "confidence": 'Blank',
              "comments": '', "n_clicks": 0}
    )

    start = row['start']
    end = row['end']
    duration = get_audio_duration(audio_path)
    print(f'start/end/duration (load example) - {start} {end} {duration}')
    return sentence, audio_path, ann['emotion'], ann['confidence'], ann["comments"], ann['n_clicks'], start, end, duration


def save_annotation(annotations_df, file_list_df, emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
    """Save the annotation for the current example.

    Parameters:
    * annotations_df: dataframe with all annotations so far
    * file_list_df: list of files to annotate
    * emotions, confidence, comments, n_clicks: annotations to save
    * participant_id: indicates where to save the annotations
    * ann_completed: number of annotations completed
    * current_index: current index

    Returns:
    * annotations_df: updated annotations_df
    * ann_completed: updated number of annotations completed
    """
    row = file_list_df.iloc[current_index]
    sample_id = row["sample_id"]
    sentence = row["sentence"]

    # Update or append annotation
    if sample_id in annotations_df["sample_id"].values:
        annotations_df.loc[annotations_df["sample_id"] == sample_id, ["emotion", "confidence", "comments", "n_clicks"]] = \
            [emotions, confidence, comments, n_clicks]
    else:
        annotations_df.loc[len(annotations_df)] = [sample_id, sentence, emotions, confidence, comments, n_clicks]
        ann_completed += 1
    annotations_df.to_csv(f"{persistent_storage}/{participant_id}_annotations.csv", index=False)  # Save to a CSV file

    return annotations_df, ann_completed


def next_example(annotations_df, file_list_df, emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
    """Move to the next example.

    Parameters:
    * annotations_df: current annotation dataframe
    * file_list_df: all files to annotate
    * emotions, confidence, comments, n_clicks: annotations to save
    * participant_id: indicates where to save the annotations
    * ann_completed: number of annotations completed
    * current_index: current index

    Returns:
    * annotations_df: updated annotations_df
    * sentence, audio_path, emotions, confidence, comments, clicks: values for the loaded example
    * start, end, duration: segment boundaries and clip duration
    * ann_completed: updated number of annotations completed
    * current_index: updated current index
    """
    if emotions == "Blank":
        gr.Warning("Please fill out the emotion section. 'Blank' is not a valid emotion.")
    elif confidence == "Blank":
        gr.Warning("Please fill out the confidence section. 'Blank' is not a valid input.")
    else:
        annotations_df, ann_completed = save_annotation(annotations_df, file_list_df, emotions, confidence, comments, n_clicks, participant_id, ann_completed, current_index)
        if current_index < len(file_list_df) - 1:
            current_index += 1

    sentence, audio_path, emotions, confidence, comments, clicks, start, end, duration = load_example(annotations_df, file_list_df, current_index)
    print(f'start/end/duration (next) {start}, {end}, {duration}')

    return annotations_df, sentence, audio_path, emotions, confidence, comments, clicks, gr.State(start), gr.State(end), gr.State(duration), ann_completed, current_index


def previous_example(annotations_df, file_list_df, emotion, confidence, comments, n_clicks, participant_id, ann_completed, current_index):
    """Move to the previous example.

    Parameters:
    * annotations_df: current annotation dataframe
    * file_list_df: all files to annotate
    * emotion, confidence, comments, n_clicks: annotations to save
    * participant_id: indicates where to save the annotations
    * ann_completed: number of annotations completed
    * current_index: current index

    Returns:
    * annotations_df: updated annotations_df
    * the loaded example values (as in next_example)
    * ann_completed: updated number of annotations completed
    * current_index: updated current index
    """
    if emotion != "Blank":
        annotations_df, ann_completed = save_annotation(annotations_df, file_list_df, emotion, confidence, comments, n_clicks, participant_id, ann_completed, current_index)
        if current_index > 0:
            current_index -= 1

    return annotations_df, *load_example(annotations_df, file_list_df, current_index), ann_completed, current_index


def deactivate_participant_id(annotations_df, file_list_df, total, participant_id, lets_go, previous_button, next_button, sentence_text, audio_player, emotions, confidence, comments, n_clicks, ann_completed, current_index):
    if participant_id in possible_ids.keys():
        file_list_df = pd.read_csv(os.path.join(persistent_storage, 'files_to_annotate_2round', f'group_{possible_ids[participant_id]}_v2.csv'), keep_default_na=False)

        total = len(file_list_df)

        annotations_df, sentence, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index = load_first_example(annotations_df, file_list_df, participant_id, ann_completed, current_index)

        participant_id = gr.Textbox(label='What is your participant ID?', value = participant_id, interactive = False)
        lets_go = gr.Button("Participant selected!", interactive = False)

        sentence_text = gr.Textbox(label="Transcription", interactive=False, value = sentence)
        emotions = gr.Radio(["Blank", "Happy", "Sad", "Angry", "Neutral"], label="Predominant Emotion (Check the sidebar for major subclasses)", value = emotions, visible = True)
        confidence = gr.Radio(["Blank", "Very Uncertain", "Somewhat Uncertain", "Neutral", "Somewhat confident", "Very confident"], label="How confident are you that the annotated emotion is present in the recording?", visible = True, value = confidence)
        comments = gr.Textbox(label="Comments", visible = True, value = comments)
        previous_button = gr.Button("Previous Example", visible = True)
        next_button = gr.Button("Next Example", visible = True)

        return annotations_df, file_list_df, participant_id, participant_id, lets_go, total, previous_button, next_button, sentence_text, audio_player, emotions, confidence, comments, n_clicks, start, end, duration, ann_completed, current_index

    else:
        raise gr.Error("Please insert a valid participant ID")
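
The update-or-append branch in save_annotation is what makes revisiting a clip idempotent: an existing sample_id row is overwritten in place, and only a genuinely new row bumps the completed counter. A self-contained check of that logic (sample values are hypothetical; no Gradio required):

    import pandas as pd

    cols = ['sample_id', 'sentence', 'emotion', 'confidence', 'comments', 'n_clicks']
    df = pd.DataFrame(columns=cols)

    def upsert(df, sample_id, sentence, emotion, confidence, comments, n_clicks):
        # Overwrite an existing row for this clip, otherwise append a new one,
        # mirroring save_annotation above.
        if sample_id in df["sample_id"].values:
            df.loc[df["sample_id"] == sample_id, ["emotion", "confidence", "comments", "n_clicks"]] = \
                [emotion, confidence, comments, n_clicks]
        else:
            df.loc[len(df)] = [sample_id, sentence, emotion, confidence, comments, n_clicks]
        return df

    df = upsert(df, 'A-001', 'hello', 'Happy', 'Very confident', '', 2)
    df = upsert(df, 'A-001', 'hello', 'Sad', 'Neutral', 'revised', 3)  # updates in place
    print(df)  # one row, emotion == 'Sad'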
text_explanations.py ADDED

@@ -0,0 +1,223 @@
css = """#myProgress {
  width: 100%;
  background-color: var(--block-border-color);
  border-radius: 2px;
}

#myBar {
  width: 0%;
  height: 30px;
  background-color: var(--block-title-background-fill);
  border-radius: 2px;
}

#progressText {
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  color: var(--block-title-text-color);
  font-weight: normal;
  font-size: 14px;
}

h1, h2, h3, h4 {
  padding: var(--block-title-padding);
  color: var(--block-title-text-color);
  border: solid var(--block-title-border-width) var(--block-title-border-color);
  border-radius: var(--block-title-radius);
  background: var(--block-title-background-fill);
  width: fit-content;
  display: inline-block;
}

h4 {
  margin: 0px;
  color: var(--block-title-background-fill);
  background: var(--block-title-text-color);
}

#instructions {
  max-width: 980px;
  align-self: center;
}

.content-box {
  border-color: var(--block-border-color);
  border-radius: var(--block-radius);
  background: var(--block-background-fill);
  padding: var(--block-label-padding);
}
"""


js_progress_bar = """
function move(start, end, total_duration, current_index, n_ann, total_ann) {

  var elem = document.getElementById("myBar");
  elem.style.width = n_ann/total_ann * 100 + "%";
  progressText.innerText = `${current_index} / ${total_ann} (Completed: ${n_ann})`;

  const waveform = document.querySelector('#audio_to_annotate #waveform div');
  const shadowRoot = waveform.shadowRoot;
  const canvases = shadowRoot.querySelector('.wrapper');

  console.log(canvases.offsetWidth)

  const leftOffsetPct = start / total_duration;
  const widthPct = (end - start) / total_duration;

  // Get CSS variable for background color
  const blockColor = getComputedStyle(document.documentElement)
    .getPropertyValue('--block-title-background-fill')
    .trim() || 'red'; // Default to red if variable is not found

  // Create a style element for the shadow DOM
  const style = document.createElement('style');
  style.textContent = `
    .wrapper::after {
      content: '';
      position: absolute;
      top: 0;
      left: ${canvases.offsetWidth * leftOffsetPct}px;
      width: ${canvases.offsetWidth * widthPct}px;
      height: 100%;
      background-color: blue;
      z-index: 999;
      opacity: 0.5;
    }

    /* Ensure parent has positioning context */
    .wrapper {
      position: relative;
    }
  `;

  // Append the style to the shadow root
  shadowRoot.appendChild(style);

  console.log(start + ' ' + end + ' ' + total_duration);
  console.log(n_ann + ' ' + total_ann);
}
"""


intro_html = """
<h1>Emotionality in Speech</h1>
<div class="content-box">
<p>Spoken language communicates more than just words. Speakers use tone, pitch, and other nonverbal cues to express emotions. In emotional speech, these cues can strengthen or even contradict the meaning of the words—for example, irony can make a positive phrase sound sarcastic. For this research, we will focus on three basic emotions plus neutral:</p>

<ul>
  <li><h4>Anger</h4></li>
  <li><h4>Happiness</h4></li>
  <li><h4>Sadness</h4></li>
  <li><h4>Neutral</h4></li>
</ul>

<p>This may seem like a small set, but it's a great starting point for analyzing emotions in such a large collection—<strong>303 hours of interviews! (That’s 13 days of nonstop listening! 😮)</strong></p>
</div>

<h2>The ACT-UP Oral History Project</h2>

<div class="content-box">
<p>You will be annotating short audio clips extracted from the ACT UP (AIDS Coalition to Unleash Power) Oral History Project developed by Sarah Schulman and Jim Hubbard.
This archive features interviews with individuals who were part of ACT UP during the late 1980s and early 1990s, amidst the AIDS epidemic.
In each video, the subjects talk about their life before the epidemic, how they were affected by AIDS, and their work in ACT UP.</p>
</div>

<h2>What will you be annotating?</h2>
<div class="content-box">
<p>You will annotate one emotion per short audio clip, based on the following criteria:</p>

<ul>
  <li>
    <h4>Predominant Emotion:</h4>
    The emotion expressed with the highest intensity. Emotions can be complex, and multiple emotions may occur at the same time.
  </li>

  <li>
    <h4>Perceived Emotion at the Time of Recording:</h4>
    In Oral History Archives, interviewees discuss their past. However, you should annotate the emotion they appear to feel at the time of recording, NOT what they felt during the event they describe.
  </li>

  <li>
    <h4>Speech Emotionality:</h4>
    Focus on how something is said rather than what is said. For example, if a friend recounts an awful day with humor, the content may be sad, but the delivery is joyful. In this case, linguistic emotionality (content) would be classified as sad, while paralinguistic emotionality (tone and delivery) would be classified as joyful.
  </li>
</ul>

<div style="text-align: center; padding: 1.5em 0;">
  <strong>If you're uncertain about which emotion you are hearing, open the sidebar by clicking the arrow in the upper left corner. There, you'll find a list of major emotions grouped under each category!</strong>
</div>
</div>
"""

examples_explanation = """<h3>Audio examples</h3>
<div class="content-box">
<p>Let's check out examples for the four emotions to annotate. Note that all these examples use the same sentence and are acted out, making the emotionality in speech more apparent. In a real-world setting, emotionality is more complex, so you will find a list of additional emotions within each of the three emotion categories (Happy, Sad, and Angry) to assist you during annotation.</p>
</div>"""

side_bar_html = """
<h3>Major subclasses</h3>
<div class="content-box">
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
  <span>🙂</span>
  <h4 style="margin: 0;">Happiness</h4>
</div>

<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
  <p>Affection, Goodwill, Joy, Satisfaction, Zest, Acceptance, Pride, Hope, Excitement, Relief, Passion, Caring</p>
</div>

<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
  <span>🙁</span>
  <h4 style="margin: 0;">Sadness</h4>
</div>
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
  <p>Suffering, Regret, Displeasure, Embarrassment, Sympathy, Depression</p>
</div>
<div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
  <span>😡</span>
  <h4 style="margin: 0;">Anger</h4>
</div>
<div>
  <div style="display: flex; align-items: center; gap: 8px; margin-bottom: 10px;">
    <p>Irritability, Torment, Jealousy, Disgust, Rage, Frustration</p>
  </div>
</div>
"""

start_annotating = """<h2>How to use the annotation interface?</h2>
<div class="content-box">
<ol>
  <li>
    Open the sidebar by clicking the arrow in the upper left corner.
  </li>
  <li>
    Enter the participant ID you received via email.
  </li>
  <li>
    Click <strong>"Let's go!"</strong> — this will lock your participant ID.
  </li>
  <li>
    You’ll be directed to the annotation interface. The task will resume where you left off (on the last example you annotated), or start from the first audio if this is your first session.
  </li>
  <li>
    When you finish all annotations, please send an email to <a href="mailto:f.pessanha@uu.nl">f.pessanha@uu.nl</a>.
  </li>
</ol>
<p><strong>Note:</strong> You can click on any part of the audio to start playing from that point. Please avoid clicking on the audio while it is playing (pause it first). This will not affect the program, but it will help us understand how you interact with the interface.</p>
<div style="text-align: center; padding: 1.5em 0;">
  <p><strong>Below you can find an overview of the annotation interface.</strong></p>
</div>
</div>"""
utils.py ADDED

@@ -0,0 +1,45 @@
import gradio as gr
import pandas as pd
import os
from pathlib import Path
from huggingface_hub import login
from mutagen.mp3 import MP3
from mutagen.wave import WAVE
import json
from text_explanations import *

# Note: persistent_storage and password_files are defined in load_and_save.py;
# get_storage assumes they are in scope when it is called.


def get_audio_duration(file_path):
    if file_path.lower().endswith('.mp3'):
        audio = MP3(file_path)
    elif file_path.lower().endswith(('.wav', '.wave')):
        audio = WAVE(file_path)
    else:
        raise ValueError("Unsupported file format")

    return audio.info.length  # Duration in seconds


def get_storage(password):
    # Check if the password is correct
    if password == password_files:
        # Get the list of file paths and calculate the total usage
        files = [
            file for file in persistent_storage.glob("**/*.csv") if file.is_file()
        ]

        # Calculate total usage (in bytes)
        usage = sum([file.stat().st_size for file in files])

        # Convert file paths to strings for Gradio's File component
        file_paths = [str(file.resolve()) for file in files]

        # Return the file paths (as strings) and the total usage in GB
        return file_paths, f"{usage / (1024.0 ** 3):.3f}GB"

    else:
        return gr.Warning("Please provide the correct password"), None


def count_clicks(n_clicks):
    n_clicks = n_clicks + 1
    return n_clicks
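
get_audio_duration reads only the container header through mutagen, so calling it on every load_example is cheap; no audio is decoded. Usage is one line, sketched here against one of the example clips the Space ships in persistent storage:

    # Header-only parse via mutagen; returns the clip length in seconds.
    print(f"{get_audio_duration('/data/emotion_examples/angry_low.wav'):.2f} s")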