Update `num_to_words`, `filter_and_clean_text` & `preprocess_text` and add `map_special_terms`
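The change below extends `ChallConfig` with text-normalization options (`lowercase`, `num_to_words`, `special_terms_mapping`) and adds `raw_text`/`clear_text` fields to both the transcript-level and segment-level features. A hypothetical loading sketch, not part of this commit, showing how the new options would be passed through to the config (the dataset path, the term mapping, and the `trust_remote_code` flag are illustrative placeholders):

    from datasets import load_dataset

    dataset = load_dataset(
        "path/to/chall",                       # placeholder for the actual dataset repo or local script
        split_segments=True,
        max_chunk_length=12,
        min_chunk_length=0.5,
        remove_trailing_pauses=True,
        lowercase=True,                        # restrict clear_text to lowercase letters, umlauts, space and '
        num_to_words=True,                     # spell out numbers via num2words
        special_terms_mapping={"ok": "okay"},  # illustrative mapping
        trust_remote_code=True,                # may be needed for script-based datasets, depending on the datasets version
    )
    print(dataset)  # each example now also carries "raw_text" and "clear_text"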
chall.py
@@ -1,12 +1,13 @@
 import json
 import os
-
+import re
+import string
+from typing import Union, List, Dict
 from datasets import DatasetInfo, BuilderConfig, GeneratorBasedBuilder, Version, Features, Value, Audio, SplitGenerator, Split, logging
 from datasets.features import Sequence
 import soundfile as sf
 import importlib.util
 
-
 _SAMPLE_RATE = 16000
 
 _DESCRIPTION = "tbd"
@@ -22,17 +23,32 @@ class ChallConfig(BuilderConfig):
     split_segments: bool = False
 
     # settings that can only be used together with split_segments
-    max_chunk_length: Union[float, None]
-    min_chunk_length: Union[float, None]
-    max_pause_length: Union[float, None]
+    max_chunk_length: Union[float, None]
+    min_chunk_length: Union[float, None]
+    max_pause_length: Union[float, None]
     remove_trailing_pauses: bool = False
 
+    lowercase: bool
+    num_to_words: bool
+    allowed_chars: set
+    special_terms_mapping: dict
+
     def __init__(self, **kwargs):
-        self.split_segments = kwargs.pop("split_segments",
-        self.remove_trailing_pauses = kwargs.pop("remove_trailing_pauses",
-
-        self.
-        self.
+        self.split_segments = kwargs.pop("split_segments", False)
+        self.remove_trailing_pauses = kwargs.pop("remove_trailing_pauses", False)
+
+        self.max_chunk_length = kwargs.pop("max_chunk_length", None)
+        self.min_chunk_length = kwargs.pop("min_chunk_length", None)
+        self.max_pause_length = kwargs.pop("max_pause_length", None)
+
+        self.lowercase = kwargs.pop("lowercase", True)
+        self.num_to_words = kwargs.pop("num_to_words", True)
+        self.special_terms_mapping = kwargs.pop("special_terms_mapping", {})
+
+        if self.lowercase:
+            self.allowed_chars = set(string.ascii_lowercase + " äöü'")
+        else:
+            self.allowed_chars: set = set(string.ascii_lowercase + string.ascii_uppercase + " ÄÖÜäöü'")
 
         super(ChallConfig, self).__init__(**kwargs)
 
@@ -66,6 +82,8 @@ class Chall(GeneratorBasedBuilder):
             max_chunk_length=12,
             min_chunk_length=0.5,
             remove_trailing_pauses=True,
+            lowercase=True,
+            num_to_words=True,
            description="Settings used for the paper."
         )
     ]
@@ -86,7 +104,6 @@ class Chall(GeneratorBasedBuilder):
         """
         self._check_dependencies()
         super().__init__(**kwargs)
-        print(self.config)
 
     @staticmethod
     def _check_dependencies() -> None:
@@ -122,6 +139,8 @@ class Chall(GeneratorBasedBuilder):
                 "area_of_school_code": Value("int32"),
                 "background_noise": Value("bool"),
                 "speaker": Value("string"),
+                "raw_text": Value("string"),
+                "clear_text": Value("string"),
                 "words": Sequence(
                     {
                         "start": Value("float"),
@@ -138,6 +157,8 @@ class Chall(GeneratorBasedBuilder):
                 "intervention": Value("int32"),
                 "school_grade": Value("string"),
                 "area_of_school_code": Value("int32"),
+                "raw_text": Value("string"),
+                "clear_text": Value("string"),
                 "participants": Sequence(
                     {
                         "pseudonym": Value("string"),
@@ -243,8 +264,7 @@ class Chall(GeneratorBasedBuilder):
             else:
                 yield from self._generate_transcript_examples(audio_id, str(audio_file_path), row, transcript)
 
-
-    def _generate_transcript_examples(audio_id: str, audio_file_path: str, data: dict, transcript: dict):
+    def _generate_transcript_examples(self, audio_id: str, audio_file_path: str, data: dict, transcript: dict):
         """
         Generates examples based on the entire audio file and its associated transcript metadata. This method reads the
         entire audio file, extracts speaker and segment information from the transcript, and packages these along with
@@ -261,6 +281,9 @@ class Chall(GeneratorBasedBuilder):
         transcript_data["speakers"] = transcript.get("speakers", [])
         transcript_data["segments"] = transcript.get("segments", [])
 
+        transcript_data["raw_text"] = raw_text = self.get_raw_text([word for segment in transcript["segments"] for word in segment["words"]])
+        transcript_data["clear_text"] = self.get_clear_text(raw_text)
+
         with sf.SoundFile(audio_file_path) as audio_file:
             audio = audio_file.read(dtype='float32')
 
@@ -295,6 +318,9 @@ class Chall(GeneratorBasedBuilder):
             segment_data["speaker_id"] = segment["speaker"]
             segment_data["words"] = segment["words"]
 
+            segment_data["raw_text"] = raw_text = self.get_raw_text(segment["words"])
+            segment_data["clear_text"] = self.get_clear_text(raw_text)
+
             start_time = segment["words"][0]["start"]
             end_time = segment["words"][-1]["end"]
             start_frame = int(_SAMPLE_RATE * start_time)
@@ -443,4 +469,176 @@ class Chall(GeneratorBasedBuilder):
 
         return list_of_chunks
 
+    @staticmethod
+    def get_raw_text(words: List[Dict]) -> str:
+        """
+
+        """
+        raw_text = " ".join([word["text"] for word in words])
+        return raw_text
+
+    def get_clear_text(self, raw_text: str) -> str:
+        """
+        Processes the raw text to produce a clear, cleaned version by removing annotations,
+        preprocessing the text, converting numbers to words, mapping special terms,
+        converting to lowercase, and filtering allowed characters.
+
+        :param raw_text: The raw input text to be processed.
+        :return: A string representing the processed clear text.
+        """
+
+        clear_text = self.remove_annotations(raw_text)
+        clear_text = self.preprocess_text(clear_text)
+
+        if self.config.num_to_words:
+            clear_text = self.num_to_words(clear_text)
+
+        if self.config.special_terms_mapping:
+            self.map_special_terms(clear_text, special_terms_mapping=self.config.special_terms_mapping)
+
+        if self.config.lowercase:
+            clear_text = clear_text.lower()
+
+        if self.config.allowed_chars:
+            clear_text = self.filter_and_clean_text(clear_text, allowed_chars=self.config.allowed_chars)
+
+        return clear_text
+
+    @staticmethod
+    def preprocess_text(transcript: str) -> str:
+        """
+        Preprocesses the text by removing words between brackets and parentheses,
+        standardizing spaces before apostrophes, removing commas between digits,
+        and replacing special characters.
+
+        :param transcript: The input transcript to preprocess.
+        :return: The preprocessed transcript with various text normalization applied.
+        """
+
+        transcript = re.sub(r"[<\[][^>\]]*[>\]]", "", transcript)  # remove words between brackets
+        transcript = re.sub(r"\(([^)]+?)\)", "", transcript)  # remove words between parenthesis
+        transcript = re.sub(r"\s+'", "'", transcript)  # standardize when there's a space before an apostrophe
+        transcript = re.sub(r"(\d),(\d)", r"\1\2", transcript)  # remove commas between digits
+        transcript = re.sub(r"\.([^0-9]|$)", r" \1", transcript)  # remove periods not followed by numbers
+
+        # Replace special characters
+        special_chars = {
+            'ß': 'ss', 'ç': 'c', 'á': 'a', 'à': 'a', 'â': 'a', 'é': 'e', 'è': 'e', 'ê': 'e', 'í': 'i', 'ì': 'i', 'î': 'i',
+            'ó': 'o', 'ò': 'o', 'ô': 'o', 'ú': 'u', 'ù': 'u', 'û': 'u', '-': ' ', '\u2013': ' ', '\xad': ' ', '/': ' '
+        }
+        for char, replacement in special_chars.items():
+            transcript = transcript.replace(char, replacement)
+
+        # Normalize whitespace
+        transcript = re.compile(r'[ \t]+').sub(' ', transcript)
+
+        return transcript
+
+    @staticmethod
+    def remove_annotations(transcript: str) -> str:
+        """
+        Removes specific annotations and conventions from the transcript
+
+        :param transcript: The transcript to preprocess.
+        :return: The preprocessed transcript with conventions and annotations removed.
+        """
+        transcript = transcript.replace('@g', '')  # (Swiss-)German words
+        transcript = transcript.replace('@?', '')  # best guess
+        transcript = transcript.replace('@!', '')  # Errors
+        transcript = transcript.replace('-', '')  # Repetitions
+        transcript = transcript.replace('--', '')  # Reformulations
+        transcript = transcript.replace('(...)', '')  # Long pauses
+        transcript = transcript.replace('(Whispering)', '')  # Whispering
+        transcript = transcript.replace('(whispers)', '')  # Whispering
+        transcript = transcript.replace('(whispering)', '')  # Whispering
+        transcript = transcript.replace('(unv.)', '')  # ?
+        transcript = transcript.replace('(laughing)', '')  # Laughing
+        transcript = transcript.replace('(laughs)', '')  # Laughing
+        transcript = transcript.replace('(Laughter)', '')  # Laughing
+        return transcript
+
+    @staticmethod
+    def num_to_words(transcript: str) -> str:
+        """
+        Converts numerical expressions in the transcript to their word equivalents using the num2words library.
+
+        :param transcript: The input transcript containing numerical expressions.
+        :return: The transcript with numerical expressions converted to words.
+        """
+
+        from num2words import num2words
+
+        def replace(match):
+            number_str = match.group(0)
+            if re.match(r'\d+\.', number_str):
+                # Check if this is an ordinal context by looking at the following character
+                next_char_index = match.end()
+                if next_char_index < len(transcript) and transcript[next_char_index].islower():
+                    # Convert to ordinal if followed by a lowercase letter
+                    number = int(number_str[:-1])  # Remove the period
+                    return num2words(number, to='ordinal')
+                else:
+                    # Treat as the end of a sentence, return as is
+                    return number_str
+
+            elif re.match(r'\d{4}s', number_str):
+                # Convert decades
+                number = int(number_str[:-1])
+                return num2words(number, to='year') + "s"
+
+            elif re.match(r'\d+m\b', number_str):
+                # Convert numbers with 'm' (meters) suffix
+                number = int(number_str[:-1])
+                return num2words(number) + " meters"
+
+            elif number_str[-2:] in ['st', 'nd', 'rd', 'th']:
+                # Convert ordinal numbers with suffix
+                number = int(re.match(r'\d+', number_str).group(0))
+
+                return num2words(number, to='ordinal')
+
+            else:
+                # Convert cardinal numbers
+                return num2words(number_str)
+
+        # Regular expression to find numbers, ordinals, ordinals with period, decades, and numbers with 'm' suffix
+        pattern = re.compile(r'\b\d+(\.\d+)?\b|\b\d+(st|nd|rd|th)\b|\b\d+\.\b|\b\d{4}s\b|\b\d+m\b')
+
+        # Substitute numbers with their word equivalent
+        new_sentence = pattern.sub(replace, transcript)
+
+        return new_sentence
+
+    @staticmethod
+    def map_special_terms(transcript: str, special_terms_mapping: dict):
+        """
+        Maps special terms in the transcript to their corresponding replacements using dictionary of pairs
+
+        :param transcript: The input transcript containing special terms to be mapped.
+        :param special_terms_mapping: A dictionary where keys are special terms and values are their replacements.
+        :return: The transcript with special terms replaced.
+        """
+
+        for term, replacement in special_terms_mapping.items():
+            transcript = re.sub(r'\b' + re.escape(term) + r'\b', replacement, transcript, flags=re.IGNORECASE)
+        return transcript
+
+    @staticmethod
+    def filter_and_clean_text(transcript: str, allowed_chars: set = None):
+        """
+        Filters the transcript to include only the allowed characters and normalizes
+        whitespace by removing extra spaces and trimming the text.
+
+        :param transcript: The input transcript to be filtered and cleaned.
+        :param allowed_chars: A set of allowed characters. If provided, only these characters will be retained in the transcript.
+        :return: The filtered and cleaned transcript.
+        """
+
+        # Filter allowed characters
+        if allowed_chars is not None:
+            transcript = ''.join([char for char in transcript if char in allowed_chars])
+
+        # Normalize whitespace
+        transcript = re.compile(r'[ \t]+').sub(' ', transcript).strip()
 
+        return transcript
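Since the new cleaning helpers are plain static methods on `Chall`, they can also be exercised on their own. A minimal sketch, not part of the commit; it assumes `chall.py` and its dependencies (including `num2words`) are importable locally, and the sample string and term mapping are invented for illustration:

    from chall import Chall

    raw_text = "Wir lesen Seite 12 (laughs) und dann <unk> usw."

    text = Chall.remove_annotations(raw_text)              # strip transcription conventions such as "(laughs)"
    text = Chall.preprocess_text(text)                      # drop <...>/[...] tokens, normalize punctuation and special characters
    text = Chall.num_to_words(text)                         # spell out numbers with num2words
    text = Chall.map_special_terms(text, {"usw": "und so weiter"})  # optional, invented mapping
    clear_text = Chall.filter_and_clean_text(
        text.lower(),
        allowed_chars=set("abcdefghijklmnopqrstuvwxyz äöü'"),
    )
    print(clear_text)

Inside the builder, `get_clear_text` chains these same steps and is driven by the `ChallConfig` flags, so the per-example `clear_text` column reflects whichever combination of options the config selects, while `raw_text` keeps the unprocessed word sequence.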