import json
import os
import re
import string
import importlib.util
from typing import Union, List, Dict

from datasets import DatasetInfo, BuilderConfig, GeneratorBasedBuilder, Version, Features, Value, Audio, SplitGenerator, Split, logging
from datasets.features import Sequence

import soundfile as sf

_SAMPLE_RATE = 16000

_DESCRIPTION = "tbd"
_CITATION = "tbd"

_METAFILE = "chall_metadata.json"

logger = logging.get_logger(__name__)


class ChallConfig(BuilderConfig):
    """BuilderConfig for the chall dataset.

    Controls segmentation (splitting transcripts into utterances), chunk and
    pause handling, text normalization, and optional stratified folds.
    """

    split_segments: bool = False

    max_chunk_length: Union[float, None]
    min_chunk_length: Union[float, None]
    max_pause_length: Union[float, None]
    remove_trailing_pauses: bool = False

    lowercase: bool
    num_to_words: bool
    allowed_chars: set
    special_terms_mapping: dict

    stratify_column: Union[None, str]
    folds: Union[None, Dict[str, List]]

    def __init__(self, **kwargs):
        self.split_segments = kwargs.pop("split_segments", False)
        self.remove_trailing_pauses = kwargs.pop("remove_trailing_pauses", False)

        self.max_chunk_length = kwargs.pop("max_chunk_length", None)
        self.min_chunk_length = kwargs.pop("min_chunk_length", None)
        self.max_pause_length = kwargs.pop("max_pause_length", None)

        self.lowercase = kwargs.pop("lowercase", True)
        self.num_to_words = kwargs.pop("num_to_words", True)
        self.special_terms_mapping = kwargs.pop("special_terms_mapping", {})

        if self.lowercase:
            self.allowed_chars = set(string.ascii_lowercase + " äöü'")
        else:
            self.allowed_chars = set(string.ascii_lowercase + string.ascii_uppercase + " ÄÖÜäöü'")

        self.stratify_column = kwargs.pop("stratify_column", None)
        self.folds = kwargs.pop("folds", None)

        super().__init__(**kwargs)


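# A custom configuration can also be assembled at load time; a hedged sketch
# (hypothetical values, relying on `datasets` forwarding extra kwargs to the
# builder config):
#
#   from datasets import load_dataset
#   ds = load_dataset("chall", name="asr", data_dir="path/to/folder",
#                     max_chunk_length=10.0, min_chunk_length=0.5)
#
# ChallConfig.__init__ pops these keys before delegating to BuilderConfig.
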
class Chall(GeneratorBasedBuilder):
    VERSION = Version("1.0.0")

    BUILDER_CONFIG_CLASS = ChallConfig

    DEFAULT_CONFIG_NAME = "original"

    BUILDER_CONFIGS = [
        ChallConfig(
            name="original",
            split_segments=False,
            description="The 'original' configuration uses the data in its raw, unmodified form while ensuring all "
                        "participant information is anonymized. This setup preserves the data's original structure "
                        "without segmentation, filtering, or other preprocessing. Although participant information "
                        "is available, it cannot be mapped back to individual speakers in the transcripts."
        ),
        ChallConfig(
            name="asr",
            split_segments=True,
            description="tbd"
        ),
        ChallConfig(
            name="asr_acl",
            split_segments=True,
            max_pause_length=12,
            max_chunk_length=12,
            min_chunk_length=0.5,
            remove_trailing_pauses=True,
            lowercase=True,
            num_to_words=True,
            stratify_column="intervention",
            folds={
                "fold0": ["17", "15", "1"],
                "fold1": ["13", "7", "10"],
                "fold2": ["4", "8", "6", "14"],
                "fold3": ["12", "16", "5", "19"],
                "fold4": ["9", "2", "3", "18", "11"]
            },
            description="Settings used for the paper."
        )
    ]

    @property
    def manual_download_instructions(self):
        return (
            "To use the chall dataset you have to download it manually. "
            "TBD Download Instructions. "
            "Please extract all files into one folder and load the dataset with: "
            "`datasets.load_dataset('chall', data_dir='path/to/folder/folder_name')`"
        )

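    # A hedged usage sketch once the archive has been extracted manually
    # (the path is a placeholder):
    #
    #   from datasets import load_dataset
    #   ds = load_dataset("chall", name="asr_acl", data_dir="path/to/folder/folder_name")
    #   print(ds["fold0"][0]["clear_text"])
    #
    # With the "asr_acl" configuration the splits are the folds defined above;
    # the other configurations expose a single "train" split.
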
    def __init__(self, **kwargs):
        """
        Initializes the dataset builder class and checks for all required dependencies.

        :param kwargs: Arbitrary keyword arguments passed to the parent class's constructor.
        """
        self._check_dependencies()
        super().__init__(**kwargs)

    @staticmethod
    def _check_dependencies() -> None:
        """
        Checks that all libraries required for dataset processing are installed and available.
        """
        required_libraries = ["soundfile"]
        missing_libraries = []

        for library in required_libraries:
            if importlib.util.find_spec(library) is None:
                missing_libraries.append(library)

        if missing_libraries:
            missing_str = ", ".join(missing_libraries)
            raise ImportError(f"Missing dependencies: {missing_str}. Please install them using 'pip install {missing_str}'")

    def _info(self) -> DatasetInfo:
        """
        Specifies the datasets.DatasetInfo object, which contains the feature types and metadata of the dataset.

        :return: The DatasetInfo object.
        """
        if self.config.split_segments:
            features = Features({
                "audio_id": Value("string"),
                "intervention": Value("int32"),
                "school_grade": Value("string"),
                "area_of_school_code": Value("int32"),
                "background_noise": Value("bool"),
                "speaker": Value("string"),
                "raw_text": Value("string"),
                "clear_text": Value("string"),
                "words": Sequence(
                    {
                        "start": Value("float"),
                        "end": Value("float"),
                        "duration": Value("float"),
                        "text": Value("string"),
                    }
                ),
                "audio": Audio(sampling_rate=_SAMPLE_RATE)
            })
        else:
            features = Features({
                "audio_id": Value("string"),
                "intervention": Value("int32"),
                "school_grade": Value("string"),
                "area_of_school_code": Value("int32"),
                "raw_text": Value("string"),
                "clear_text": Value("string"),
                "participants": Sequence(
                    {
                        "pseudonym": Value("string"),
                        "gender": Value("string"),
                        "year_of_birth": Value("int32"),
                        "school_grade": Value("int32"),
                        "languages": Value("string"),
                        "estimated_l2_proficiency": Value("string")
                    }, length=-1
                ),
                "background_noise": Value("bool"),
                "speakers": Sequence(
                    {
                        "spkid": Value("string"),
                        "name": Value("string")
                    }
                ),
                "segments": Sequence(
                    {
                        "speaker": Value("string"),
                        "words": Sequence(
                            {
                                "start": Value("float"),
                                "end": Value("float"),
                                "duration": Value("float"),
                                "text": Value("string")
                            }
                        ),
                    }
                ),
                "audio": Audio(sampling_rate=_SAMPLE_RATE)
            })

        return DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """
        Defines the splits depending on the configuration. As this dataset requires manual download due to licensing,
        the data must be downloaded and extracted beforehand; `dl_manager.manual_dir` points to that folder.

        :param dl_manager: A datasets.download.DownloadManager that can be used to download and extract URLs.
        :return: A list of SplitGenerator objects: one per fold if folds are configured, otherwise a single train split.
        """
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))

        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('chall', data_dir=...)` "
                f"that includes files unzipped from the chall zip. Manual download instructions: {self.manual_download_instructions}"
            )

        if self.config.folds and self.config.stratify_column:
            return [
                SplitGenerator(
                    name=fold_name,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "data"),
                        "metafile": os.path.join(data_dir, _METAFILE),
                        "stratify_column": self.config.stratify_column,
                        "fold": fold
                    },
                )
                for (fold_name, fold) in self.config.folds.items()]
        else:
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "data"),
                        "metafile": os.path.join(data_dir, _METAFILE),
                    },
                ),
            ]

    def _generate_examples(self, filepath, metafile, stratify_column: str = None, fold: List = None):
        """
        Handles the input defined in _split_generators to yield (key, example) tuples from the dataset.

        :param filepath: The path where the data is located.
        :param metafile: The metafile describing the chall data.
        :param stratify_column: The meta column to stratify by.
        :param fold: A list of values to define the splits. Only works in combination with `stratify_column`.
        """
        logger.info("generating examples from = %s", filepath)

        with open(metafile, 'r') as file:
            metadata = json.load(file)

        for row in metadata["data"]:
            # Skip rows that do not belong to the requested fold.
            if stratify_column and str(row[stratify_column]) not in fold:
                continue

            transcript_file = os.path.join(filepath, row["transcript_file"])
            with open(transcript_file, 'r') as file:
                transcript = json.load(file)

            audio_id = row['audio_id']
            audio_file_path = os.path.join(filepath, row["audio_file"])

            if self.config.split_segments:
                yield from self._generate_utterance_examples(audio_id, str(audio_file_path), row, transcript)
            else:
                yield from self._generate_transcript_examples(audio_id, str(audio_file_path), row, transcript)

    def _generate_transcript_examples(self, audio_id: str, audio_file_path: str, data: dict, transcript: dict):
        """
        Generates examples based on the entire audio file and its associated transcript metadata. This method reads the
        entire audio file, extracts speaker and segment information from the transcript, and packages these along with
        the audio data into a dictionary that is then yielded.

        :param audio_id: A unique identifier for the audio file.
        :param audio_file_path: The file system path to the audio file.
        :param data: A dictionary of the metadata.
        :param transcript: A dictionary containing details of the transcript, including speakers and segments.
        :return: Yields a tuple containing the audio ID and the enriched transcript dictionary.
        """
        transcript_data = data.copy()
        transcript_data["speakers"] = transcript.get("speakers", [])
        transcript_data["segments"] = transcript.get("segments", [])

        transcript_data["raw_text"] = raw_text = self.get_raw_text(
            [word for segment in transcript_data["segments"] for word in segment["words"]]
        )
        transcript_data["clear_text"] = self.get_clear_text(raw_text)

        with sf.SoundFile(audio_file_path) as audio_file:
            audio = audio_file.read(dtype='float32')

        transcript_data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": _SAMPLE_RATE}
        yield audio_id, transcript_data

    def _generate_utterance_examples(self, audio_id: str, audio_file_path: str, data: dict, transcript: dict):
        """
        Generates examples from audio segments based on the transcript provided. Each segment is processed to produce
        an utterance which includes the audio slice and metadata.

        :param audio_id: A unique identifier for the audio file.
        :param audio_file_path: The filesystem path to the audio file.
        :param data: A dictionary of the metadata row, copied into each utterance example.
        :param transcript: A dictionary containing transcript details with segments of spoken words.
        :return: Yields a tuple containing the segment ID and the enriched utterance dictionary.
        """
        segments = transcript.get("segments", [])
        segments = self._process_segments(segments)

        with sf.SoundFile(audio_file_path) as track:
            if not track.seekable():
                raise ValueError("Audio file is not seekable.")

            for segment_i, segment in enumerate(segments):
                segment_data = data.copy()
                segment_id = f"{audio_id}_{str(segment_i).rjust(3, '0')}"

                segment_data["audio_id"] = segment_id
                segment_data["speaker"] = segment["speaker"]
                segment_data["words"] = segment["words"]

                segment_data["raw_text"] = raw_text = self.get_raw_text(segment["words"])
                segment_data["clear_text"] = self.get_clear_text(raw_text)

                # Slice the audio by seeking to the segment start and reading only its frames.
                start_time = segment["words"][0]["start"]
                end_time = segment["words"][-1]["end"]
                start_frame = int(_SAMPLE_RATE * start_time)
                frames_to_read = int(_SAMPLE_RATE * (end_time - start_time))

                track.seek(start_frame)
                audio = track.read(frames_to_read, dtype='float32')
                segment_data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": _SAMPLE_RATE}

                yield segment_id, segment_data

    def _process_segments(self, segments):
        """
        Processes the list of segments based on the configured rules.

        :param segments: A list of segment dictionaries.
        :return: A list of processed segment dictionaries after applying all the filtering and splitting rules.
        """
        if self.config.max_pause_length is not None:
            segments = self._split_and_remove_long_pauses(segments)

        if self.config.max_chunk_length is not None:
            segments = self._split_long_segments(segments)

        if self.config.remove_trailing_pauses:
            segments = self._remove_trailing_pauses(segments)

        if self.config.min_chunk_length is not None or self.config.max_chunk_length is not None:
            segments = self._filter_segments_by_duration(segments, self.config.min_chunk_length, self.config.max_chunk_length)

        return segments

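    # Processing order sketch with the "asr_acl" settings (illustrative):
    #   1. _split_and_remove_long_pauses: a "(...)" pause of >= 12 s splits the
    #      segment in two and the pause marker is dropped.
    #   2. _split_long_segments: any segment longer than 12 s is chunked.
    #   3. _remove_trailing_pauses: "(...)" markers at segment edges are stripped.
    #   4. _filter_segments_by_duration: chunks shorter than 0.5 s or longer
    #      than 12 s are discarded.
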
    @staticmethod
    def _remove_trailing_pauses(segments: List[dict]) -> List[dict]:
        """
        Removes pause markers at the start/end of the utterance in each segment.

        Example:
            [["Hello", "World!", "(...)"]] --> [["Hello", "World!"]]
            [["(...)", "Hello", "World!"]] --> [["Hello", "World!"]]

        :return: The list of segments with leading/trailing pause markers removed; segments left empty are dropped.
        """
        cleaned_segments = []
        for segment in segments:
            if len(segment["words"]) > 0 and segment["words"][-1]["text"].strip() == "(...)":
                segment["words"] = segment["words"][:-1]
            if len(segment["words"]) > 0 and segment["words"][0]["text"].strip() == "(...)":
                segment["words"] = segment["words"][1:]

            # Collect into a new list instead of removing while iterating,
            # which would skip the element following each removal.
            if segment["words"]:
                cleaned_segments.append(segment)
        return cleaned_segments

    def _split_and_remove_long_pauses(self, segments: List[dict]) -> List[dict]:
        """
        Removes overly long pauses by splitting the affected segment in two and dropping the pause marker.

        Example (assuming (...) is longer than max_pause_length):
            [["Hello", "(...)", "World!"]] --> [["Hello"], ["World!"]]

        :return: List of segments with long pauses removed.
        """
        split_segments = []
        for segment in segments:
            if any(w["end"] - w["start"] >= self.config.max_pause_length and w["text"].strip() == "(...)" for w in segment["words"]):
                start_i = 0
                for i, word in enumerate(segment["words"]):
                    w_duration = word["end"] - word["start"]
                    if w_duration >= self.config.max_pause_length and word["text"].strip() == "(...)":
                        if len(segment["words"][start_i:i]) > 0:
                            split_segments.append({"speaker": segment["speaker"], "words": segment["words"][start_i:i]})
                        start_i = i + 1

                if len(segment["words"][start_i:]) > 0:
                    split_segments.append({"speaker": segment["speaker"], "words": segment["words"][start_i:]})
            else:
                split_segments.append(segment)
        return split_segments

    @staticmethod
    def _filter_segments_by_duration(segments: List[dict], min_duration: float = None, max_duration: float = None):
        """
        Removes segments whose duration lies outside the allowed bounds.

        :param min_duration: The minimum duration allowed for a segment.
        :param max_duration: The maximum duration allowed for a segment.
        :return: A list of the segments whose duration lies within the bounds.
        """
        filtered_segments = []
        for segment in segments:
            duration = segment["words"][-1]["end"] - segment["words"][0]["start"]
            if min_duration is not None and duration < min_duration:
                continue
            if max_duration is not None and duration > max_duration:
                continue
            filtered_segments.append(segment)

        return filtered_segments

    def _split_long_segments(self, segments: List[dict]) -> List[dict]:
        """
        Splits segments into smaller chunks if their duration exceeds the maximum chunk length specified in the config.

        Example (assuming each word is longer than max_chunk_length):
            [["Hello", "World!"]] --> [["Hello"], ["World!"]]

        :param segments: List of original segments from the transcript.
        :return: List of adjusted segments, potentially split into smaller chunks.
        """
        chunked_segments = []
        for segment in segments:
            segment_start = segment["words"][0]["start"]
            segment_end = segment["words"][-1]["end"]
            duration = segment_end - segment_start

            if duration >= self.config.max_chunk_length:
                chunks = self._create_chunks(segment)
                for chunk in chunks:
                    if len(chunk) > 0:
                        chunked_segments.append({"speaker": segment["speaker"], "words": chunk})
            else:
                chunked_segments.append(segment)
        return chunked_segments

    def _create_chunks(self, segment: dict):
        """
        Splits a given segment into chunks of words, each at most max_chunk_length seconds long.

        :param segment: The segment to be divided into chunks.
        :return: A list of chunks, where each chunk is a list of words.
        """
        list_of_chunks = []
        chunk_start = segment["words"][0]["start"]
        chunk_words = []
        for word in segment["words"]:
            # Close the current chunk as soon as adding this word would exceed the limit.
            if (word["end"] - chunk_start) >= self.config.max_chunk_length:
                list_of_chunks.append(chunk_words)
                chunk_start = word["start"]
                chunk_words = []
            chunk_words.append(word)

        if len(chunk_words) > 0:
            list_of_chunks.append(chunk_words)

        return list_of_chunks

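    # Chunking sketch (illustrative, assuming max_chunk_length=2.0) for words
    # spanning [0.0-1.5], [1.6-2.5], [2.6-3.0]:
    #   chunk 1: [0.0-1.5]            (the second word would end 2.5 s after chunk start)
    #   chunk 2: [1.6-2.5], [2.6-3.0] (3.0 - 1.6 = 1.4 s, still under the limit)
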
    @staticmethod
    def get_raw_text(words: List[Dict]) -> str:
        """
        Joins the text of the given word tokens into a single space-separated string.

        :param words: A list of word dictionaries, each with a "text" entry.
        :return: The raw transcript text.
        """
        raw_text = " ".join([word["text"] for word in words])
        return raw_text

    def get_clear_text(self, raw_text: str) -> str:
        """
        Processes the raw text to produce a clear, cleaned version by removing annotations,
        preprocessing the text, converting numbers to words, mapping special terms,
        converting to lowercase, and filtering allowed characters.

        :param raw_text: The raw input text to be processed.
        :return: A string representing the processed clear text.
        """
        clear_text = self.remove_annotations(raw_text)
        clear_text = self.preprocess_text(clear_text)

        if self.config.num_to_words:
            clear_text = self.num_to_words(clear_text)

        if self.config.special_terms_mapping:
            clear_text = self.map_special_terms(clear_text, special_terms_mapping=self.config.special_terms_mapping)

        if self.config.lowercase:
            clear_text = clear_text.lower()

        if self.config.allowed_chars:
            clear_text = self.filter_and_clean_text(clear_text, allowed_chars=self.config.allowed_chars)

        return clear_text

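    # Pipeline sketch (illustrative): with the "asr_acl" defaults, a raw
    # utterance such as "He (laughs) walked 100m <anon>" loses the "(laughs)"
    # and "<anon>" annotations, has "100m" spelled out as words, is lowercased,
    # and finally keeps only the characters in `allowed_chars`.
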
    @staticmethod
    def preprocess_text(transcript: str) -> str:
        """
        Preprocesses the text by removing words between brackets and parentheses,
        standardizing spaces before apostrophes, removing commas between digits,
        and replacing special characters.

        :param transcript: The input transcript to preprocess.
        :return: The preprocessed transcript with various text normalizations applied.
        """
        # Remove <...> and [...] annotations, then any remaining (...) groups.
        transcript = re.sub(r"[<\[][^>\]]*[>\]]", "", transcript)
        transcript = re.sub(r"\(([^)]+?)\)", "", transcript)
        # Attach apostrophes to the preceding word and strip thousands separators.
        transcript = re.sub(r"\s+'", "'", transcript)
        transcript = re.sub(r"(\d),(\d)", r"\1\2", transcript)
        # Replace sentence-final periods (but not decimal points) with a space.
        transcript = re.sub(r"\.([^0-9]|$)", r" \1", transcript)

        # Map accented characters to their base form and normalize separators.
        special_chars = {
            'ß': 'ss', 'ç': 'c', 'á': 'a', 'à': 'a', 'â': 'a', 'é': 'e', 'è': 'e', 'ê': 'e', 'í': 'i', 'ì': 'i', 'î': 'i',
            'ó': 'o', 'ò': 'o', 'ô': 'o', 'ú': 'u', 'ù': 'u', 'û': 'u', '-': ' ', '\u2013': ' ', '\xad': ' ', '/': ' '
        }
        for char, replacement in special_chars.items():
            transcript = transcript.replace(char, replacement)

        # Collapse runs of spaces/tabs into a single space.
        transcript = re.compile(r'[ \t]+').sub(' ', transcript)

        return transcript

    @staticmethod
    def remove_annotations(transcript: str) -> str:
        """
        Removes specific annotations and transcription conventions from the transcript.

        :param transcript: The transcript to preprocess.
        :return: The preprocessed transcript with conventions and annotations removed.
        """
        transcript = transcript.replace('@g', '')
        transcript = transcript.replace('@?', '')
        transcript = transcript.replace('@!', '')
        # Replace the double dash before the single dash; the reverse order
        # would leave no '--' occurrences for the second replacement to find.
        transcript = transcript.replace('--', '')
        transcript = transcript.replace('-', '')
        transcript = transcript.replace('(...)', '')
        transcript = transcript.replace('(Whispering)', '')
        transcript = transcript.replace('(whispers)', '')
        transcript = transcript.replace('(whispering)', '')
        transcript = transcript.replace('(unv.)', '')
        transcript = transcript.replace('(laughing)', '')
        transcript = transcript.replace('(laughs)', '')
        transcript = transcript.replace('(Laughter)', '')
        return transcript

    @staticmethod
    def num_to_words(transcript: str) -> str:
        """
        Converts numerical expressions in the transcript to their word equivalents using the num2words library.

        :param transcript: The input transcript containing numerical expressions.
        :return: The transcript with numerical expressions converted to words.
        """
        from num2words import num2words

        def replace(match):
            number_str = match.group(0)
            if re.match(r'\d+\.', number_str):
                # A number followed by a period: treat it as an ordinal only if
                # the next character is a lowercase letter.
                next_char_index = match.end()
                if next_char_index < len(transcript) and transcript[next_char_index].islower():
                    number = int(number_str[:-1])
                    return num2words(number, to='ordinal')
                else:
                    return number_str
            elif re.match(r'\d{4}s', number_str):
                # Decades such as "1990s".
                number = int(number_str[:-1])
                return num2words(number, to='year') + "s"
            elif re.match(r'\d+m\b', number_str):
                # Metric lengths such as "100m".
                number = int(number_str[:-1])
                return num2words(number) + " meters"
            elif number_str[-2:] in ['st', 'nd', 'rd', 'th']:
                # English ordinals such as "2nd".
                number = int(re.match(r'\d+', number_str).group(0))
                return num2words(number, to='ordinal')
            else:
                # Plain cardinals and decimals.
                return num2words(number_str)

        # Alternations are tried left to right, so the specific patterns must
        # precede the generic \b\d+(\.\d+)?\b one; otherwise a trailing-period
        # number like "3." would match as plain "3" and the ordinal branch
        # above could never fire. The (?!\d) lookahead keeps decimals intact.
        pattern = re.compile(r'\b\d+(st|nd|rd|th)\b|\b\d{4}s\b|\b\d+m\b|\b\d+\.(?!\d)\b|\b\d+(\.\d+)?\b')

        new_sentence = pattern.sub(replace, transcript)

        return new_sentence

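    # Illustrative behavior (the exact wording depends on the installed
    # num2words version and its default English locale):
    #   "He won 3 races"  -> "He won three races"
    #   "the 2nd try"     -> "the second try"
    #   "the 1990s"       -> "the nineteen ninety" + "s"
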
    @staticmethod
    def map_special_terms(transcript: str, special_terms_mapping: dict):
        """
        Maps special terms in the transcript to their replacements using a dictionary of term/replacement pairs.

        :param transcript: The input transcript containing special terms to be mapped.
        :param special_terms_mapping: A dictionary where keys are special terms and values are their replacements.
        :return: The transcript with special terms replaced.
        """
        for term, replacement in special_terms_mapping.items():
            transcript = re.sub(r'\b' + re.escape(term) + r'\b', replacement, transcript, flags=re.IGNORECASE)
        return transcript

    @staticmethod
    def filter_and_clean_text(transcript: str, allowed_chars: set = None):
        """
        Filters the transcript to include only the allowed characters and normalizes
        whitespace by removing extra spaces and trimming the text.

        :param transcript: The input transcript to be filtered and cleaned.
        :param allowed_chars: A set of allowed characters. If provided, only these characters will be retained in the transcript.
        :return: The filtered and cleaned transcript.
        """
        if allowed_chars is not None:
            transcript = ''.join([char for char in transcript if char in allowed_chars])

        transcript = re.compile(r'[ \t]+').sub(' ', transcript).strip()

        return transcript