import json
import os
import re
import string
from typing import Union, List, Dict
from datasets import DatasetInfo, BuilderConfig, GeneratorBasedBuilder, Version, Features, Value, Audio, SplitGenerator, Split, logging
from datasets.features import Sequence
import soundfile as sf
import importlib.util

_SAMPLE_RATE = 16000

_DESCRIPTION = "tbd"
_CITATION = "tbd"

_METAFILE = "chall_metadata.json"

logger = logging.get_logger(__name__)


class ChallConfig(BuilderConfig):
    split_segments: bool = False

    # settings that can only be used together with split_segments
    max_chunk_length: Union[float, None]
    min_chunk_length: Union[float, None]
    max_pause_length: Union[float, None]
    remove_trailing_pauses: bool = False

    lowercase: bool
    num_to_words: bool
    allowed_chars: set
    special_terms_mapping: dict

    stratify_column: Union[None, str]
    folds: Union[None, Dict[str, List]]

    def __init__(self, **kwargs):

        self.split_segments = kwargs.pop("split_segments", False)
        self.remove_trailing_pauses = kwargs.pop("remove_trailing_pauses", False)

        self.max_chunk_length = kwargs.pop("max_chunk_length", None)
        self.min_chunk_length = kwargs.pop("min_chunk_length", None)
        self.max_pause_length = kwargs.pop("max_pause_length", None)

        self.lowercase = kwargs.pop("lowercase", True)
        self.num_to_words = kwargs.pop("num_to_words", True)
        self.special_terms_mapping = kwargs.pop("special_terms_mapping", {})

        if self.lowercase:
            self.allowed_chars = set(string.ascii_lowercase + " äöü'")
        else:
            self.allowed_chars = set(string.ascii_lowercase + string.ascii_uppercase + " ÄÖÜäöü'")

        self.stratify_column = kwargs.pop("stratify_column", None)
        self.folds = kwargs.pop("folds", None)

        super().__init__(**kwargs)


class Chall(GeneratorBasedBuilder):
    VERSION = Version("1.0.0")

    BUILDER_CONFIG_CLASS = ChallConfig

    DEFAULT_CONFIG_NAME = "original"

    BUILDER_CONFIGS = [
        ChallConfig(
            name="original",
            split_segments=False,
            description="The 'original' configuration uses data in its raw, unmodified form while ensuring all participant "
                        "information is anonymized. This setup includes the preservation of data's original structure without "
                        "segmentation, filtering, or other preprocessing techniques. Although participant information is available, "
                        "it cannot be mapped back to individual speakers in the transcripts."
        ),
        ChallConfig(
            name="asr",
            split_segments=True,
            description="tbd"
        ),
        ChallConfig(
            name="asr_acl",
            split_segments=True,
            max_pause_length=12,
            max_chunk_length=12,
            min_chunk_length=0.5,
            remove_trailing_pauses=True,
            lowercase=True,
            num_to_words=True,
            stratify_column="intervention",
            folds={
                "fold0": ["17", "15", "1"],
                "fold1": ["13", "7", "10"],
                "fold2": ["4", "8", "6", "14"],
                "fold3": ["12", "16", "5", "19"],
                "fold4": ["9", "2", "3", "18", "11"]
            },
            description="Settings used for the paper."
        )
    ]
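
    # Usage sketch (illustrative only; the data_dir path is a placeholder,
    # config names are those defined in BUILDER_CONFIGS above):
    #
    #   from datasets import load_dataset
    #
    #   ds = load_dataset("chall", name="asr_acl", data_dir="path/to/chall")
    #
    # With the "asr_acl" config the returned DatasetDict holds the splits
    # "fold0" ... "fold4"; the other configs yield a single "train" split.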

    @property
    def manual_download_instructions(self):
        return (
            "To use the chall dataset you have to download it manually. "
            "TBD Download Instructions. "  # todo
            "Please extract all files in one folder and load the dataset with: "
            "`datasets.load_dataset('chall', data_dir='path/to/folder/folder_name')`"
        )

    def __init__(self, **kwargs):
        """
        Initializes the dataset builder class and checks for all required dependencies.
        :param kwargs: Arbitrary keyword arguments passed to the parent class's constructor
        """
        self._check_dependencies()
        super().__init__(**kwargs)

    @staticmethod
    def _check_dependencies() -> None:
        """
        Checks if all required libraries are installed and available for the dataset processing.
        """
        required_libraries = ["soundfile"]
        missing_libraries = []

        for library in required_libraries:
            if importlib.util.find_spec(library) is None:
                missing_libraries.append(library)

        if missing_libraries:
            missing_str = ", ".join(missing_libraries)
            raise ImportError(f"Missing dependencies: {missing_str}. Please install them using 'pip install {missing_str}'")

    def _info(self) -> DatasetInfo:
        """
        This method specifies the datasets.DatasetInfo object, which contains information and feature types for the dataset.
        :return: The DatasetInfo object.
        """

        # todo text (make word = timestamps)
        # todo duration
        # todo tasks

        if self.config.split_segments:
            features = Features({
                "audio_id": Value("string"),  # todo maybe shorten to id
                "intervention": Value("int32"),
                "school_grade": Value("string"),
                "area_of_school_code": Value("int32"),
                "background_noise": Value("bool"),
                "speaker": Value("string"),
                "raw_text": Value("string"),
                "clear_text": Value("string"),
                "words": Sequence(
                    {
                        "start": Value("float"),
                        "end": Value("float"),
                        "duration": Value("float"),
                        "text": Value("string"),
                    }
                ),
                "audio": Audio(sampling_rate=_SAMPLE_RATE)
            })
        else:
            features = Features({
                "audio_id": Value("string"),  # todo maybe shorten to id
                "intervention": Value("int32"),
                "school_grade": Value("string"),
                "area_of_school_code": Value("int32"),
                "raw_text": Value("string"),
                "clear_text": Value("string"),
                "participants": Sequence(
                    {
                        "pseudonym": Value("string"),
                        "gender": Value("string"),
                        "year_of_birth": Value("int32"),
                        "school_grade": Value("int32"),
                        "languages": Value("string"),
                        "estimated_l2_proficiency": Value("string")
                    }, length=-1
                ),
                "background_noise": Value("bool"),
                "speakers": Sequence(
                    {
                        "spkid": Value("string"),
                        "name": Value("string")
                    }
                ),
                "segments": Sequence(
                    {
                        "speaker": Value("string"),
                        "words": Sequence(
                            {
                                "start": Value("float"),
                                "end": Value("float"),
                                "duration": Value("float"),
                                "text": Value("string")
                            }
                        ),
                    }
                ),
                "audio": Audio(sampling_rate=_SAMPLE_RATE)
            })

        return DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """
        This method is tasked with downloading/extracting the data and defining the splits depending on the configuration.
        As this dataset requires manual download due to licensing, data must be downloaded first and then extracted.
        :param dl_manager: A datasets.download.DownloadManager that can be used to download and extract URLs.
        :return: A list of SplitGenerator objects defining the dataset splits.
        """

        # todo define splits?

        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))

        # todo read ids for splits as we do not separate them by folder

        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('chall', data_dir=...)` "
                f"that includes files unzipped from the chall zip. Manual download instructions: {self.manual_download_instructions}"
            )

        # kFold Strategy
        if self.config.folds and self.config.stratify_column:
            return [
                SplitGenerator(
                    name=fold_name,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "data"),
                        "metafile": os.path.join(data_dir, _METAFILE),
                        "stratify_column": self.config.stratify_column,
                        "fold": fold
                    },
                )
                for (fold_name, fold) in self.config.folds.items()]

        # Train Only Strategy
        else:
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={
                        "filepath": os.path.join(data_dir, "data"),
                        "metafile": os.path.join(data_dir, _METAFILE),
                    },
                ),
                # datasets.SplitGenerator(
                #     name=datasets.Split.TEST,
                #     gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _METAFILE)},
                # ),
                # datasets.SplitGenerator(
                #     name=datasets.Split.VALIDATION,
                #     gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _METAFILE)},
                # ),
            ]

    def _generate_examples(self, filepath, metafile, stratify_column: str = None, fold: List = None):
        """
        This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        :param filepath: The path where the data is located.
        :param metafile: The metafile describing the chall data.
        :param stratify_column: The meta column to stratify by.
        :param fold: A list of values that defines the split. Only works in combination with `stratify_column`.
        :return: Yields (key, example) tuples.
        """

        logger.info("generating examples from = %s", filepath)

        with open(metafile, 'r') as file:
            metadata = json.load(file)
            for row in metadata["data"]:

                # define splits if set
                if stratify_column and str(row[stratify_column]) not in fold:
                    continue

                # load transcript
                transcript_file = os.path.join(filepath, row["transcript_file"])
                with open(transcript_file, 'r') as f:
                    transcript = json.load(f)

                audio_id = row['audio_id']
                audio_file_path = os.path.join(filepath, row["audio_file"])

                if self.config.split_segments:
                    yield from self._generate_utterance_examples(audio_id, audio_file_path, row, transcript)
                else:
                    yield from self._generate_transcript_examples(audio_id, audio_file_path, row, transcript)

    def _generate_transcript_examples(self, audio_id: str, audio_file_path: str, data: dict, transcript: dict):
        """
        Generates examples based on the entire audio file and its associated transcript metadata. This method reads the
        entire audio file, extracts speaker and segment information from the transcript, and packages these along with
        the audio data into a dictionary that is then yielded.

        :param audio_id: A unique identifier for the audio file.
        :param audio_file_path: The file system path to the audio file.
        :param data: A dictionary of the metadata.
        :param transcript: A dictionary containing details of the transcript, including speakers and segments.
        :return: Yields a tuple containing the audio ID and the enriched transcript dictionary.
        """

        transcript_data = data.copy()  # Create a fresh copy of data to ensure no side effects
        transcript_data["speakers"] = transcript.get("speakers", [])
        transcript_data["segments"] = transcript.get("segments", [])

        transcript_data["raw_text"] = raw_text = self.get_raw_text([word for segment in transcript["segments"] for word in segment["words"]])
        transcript_data["clear_text"] = self.get_clear_text(raw_text)

        with sf.SoundFile(audio_file_path) as audio_file:
            audio = audio_file.read(dtype='float32')

        transcript_data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": _SAMPLE_RATE}
        yield audio_id, transcript_data

    def _generate_utterance_examples(self, audio_id: str, audio_file_path: str, data: dict, transcript: dict):
        """
        Generates examples from audio segments based on the transcript provided. Each segment is processed to produce
        an utterance which includes the audio slice and metadata.

        :param audio_id: A unique identifier for the audio file.
        :param audio_file_path: The filesystem path to the audio file.
        :param data: A dictionary of the metadata for the audio file.
        :param transcript: A dictionary containing transcript details with segments of spoken words.
        :return: Yields a tuple containing the audio ID and the enriched utterance dictionary.
        """

        segments = transcript.get("segments", [])
        segments = self._process_segments(segments)

        with sf.SoundFile(audio_file_path) as track:

            if not track.seekable():
                raise ValueError("Audio file is not seekable.")

            for segment_i, segment in enumerate(segments):
                segment_data = data.copy()  # Create a fresh copy of data for each segment
                segment_id = f"{audio_id}_{str(segment_i).rjust(3, '0')}"

                segment_data["audio_id"] = segment_id
                segment_data["speaker_id"] = segment["speaker"]
                segment_data["words"] = segment["words"]

                segment_data["raw_text"] = raw_text = self.get_raw_text(segment["words"])
                segment_data["clear_text"] = self.get_clear_text(raw_text)

                if not segment_data["clear_text"].strip():
                    continue

                start_time = segment["words"][0]["start"]
                end_time = segment["words"][-1]["end"]
                start_frame = int(_SAMPLE_RATE * start_time)
                frames_to_read = int(_SAMPLE_RATE * (end_time - start_time))
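                # e.g. a segment spanning 1.25 s - 3.75 s at 16 kHz maps to
                # start_frame = 20000 and frames_to_read = 40000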

                track.seek(start_frame)
                audio = track.read(frames_to_read)
                segment_data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": _SAMPLE_RATE}

                yield segment_id, segment_data

    def _process_segments(self, segments):
        """
        Processes the list of segments based on configured rules.
        :param segments: A list of segment dictionaries
        :return: A list of processed segment dictionaries after applying all the filtering and splitting rules.
        """
        if self.config.max_pause_length is not None:
            segments = self._split_and_remove_long_pauses(segments)

        if self.config.max_chunk_length is not None:
            segments = self._split_long_segments(segments)

        if self.config.remove_trailing_pauses:
            segments = self._remove_trailing_pauses(segments)

        # Filter by the configured chunk-duration bounds
        if self.config.min_chunk_length is not None or self.config.max_chunk_length is not None:
            segments = self._filter_segments_by_duration(segments, self.config.min_chunk_length, self.config.max_chunk_length)

        return segments

    @staticmethod
    def _remove_trailing_pauses(segments: List[dict]) -> List[dict]:
        """
        Removes pause indicators at the start/end of each segment so that no segment begins or ends with a pause.

        Example:
            [["Hello", "World!", "(...)"]] --> [["Hello", "World!"]]
            [["(...)", "Hello", "World!"]] --> [["Hello", "World!"]]

        :return: The list of segments with leading/trailing pause indicators removed; segments left empty are dropped.
        """
        cleaned_segments = []
        for segment in segments:
            if len(segment["words"]) > 0 and segment["words"][-1]["text"].strip() == "(...)":
                segment["words"] = segment["words"][:-1]
            if len(segment["words"]) > 0 and segment["words"][0]["text"].strip() == "(...)":
                segment["words"] = segment["words"][1:]

            # Keep the segment only if any words are left; building a new list
            # avoids mutating `segments` while iterating over it
            if segment["words"]:
                cleaned_segments.append(segment)
        return cleaned_segments

    def _split_and_remove_long_pauses(self, segments: List[dict]) -> List[dict]:
        """
        Removes overly long pauses within a segment by splitting the segment at each such pause and dropping the pause itself.

        Example (assuming (...) is longer than max_pause_length):
            [["Hello", "(...)", "World!"]] --> [["Hello"], ["World!"]]

        :return: List of segments with long pauses removed
        """

        split_segments = []
        for segment in segments:
            if any(w["end"] - w["start"] >= self.config.max_pause_length and w["text"].strip() == "(...)" for w in segment["words"]):
                start_i = 0
                for i, word in enumerate(segment["words"]):
                    w_duration = word["end"] - word["start"]
                    if w_duration >= self.config.max_pause_length and word["text"].strip() == "(...)":
                        if len(segment["words"][start_i:i]) > 0:
                            split_segments.append({"speaker": segment["speaker"], "words": segment["words"][start_i:i]})
                        start_i = i + 1

                if len(segment["words"][start_i:]) > 0:
                    split_segments.append({"speaker": segment["speaker"], "words": segment["words"][start_i:]})
            else:
                split_segments.append(segment)
        return split_segments

    @staticmethod
    def _filter_segments_by_duration(segments: List[dict], min_duration: float = None, max_duration: float = None):
        """
        Removes segments whose duration falls outside the allowed range.
        :param segments: A list of segment dictionaries.
        :param min_duration: The minimum duration allowed for a segment.
        :param max_duration: The maximum duration allowed for a segment.
        :return: A list of the segments whose duration lies within the given bounds.
        """

        filtered_segments = []
        for segment in segments:
            duration = segment["words"][-1]["end"] - segment["words"][0]["start"]
            if min_duration is not None and duration < min_duration:
                continue
            if max_duration is not None and duration > max_duration:
                continue
            filtered_segments.append(segment)

        return filtered_segments

    def _split_long_segments(self, segments: List[dict]) -> List[dict]:
        """
        Splits segments into smaller chunks if their duration exceeds the maximum chunk length specified in the config.

        Example (assuming each word is longer than max_chunk_length):
            [["Hello", "World!"]] --> [["Hello"], ["World!"]]

        :param segments: List of original segments from the transcript.
        :return: List of adjusted segments, potentially split into smaller chunks.
        """

        chunked_segments = []
        for segment in segments:

            segment_start = segment["words"][0]["start"]
            segment_end = segment["words"][-1]["end"]
            duration = segment_end - segment_start

            if duration >= self.config.max_chunk_length:
                chunks = self._create_chunks(segment)
                for chunk in chunks:
                    if len(chunk) > 0:
                        chunked_segments.append({"speaker": segment["speaker"], "words": chunk})
            else:
                chunked_segments.append(segment)
        return chunked_segments

    def _create_chunks(self, segment: dict):
        """
        Splits a given segment into chunks of words, each with a maximum length.
        :param segment: The segment to be divided into chunks.
        :return: A list of chunks, where each chunk is a list of words.
        """
        list_of_chunks = []
        chunk_start = segment["words"][0]["start"]
        chunk_words = []
        for word in segment["words"]:
            if (word["end"] - chunk_start) >= self.config.max_chunk_length:
                list_of_chunks.append(chunk_words)
                chunk_start = word["start"]
                chunk_words = []
            chunk_words.append(word)

        # Add final chunk
        if len(chunk_words) > 0:
            list_of_chunks.append(chunk_words)

        return list_of_chunks
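
    # Chunking illustration (times in seconds, assuming max_chunk_length=12):
    # words spanning [0.0-4.0], [4.2-9.8], [10.1-13.0] -> the third word ends
    # 13.0 s after the chunk start, so it opens a new chunk: [[w1, w2], [w3]]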

    @staticmethod
    def get_raw_text(words: List[Dict]) -> str:
        """

        """
        raw_text = " ".join([word["text"] for word in words])
        return raw_text

    def get_clear_text(self, raw_text: str) -> str:
        """
        Processes the raw text to produce a clear, cleaned version by removing annotations,
        preprocessing the text, converting numbers to words, mapping special terms,
        converting to lowercase, and filtering allowed characters.

        :param raw_text: The raw input text to be processed.
        :return: A string representing the processed clear text.
        """

        clear_text = self.remove_annotations(raw_text)
        clear_text = self.preprocess_text(clear_text)

        if self.config.num_to_words:
            clear_text = self.num_to_words(clear_text)

        if self.config.special_terms_mapping:
            clear_text = self.map_special_terms(clear_text, special_terms_mapping=self.config.special_terms_mapping)

        if self.config.lowercase:
            clear_text = clear_text.lower()

        if self.config.allowed_chars:
            clear_text = self.filter_and_clean_text(clear_text, allowed_chars=self.config.allowed_chars)

        return clear_text

    @staticmethod
    def preprocess_text(transcript: str) -> str:
        """
        Preprocesses the text by removing words between brackets and parentheses,
        standardizing spaces before apostrophes, removing commas between digits,
        and replacing special characters.

        :param transcript: The input transcript to preprocess.
        :return: The preprocessed transcript with various text normalization applied.
        """

        transcript = re.sub(r"[<\[][^>\]]*[>\]]", "", transcript)  # remove words between brackets
        transcript = re.sub(r"\(([^)]+?)\)", "", transcript)  # remove words between parenthesis
        transcript = re.sub(r"\s+'", "'", transcript)  # standardize when there's a space before an apostrophe
        transcript = re.sub(r"(\d),(\d)", r"\1\2", transcript)  # remove commas between digits
        transcript = re.sub(r"\.([^0-9]|$)", r" \1", transcript)  # remove periods not followed by numbers

        # Replace special characters
        special_chars = {
            'ß': 'ss', 'ç': 'c', 'á': 'a', 'à': 'a', 'â': 'a', 'é': 'e', 'è': 'e', 'ê': 'e', 'í': 'i', 'ì': 'i', 'î': 'i',
            'ó': 'o', 'ò': 'o', 'ô': 'o', 'ú': 'u', 'ù': 'u', 'û': 'u', '-': ' ', '\u2013': ' ', '\xad': ' ', '/': ' '
        }
        for char, replacement in special_chars.items():
            transcript = transcript.replace(char, replacement)

        # Normalize whitespace
        transcript = re.compile(r'[ \t]+').sub(' ', transcript)

        return transcript
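
    # For instance (illustrative input): "so <unk> it 's 1,000.5 (uh) km"
    # -> bracketed/parenthesised tokens dropped, "it 's" -> "it's",
    #    "1,000.5" -> "1000.5", whitespace normalized: "so it's 1000.5 km"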

    @staticmethod
    def remove_annotations(transcript: str) -> str:
        """
        Removes specific annotations and conventions from the transcript

        :param transcript: The transcript to preprocess.
        :return: The preprocessed transcript with conventions and annotations removed.
        """
        transcript = transcript.replace('@g', '')  # (Swiss-)German words
        transcript = transcript.replace('@?', '')  # best guess
        transcript = transcript.replace('@!', '')  # Errors
        transcript = transcript.replace('--', '')  # Reformulations ('--' must be removed before single '-')
        transcript = transcript.replace('-', '')  # Repetitions
        transcript = transcript.replace('(...)', '')  # Long pauses
        transcript = transcript.replace('(Whispering)', '')  # Whispering
        transcript = transcript.replace('(whispers)', '')  # Whispering
        transcript = transcript.replace('(whispering)', '')  # Whispering
        transcript = transcript.replace('(unv.)', '')  # Unintelligible passages
        transcript = transcript.replace('(laughing)', '')  # Laughing
        transcript = transcript.replace('(laughs)', '')  # Laughing
        transcript = transcript.replace('(Laughter)', '')  # Laughing
        return transcript

    @staticmethod
    def num_to_words(transcript: str) -> str:
        """
        Converts numerical expressions in the transcript to their word equivalents using the num2words library.

        :param transcript: The input transcript containing numerical expressions.
        :return: The transcript with numerical expressions converted to words.
        """

        from num2words import num2words

        def replace(match):
            number_str = match.group(0)
            if re.match(r'\d+\.', number_str):
                # Check if this is an ordinal context by looking at the following character
                next_char_index = match.end()
                if next_char_index < len(transcript) and transcript[next_char_index].islower():
                    # Convert to ordinal if followed by a lowercase letter
                    number = int(number_str[:-1])  # Remove the period
                    return num2words(number, to='ordinal')
                else:
                    # Treat as the end of a sentence, return as is
                    return number_str

            elif re.match(r'\d{4}s', number_str):
                # Convert decades
                number = int(number_str[:-1])
                return num2words(number, to='year') + "s"

            elif re.match(r'\d+m\b', number_str):
                # Convert numbers with 'm' (meters) suffix
                number = int(number_str[:-1])
                return num2words(number) + " meters"

            elif number_str[-2:] in ['st', 'nd', 'rd', 'th']:
                # Convert ordinal numbers with suffix
                number = int(re.match(r'\d+', number_str).group(0))

                return num2words(number, to='ordinal')

            else:
                # Convert cardinal numbers
                return num2words(number_str)

        # Regular expression to find numbers, ordinals, ordinals with period, decades, and numbers with 'm' suffix
        pattern = re.compile(r'\b\d+(\.\d+)?\b|\b\d+(st|nd|rd|th)\b|\b\d+\.\b|\b\d{4}s\b|\b\d+m\b')

        # Substitute numbers with their word equivalent
        new_sentence = pattern.sub(replace, transcript)

        return new_sentence
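
    # Examples of the substitutions above (illustrative; the exact wording
    # depends on the installed num2words version):
    #   "the 3rd attempt" -> "the third attempt"
    #   "a 100m race"     -> "a one hundred meters race"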

    @staticmethod
    def map_special_terms(transcript: str, special_terms_mapping: dict) -> str:
        """
        Maps special terms in the transcript to their replacements using a dictionary of term/replacement pairs.

        :param transcript: The input transcript containing special terms to be mapped.
        :param special_terms_mapping: A dictionary where keys are special terms and values are their replacements.
        :return: The transcript with special terms replaced.
        """

        for term, replacement in special_terms_mapping.items():
            transcript = re.sub(r'\b' + re.escape(term) + r'\b', replacement, transcript, flags=re.IGNORECASE)
        return transcript
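
    # e.g. a hypothetical mapping {"ok": "okay"} rewrites every standalone,
    # case-insensitive occurrence of "ok" to "okay", leaving words such as
    # "token" untouched thanks to the \b word boundaries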

    @staticmethod
    def filter_and_clean_text(transcript: str, allowed_chars: set = None) -> str:
        """
        Filters the transcript to include only the allowed characters and normalizes
        whitespace by removing extra spaces and trimming the text.

        :param transcript: The input transcript to be filtered and cleaned.
        :param allowed_chars: A set of allowed characters. If provided, only these characters will be retained in the transcript.
        :return: The filtered and cleaned transcript.
        """

        # Filter allowed characters
        if allowed_chars is not None:
            transcript = ''.join([char for char in transcript if char in allowed_chars])

        # Normalize whitespace
        transcript = re.compile(r'[ \t]+').sub(' ', transcript).strip()

        return transcript
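
# End-to-end cleaning sketch (illustrative; assumes a config with the defaults
# lowercase=True and num_to_words=True and no special_terms_mapping):
#   raw text:   "He (laughs) said@? it's the 3rd TIME"
#   clear text: "he said it's the third time"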