File size: 4,558 Bytes
a356589
 
88677fe
a356589
88677fe
 
 
a356589
88677fe
 
 
 
 
 
a356589
 
 
 
 
 
 
 
 
 
 
 
 
88677fe
a356589
88677fe
 
 
 
 
 
 
 
a356589
88677fe
 
a356589
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88677fe
 
a356589
88677fe
617ccb8
88677fe
 
 
a356589
88677fe
 
 
 
a356589
 
617ccb8
 
a356589
617ccb8
 
 
a356589
617ccb8
a356589
617ccb8
a356589
617ccb8
a356589
88677fe
617ccb8
 
88677fe
a356589
 
 
617ccb8
a356589
617ccb8
a356589
617ccb8
a356589
617ccb8
a356589
 
 
 
 
 
 
 
 
 
 
 
88677fe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
from pathlib import Path

import datasets
import pandas as pd

# BibTeX citation surfaced through `datasets.DatasetInfo.citation`.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {speech-emotion-recognition-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

# Human-readable dataset card text, surfaced through
# `datasets.DatasetInfo.description`.
_DESCRIPTION = """\
The audio dataset consists of a collection of texts spoken with four distinct
emotions. These texts are spoken in English and represent four different
emotional states: **euphoria, joy, sadness and surprise**.
Each audio clip captures the tone, intonation, and nuances of speech as
individuals convey their emotions through their voice.
The dataset includes a diverse range of speakers, ensuring variability in age,
gender, and cultural backgrounds*, allowing for a more comprehensive
representation of the emotional spectrum.
The dataset is labeled and organized based on the emotion expressed in each
audio sample, making it a valuable resource for emotion recognition and
analysis. Researchers and developers can utilize this dataset to train and
evaluate machine learning models and algorithms, aiming to accurately
recognize and classify emotions in speech.
"""
# Repository name on the Hugging Face Hub; reused to build the URLs below.
_NAME = 'speech-emotion-recognition-dataset'

# Dataset card page on the Hub.
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = "cc-by-nc-nd-4.0"

# Base URL for the downloadable data files (audio archive + CSV annotations).
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class SpeechEmotionRecognitionDataset(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro speech-emotion-recognition dataset.

    Each example is a "set" of four recordings of the same text spoken with
    four emotions (euphoric, joyfully, sad, surprised), joined with speaker
    metadata (text, gender, age, country) from the CSV annotations file.
    """

    # Feature column names for the four per-emotion audio files, in the
    # order the original implementation matched them.
    _EMOTIONS = ('euphoric', 'joyfully', 'sad', 'surprised')

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(description=_DESCRIPTION,
                                    features=datasets.Features({
                                        'set_id': datasets.Value('string'),
                                        'euphoric': datasets.Audio(),
                                        'joyfully': datasets.Audio(),
                                        'sad': datasets.Audio(),
                                        'surprised': datasets.Audio(),
                                        'text': datasets.Value('string'),
                                        'gender': datasets.Value('string'),
                                        'age': datasets.Value('int8'),
                                        'country': datasets.Value('string')
                                    }),
                                    supervised_keys=None,
                                    homepage=_HOMEPAGE,
                                    citation=_CITATION,
                                    license=_LICENSE)

    def _split_generators(self, dl_manager):
        """Download the audio archive and annotations; expose one TRAIN split.

        `audio` is passed to `_generate_examples` as a lazy iterator over the
        extracted audio file paths; `annotations` is the local CSV path.
        """
        audio = dl_manager.download_and_extract(f"{_DATA}audio.zip")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        audio = dl_manager.iter_files(audio)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "audio": audio,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, audio, annotations):
        """Yield (index, example) pairs, one example per 4-file emotion set.

        Args:
            audio: iterable of extracted audio file paths; consecutive runs
                of 4 files belong to the same set (one file per emotion).
            annotations: path to the semicolon-separated CSV with one row per
                set_id carrying text/gender/age/country metadata.
        """
        annotations_df = pd.read_csv(annotations, sep=';')
        # Group the flat file listing into chunks of 4 — one chunk per set.
        files = list(audio)
        groups = [files[i:i + 4] for i in range(0, len(files), 4)]

        for idx, group in enumerate(groups):
            # Reset per group: previously these names leaked across
            # iterations, so a group missing an emotion silently reused the
            # prior group's file (or raised NameError on the first group).
            matched = {emotion: None for emotion in self._EMOTIONS}
            for audio_file in group:
                for emotion in self._EMOTIONS:
                    if emotion in audio_file:
                        matched[emotion] = audio_file
                        break

            # The parent directory name of the files identifies the set.
            set_id = Path(group[0]).parent.name
            # Filter the annotations once per example instead of once per
            # metadata column (the original repeated the same mask 4 times).
            row = annotations_df.loc[annotations_df['set_id'] == set_id]

            yield idx, {
                'set_id': set_id,
                'euphoric': matched['euphoric'],
                'joyfully': matched['joyfully'],
                'sad': matched['sad'],
                'surprised': matched['surprised'],
                'text': row['text'].values[0],
                'gender': row['gender'].values[0],
                'age': row['age'].values[0],
                'country': row['country'].values[0],
            }