File size: 4,674 Bytes
a356589
 
88677fe
a356589
 
88677fe
 
 
 
 
a356589
88677fe
 
 
 
 
 
a356589
 
 
 
 
 
 
 
 
 
 
 
 
88677fe
a356589
88677fe
 
 
 
 
 
 
 
a356589
88677fe
 
a356589
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88677fe
 
a356589
88677fe
a356589
88677fe
 
 
a356589
88677fe
 
 
 
a356589
 
9e51e19
a356589
 
 
 
 
 
 
 
 
 
 
 
 
 
88677fe
 
a356589
 
 
9e51e19
a356589
9e51e19
a356589
9e51e19
a356589
9e51e19
a356589
 
 
 
 
 
 
 
 
 
 
 
88677fe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
from pathlib import Path

import datasets
import numpy as np
import pandas as pd
import PIL.Image
import PIL.ImageOps

# BibTeX entry returned in DatasetInfo.citation.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {speech-emotion-recognition-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

# Long-form card text returned in DatasetInfo.description.
_DESCRIPTION = """\
The audio dataset consists of a collection of texts spoken with four distinct
emotions. These texts are spoken in English and represent four different
emotional states: **euphoria, joy, sadness and surprise**.
Each audio clip captures the tone, intonation, and nuances of speech as
individuals convey their emotions through their voice.
The dataset includes a diverse range of speakers, ensuring variability in age,
gender, and cultural backgrounds*, allowing for a more comprehensive
representation of the emotional spectrum.
The dataset is labeled and organized based on the emotion expressed in each
audio sample, making it a valuable resource for emotion recognition and
analysis. Researchers and developers can utilize this dataset to train and
evaluate machine learning models and algorithms, aiming to accurately
recognize and classify emotions in speech.
"""
# Dataset slug; also embedded in the hub URLs below.
_NAME = 'speech-emotion-recognition-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = "cc-by-nc-nd-4.0"

# Base URL of the data/ folder on the hub; audio.zip and the CSV live here.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class SpeechEmotionRecognitionDataset(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro speech-emotion-recognition dataset.

    Each example is one "set": four clips of the same text spoken with four
    emotions (euphoric, joyfully, sad, surprised), joined with per-speaker
    metadata from a semicolon-separated CSV keyed by ``set_id``.
    """

    def _info(self):
        """Return the dataset schema plus homepage/citation/license metadata."""
        return datasets.DatasetInfo(description=_DESCRIPTION,
                                    features=datasets.Features({
                                        'set_id': datasets.Value('string'),
                                        'euphoric': datasets.Audio(),
                                        'joyfully': datasets.Audio(),
                                        'sad': datasets.Audio(),
                                        'surprised': datasets.Audio(),
                                        'text': datasets.Value('string'),
                                        'gender': datasets.Value('string'),
                                        'age': datasets.Value('int8'),
                                        'country': datasets.Value('string')
                                    }),
                                    supervised_keys=None,
                                    homepage=_HOMEPAGE,
                                    citation=_CITATION,
                                    license=_LICENSE)

    def _split_generators(self, dl_manager):
        """Download the audio archive and annotation CSV; expose one TRAIN split."""
        audio = dl_manager.download_and_extract(f"{_DATA}audio.zip")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "audio": audio,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, audio, annotations):
        """Yield ``(index, example)`` pairs, one per set sub-directory.

        Args:
            audio: path to the extracted audio root; each sub-directory is one
                set (its name is the ``set_id``) holding files whose names
                start with the emotion they express.
            annotations: path to the ``;``-separated CSV with ``set_id``,
                ``text``, ``gender``, ``age`` and ``country`` columns.
        """
        annotations_df = pd.read_csv(annotations, sep=';')

        for idx, sub_dir in enumerate(Path(audio).iterdir()):
            set_id = sub_dir.name

            # Reset the clip map for every set: the original code left the
            # four variables unbound (NameError) on a first incomplete set,
            # or carried a stale path over from the previous set.
            clips = {
                'euphoric': None,
                'joyfully': None,
                'sad': None,
                'surprised': None
            }
            for audio_file in sub_dir.iterdir():
                for emotion in clips:
                    if audio_file.name.startswith(emotion):
                        clips[emotion] = audio_file
                        break

            # Filter the metadata once per set instead of four times.
            row = annotations_df.loc[annotations_df['set_id'] == set_id]

            yield idx, {
                'set_id': set_id,
                # A missing clip stays None (datasets.Audio accepts None)
                # rather than becoming the bogus string 'None'.
                'euphoric': str(clips['euphoric']) if clips['euphoric'] else None,
                'joyfully': str(clips['joyfully']) if clips['joyfully'] else None,
                'sad': str(clips['sad']) if clips['sad'] else None,
                'surprised': str(clips['surprised']) if clips['surprised'] else None,
                'text': row['text'].values[0],
                'gender': row['gender'].values[0],
                'age': row['age'].values[0],
                'country': row['country'].values[0]
            }