from pathlib import Path

import datasets
import pandas as pd

_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {speech-emotion-recognition-dataset}, |
|
author = {TrainingDataPro}, |
|
year = {2023} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
The audio dataset consists of a collection of texts spoken with four distinct |
|
emotions. These texts are spoken in English and represent four different |
|
emotional states: **euphoria, joy, sadness and surprise**. |
|
Each audio clip captures the tone, intonation, and nuances of speech as |
|
individuals convey their emotions through their voice. |
|
The dataset includes a diverse range of speakers, ensuring variability in age, |
|
gender, and cultural backgrounds*, allowing for a more comprehensive |
|
representation of the emotional spectrum. |
|
The dataset is labeled and organized based on the emotion expressed in each |
|
audio sample, making it a valuable resource for emotion recognition and |
|
analysis. Researchers and developers can utilize this dataset to train and |
|
evaluate machine learning models and algorithms, aiming to accurately |
|
recognize and classify emotions in speech. |
|
""" |
|
_NAME = 'speech-emotion-recognition-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = "cc-by-nc-nd-4.0"

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class SpeechEmotionRecognitionDataset(datasets.GeneratorBasedBuilder):
    """Speech emotion recognition dataset published by TrainingDataPro."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'set_id': datasets.Value('string'),
                'euphoric': datasets.Audio(),
                'joyfully': datasets.Audio(),
                'sad': datasets.Audio(),
                'surprised': datasets.Audio(),
                'text': datasets.Value('string'),
                'gender': datasets.Value('string'),
                'age': datasets.Value('int8'),
                'country': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE)

    def _split_generators(self, dl_manager):
        # Download and extract the audio archive and download the CSV with
        # per-set annotations, then iterate over the extracted audio files.
        audio = dl_manager.download_and_extract(f"{_DATA}audio.zip")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        audio = dl_manager.iter_files(audio)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "audio": audio,
                                        "annotations": annotations
                                    }),
        ]

    def _generate_examples(self, audio, annotations):
        annotations_df = pd.read_csv(annotations, sep=';')
        # Group the flat list of audio paths into sets of four files:
        # one recording per emotion for the same speaker and text.
        audio = list(audio)
        audio = [audio[i:i + 4] for i in range(0, len(audio), 4)]

        for idx, file_set in enumerate(audio):
            # Assign each file in the set to its emotion based on the filename.
            for audio_file in file_set:
                if 'euphoric' in audio_file:
                    euphoric = audio_file
                elif 'joyfully' in audio_file:
                    joyfully = audio_file
                elif 'sad' in audio_file:
                    sad = audio_file
                elif 'surprised' in audio_file:
                    surprised = audio_file

            # The parent directory name identifies the recording set and is
            # used to look up the matching annotation row.
            set_id = Path(file_set[0]).parent.name
            annotation = annotations_df.loc[annotations_df['set_id'] == set_id]

            yield idx, {
                'set_id': set_id,
                'euphoric': euphoric,
                'joyfully': joyfully,
                'sad': sad,
                'surprised': surprised,
                'text': annotation['text'].values[0],
                'gender': annotation['gender'].values[0],
                'age': annotation['age'].values[0],
                'country': annotation['country'].values[0]
            }
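
# A minimal usage sketch (not part of the loading script itself): assuming the
# script is published on the Hub under the repo id from _HOMEPAGE
# ("TrainingDataPro/speech-emotion-recognition-dataset"), the dataset could be
# loaded and inspected roughly like this:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("TrainingDataPro/speech-emotion-recognition-dataset",
#                       split="train")
#     sample = ds[0]
#     print(sample["set_id"], sample["gender"], sample["age"], sample["text"])
#     # Audio columns decode to dicts with "array", "sampling_rate" and "path".
#     print(sample["sad"]["sampling_rate"], len(sample["sad"]["array"]))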