import csv

import datasets

_DESCRIPTION = """\
Dusha is a bi-modal corpus suitable for speech emotion recognition (SER) tasks.
The dataset consists of audio recordings with Russian speech and their emotional labels.
The corpus contains approximately 350 hours of data. Four basic emotions that usually appear in a dialog with
a virtual assistant were selected: Happiness (Positive), Sadness, Anger and Neutral emotion.
"""

_HOMEPAGE = "https://github.com/salute-developers/golos/tree/master/dusha#dusha-dataset"

_DATA_URL_TRAIN = "https://huggingface.co/datasets/KELONMYOSA/dusha_emotion_audio/resolve/main/data/train.tar.gz"
_DATA_URL_TEST = "https://huggingface.co/datasets/KELONMYOSA/dusha_emotion_audio/resolve/main/data/test.tar.gz"
_METADATA_URL_TRAIN = "https://huggingface.co/datasets/KELONMYOSA/dusha_emotion_audio/resolve/main/data/train.csv"
_METADATA_URL_TEST = "https://huggingface.co/datasets/KELONMYOSA/dusha_emotion_audio/resolve/main/data/test.csv"


class Dusha(datasets.GeneratorBasedBuilder):
    """Builder for the Dusha speech emotion recognition corpus."""

    DEFAULT_WRITER_BATCH_SIZE = 256

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "label": datasets.ClassLabel(num_classes=5, names=["neutral", "positive", "sad", "angry", "other"]),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
        )
    def _split_generators(self, dl_manager):
        # Download the metadata CSVs and the audio archives for both splits.
        metadata_train = dl_manager.download(_METADATA_URL_TRAIN)
        metadata_test = dl_manager.download(_METADATA_URL_TEST)
        archive_train = dl_manager.download(_DATA_URL_TRAIN)
        archive_test = dl_manager.download(_DATA_URL_TEST)

        # iter_archive streams files straight out of the tar archives without
        # extracting them, which keeps the builder usable in streaming mode.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_files": dl_manager.iter_archive(archive_train),
                    "metadata": metadata_train,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_files": dl_manager.iter_archive(archive_test),
                    "metadata": metadata_test,
                },
            ),
        ]
    def _generate_examples(self, audio_files, metadata):
        # Map each audio path from the metadata CSV to its label.
        examples = {}
        with open(metadata, encoding="utf-8") as f:
            csv_reader = csv.reader(f, delimiter=",")
            next(csv_reader)  # skip the header row
            for row in csv_reader:
                audio_path, label = row
                examples[audio_path] = {
                    "file": audio_path,
                    "label": label,
                }

        # Stream the audio files out of the archive and join them with their labels.
        key = 0
        for path, f in audio_files:
            if path in examples:
                audio = {"path": path, "bytes": f.read()}
                yield key, {**examples[path], "audio": audio}
                key += 1
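

# Minimal usage sketch (assumption: this script is hosted on the Hub as
# "KELONMYOSA/dusha_emotion_audio", matching the URLs above). Guarded so it only
# runs when the file is executed directly rather than imported by `datasets`.
if __name__ == "__main__":
    from datasets import load_dataset

    # Depending on the `datasets` version, trust_remote_code=True may be required
    # to run a community loading script like this one.
    ds = load_dataset("KELONMYOSA/dusha_emotion_audio", split="test", trust_remote_code=True)
    sample = ds[0]
    # ClassLabel stores labels as integers; int2str maps them back to names.
    print(sample["file"], ds.features["label"].int2str(sample["label"]))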