holylovenia committed on
Commit
48c5ff8
1 Parent(s): c0079b4

Add dataloader

Files changed (1)
  1. YueMotion.py +139 -0
YueMotion.py ADDED
@@ -0,0 +1,139 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """YueMotion Dataset"""
+
+ import os
+
+ import datasets
+ import pandas as pd
+
+
+ _CITATION = """\
+ @misc{cahyawijaya2023crosslingual,
+       title={Cross-Lingual Cross-Age Group Adaptation for Low-Resource Elderly Speech Emotion Recognition},
+       author={Samuel Cahyawijaya and Holy Lovenia and Willy Chung and Rita Frieske and Zihan Liu and Pascale Fung},
+       year={2023},
+       eprint={2306.14517},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ YueMotion is a Cantonese speech emotion dataset.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/CAiRE/YueMotion"
+
+ _URL = "https://huggingface.co/datasets/CAiRE/YueMotion/raw/main/"
+ _URLS = {
+     "train": _URL + "train_metadata.csv",
+     "test": _URL + "test_metadata.csv",
+     "validation": _URL + "validation_metadata.csv",
+     "waves": "https://huggingface.co/datasets/CAiRE/YueMotion/resolve/main/data.tar.bz2",
+ }
+
+
+ class YueMotionConfig(datasets.BuilderConfig):
+     """BuilderConfig for YueMotion."""
+
+     def __init__(self, name="main", **kwargs):
+         """
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(name=name, **kwargs)
+
+
+ class YueMotion(datasets.GeneratorBasedBuilder):
+     """YueMotion: Cantonese speech emotion recognition for both adults and elderly. Snapshot date: 28 June 2023."""
+
+     BUILDER_CONFIGS = [
+         YueMotionConfig(
+             name="main",
+             version=datasets.Version("1.0.0", ""),
+             description=_DESCRIPTION,
+         )
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "split": datasets.Value("string"),
+                 "speaker_id": datasets.Value("string"),
+                 "path": datasets.Value("string"),
+                 "audio": datasets.Audio(sampling_rate=16_000),
+                 "gender": datasets.Value("string"),
+                 "age": datasets.Value("int64"),
+                 "sentence_id": datasets.Value("string"),
+                 "label_id": datasets.Value("int64"),
+                 "label": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "metadata_path": downloaded_files["train"],
+                     "wave_path": downloaded_files["waves"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "metadata_path": downloaded_files["test"],
+                     "wave_path": downloaded_files["waves"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "metadata_path": downloaded_files["validation"],
+                     "wave_path": downloaded_files["waves"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, metadata_path, wave_path):
+         metadata_df = pd.read_csv(metadata_path)
+
+         for index, row in metadata_df.iterrows():
+             example = {
+                 "split": row["split"],
+                 "speaker_id": row["speaker_id"],
+                 "path": os.path.join(wave_path, row["file_name"]),
+                 "audio": os.path.join(wave_path, row["file_name"]),
+                 "gender": row["gender"],
+                 "age": row["age"],
+                 "sentence_id": row["sentence_id"],
+                 "label_id": row["label_id"],
+                 "label": row["label"],
+             }
+             yield index, example
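
For reference, a minimal usage sketch of the added loader. It assumes the script is published in the CAiRE/YueMotion repository on the Hub, that an audio backend such as soundfile is installed, and that recent versions of the datasets library may additionally require trust_remote_code=True for script-based datasets; column names follow the features defined above.

    from datasets import load_dataset

    # Downloads the split metadata CSVs and the wave archive, then builds train/test/validation.
    dataset = load_dataset("CAiRE/YueMotion")

    # Each example carries speaker metadata, the emotion label, and an Audio feature
    # that decodes the wave file on access.
    sample = dataset["train"][0]
    print(sample["label"], sample["age"], sample["audio"]["sampling_rate"])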