dash8x committed on
Commit
c09a002
1 Parent(s): a8f2c54

Added files

Browse files
data/audio_test.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:502cfbe0ef809e0ab6d84a452874d048f50f4b64d8c1ad101949e877e75a2e77
3
+ size 72286971
data/audio_train.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4653dde51b2bfb30614b6cc8d8de57d594ca7e6f180d5b2ce43e1cb99263ef48
3
+ size 864461315
data/audio_validation.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:122dec91cd6815eab7ba38fa84d4ba1270a2a8ae8f110d1e7de63dc143782e78
3
+ size 106287604
data/metadata_test.csv.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96c9cef76e8f4962946126c86eb7a24a9d86bb472cab87a14f7d2eb4bac54a77
3
+ size 7929
data/metadata_train.csv.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcf699110c7c67f813fa2c6cd21001d22e20b535b8f29633c7da24a9974b498b
3
+ size 63952
data/metadata_validation.csv.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:310eff5a63d8429eeebf0415070207a1ec5b19b0744755ccbcc8fe2845e19687
3
+ size 9438
dv-presidential-speech.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import csv
16
+ import os
17
+ import datasets
18
+
19
+
20
+ _CITATION = """\
21
+ @misc{Sofwath_2023,
22
+ title = "Dhivehi Presidential Speech Dataset",
23
+ url = "https://huggingface.co/datasets/dash8x/presidential_speech",
24
+ journal = "Hugging Face",
25
+ author = "Sofwath",
26
+ year = "2018",
27
+ month = jul
28
+ }
29
+ """
30
+
31
+ _DESCRIPTION = """\
32
+ Dhivehi Presidential Speech is a Dhivehi speech dataset created from data extracted and
33
+ processed by [Sofwath](https://github.com/Sofwath) as part of a collection of Dhivehi
34
+ datasets found [here](https://github.com/Sofwath/DhivehiDatasets).
35
+
36
+ The dataset contains around 2.5 hrs (1 GB) of speech collected from Maldives President's Office
37
+ consisting of 7 speeches given by President Yaameen Abdhul Gayyoom.
38
+ """
39
+
40
+ _HOMEPAGE = 'https://github.com/Sofwath/DhivehiDatasets'
41
+
42
+ _LICENSE = 'CC BY-NC-SA 4.0'
43
+
44
+ # Source data: 'https://drive.google.com/file/d/1vhMXoB2L23i4HfAGX7EYa4L-sfE4ThU5/view?usp=sharing'
45
+ _DATA_URL = 'data'
46
+
47
+ _PROMPTS_URLS = {
48
+ 'train': 'data/metadata_train.tsv.gz',
49
+ 'test': 'data/metadata_test.tsv.gz',
50
+ 'validation': 'data/metadata_validation.tsv.gz',
51
+ }
52
+
53
+
54
class DhivehiPresidentialSpeech(datasets.GeneratorBasedBuilder):
    """Dhivehi Presidential Speech is a free Dhivehi speech corpus consisting of around 2.5 hours of
    recorded speech prepared for Dhivehi Automatic Speech Recognition task."""

    VERSION = datasets.Version('1.0.0')

    def _info(self):
        """Return the dataset metadata: features, homepage, license and citation."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    'path': datasets.Value('string'),
                    'audio': datasets.Audio(sampling_rate=16_000),
                    'sentence': datasets.Value('string'),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return one SplitGenerator per train/test/validation split.

        Audio archives are downloaded for all modes and additionally extracted
        when not streaming; metadata CSVs are always downloaded and extracted.
        """
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs; it accepts nested structures and returns
        # the same structure with URLs replaced by local paths.
        dl_manager.download_config.ignore_url_params = True
        audio_path = {}
        local_extracted_archive = {}
        metadata_path = {}

        split_type = {
            'train': datasets.Split.TRAIN,
            'test': datasets.Split.TEST,
            'validation': datasets.Split.VALIDATION,
        }

        for split in split_type:
            audio_path[split] = dl_manager.download(f'{_DATA_URL}/audio_{split}.tar.gz')
            # In streaming mode the archive is not extracted; it is iterated
            # directly via iter_archive in gen_kwargs below.
            local_extracted_archive[split] = (
                dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None
            )
            metadata_path[split] = dl_manager.download_and_extract(f'{_DATA_URL}/metadata_{split}.csv.gz')

        path_to_clips = 'dv-presidential-speech'

        return [
            datasets.SplitGenerator(
                name=split_type[split],
                gen_kwargs={
                    'local_extracted_archive': local_extracted_archive[split],
                    'audio_files': dl_manager.iter_archive(audio_path[split]),
                    # BUG FIX: metadata_path[split] is already the local path
                    # returned by download_and_extract above; it was previously
                    # passed through download_and_extract a second time.
                    'metadata_path': metadata_path[split],
                    'path_to_clips': f'{path_to_clips}-{split}/waves',
                },
            ) for split in split_type
        ]

    def _generate_examples(
        self,
        local_extracted_archive,
        audio_files,
        metadata_path,
        path_to_clips,
    ):
        """Yield (id, example) pairs by joining the metadata CSV with the audio archive.

        Args:
            local_extracted_archive: local dir of the extracted audio archive,
                or None when streaming.
            audio_files: iterator of (path, file-object) pairs from iter_archive.
            metadata_path: local path to the extracted metadata CSV
                (column 0: file stem, column 1: transcription).
            path_to_clips: prefix of audio paths inside the archive (unused here
                beyond the signature; kept for interface compatibility).
        """
        data_fields = list(self._info().features.keys())
        metadata = {}
        with open(metadata_path, 'r', encoding='utf-8') as f:
            reader = csv.reader(f)
            for row in reader:
                # BUG FIX: build a fresh dict per row. Previously a single dict
                # was created outside the loop and mutated, so every metadata
                # entry aliased the same object and held only the last row's
                # values.
                row_dict = {
                    'path': row[0],
                    'sentence': row[1],
                }

                # if data is incomplete, fill with empty values
                for field in data_fields:
                    if field not in row_dict:
                        row_dict[field] = ''

                metadata[row_dict['path']] = row_dict

        # BUG FIX: removed the stray `os.path.join(path_to_clips, row[0])`
        # statement that discarded its result and raised NameError when the
        # metadata file was empty (loop variable `row` unbound).
        id_ = 0
        for path, f in audio_files:
            file_name = os.path.splitext(os.path.basename(path))[0]

            if file_name in metadata:
                result = dict(metadata[file_name])
                # set the audio feature and the path to the extracted file
                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                result['audio'] = {'path': path, 'bytes': f.read()}
                result['path'] = path
                yield id_, result
                id_ += 1