Emilio Marinone committed on
Commit 0a5f8f6
Parent: 5101556

add loading script, dataset info

Files changed (3)
  1. .gitignore +2 -0
  2. data/train-00000-of-00001.parquet +0 -3
  3. nst_sv.py +240 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ *venv/
+ create_dummy_datasets_for_preview.py
data/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:819e266d6630bc5159deac488172424889507700953b1d2e37cd471d4c43574e
- size 74363964
nst_sv.py ADDED
@@ -0,0 +1,240 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Load the Swedish NST dataset provided by the National Library of Norway | Språkbanken.
+
+ Documentation with full description of the data: https://www.nb.no/sbfil/talegjenkjenning/16kHz_2020/se_2020/se-16khz_reorganized.pdf
+
+ TODO:
+ * add multi channel option
+ * add train-validation-test split option
+ """
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ This database was created by Nordic Language Technology for the development
+ of automatic speech recognition and dictation in Swedish.
+ In this updated version, the organization of the data has been altered to improve the usefulness of the database.
+ In the original version of the material,
+ the files were organized in a specific folder structure where the folder names were meaningful.
+ However, the file names were not meaningful, and there were also cases of files with identical names in different folders.
+ This proved to be impractical, since users had to keep the original folder structure in order to use the data.
+ The files have been renamed, such that the file names are unique and meaningful regardless of the folder structure.
+ The original metadata files were in spl format. These have been converted to JSON format.
+ The converted metadata files are also anonymized and the text encoding has been converted from ANSI to UTF-8.
+ See the documentation file for a full description of the data and the changes made to the database."""
+
+ _HOMEPAGE = "https://www.nb.no/sprakbanken/en/resource-catalogue/oai-nb-no-sbr-56/"
+
+ _LICENSE = "CC0 1.0"
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLS = {
+     "close_channel": "https://www.nb.no/sbfil/talegjenkjenning/16kHz_2020/se_2020/lydfiler_16_1.tar.gz",
+     "distant_channel": "https://www.nb.no/sbfil/talegjenkjenning/16kHz_2020/se_2020/lydfiler_16_2.tar.gz",
+     # TODO: add handling of multi channel
+     # "multi_channel": "https://www.nb.no/sbfil/talegjenkjenning/16kHz_2020/se_2020/lydfiler_16_begge.tar.gz",
+ }
+
+ _ANNOTATIONS_URL = "https://www.nb.no/sbfil/talegjenkjenning/16kHz_2020/se_2020/ADB_SWE_0467.tar.gz"
+
+
+ class NstSV(datasets.GeneratorBasedBuilder):
+     """Audio dataset for Swedish ASR provided by the National Library of Norway.
+
+     Originally, recordings were made on two channels: a close one and a distant one.
+     The channels have been separated and can be loaded independently.
+
+     TODO: enable and validate multi_channel
+     Two configurations are available:
+     - close_channel
+     - distant_channel
+
+     Main data and metadata available:
+     - audio file (bytes)
+     - manually annotated transcription (str)
+     - age (str)
+     - gender (str)
+     - region of birth (str)
+     - region of youth (str)
+     - recording session info (object)
+     - recording system (object)
+     - "type" of recording (see the detailed documentation)
+     - common_voice-like structured information
+       (the info mentioned above, structured like the Common Voice dataset for ease of merging)
+
+     """
+
+     VERSION = datasets.Version("1.1.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="close_channel", version=VERSION, description="Close channel recordings"),
+         datasets.BuilderConfig(name="distant_channel", version=VERSION, description="Distant channel recordings"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "close_channel"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         features_dict = {
+             "info": dict,
+             "metadata": dict,
+             "pid": datasets.Value("string"),
+             "session": dict,
+             "system": dict,
+             "val_recordings": list,
+             "audio": datasets.features.Audio(sampling_rate=16000),
+             'client_id': datasets.Value("string"),
+             'path': datasets.Value("string"),
+             'sentence': datasets.Value("string"),
+             'up_votes': datasets.Value("int32"),
+             'down_votes': datasets.Value("int32"),
+             'age': datasets.Value("string"),
+             'gender': datasets.Value("string"),
+             'accent': datasets.Value("string"),
+             'locale': datasets.Value("string"),
+             'segment': datasets.Value("string"),
+             'channel': datasets.Value("string")
+         }
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features(features_dict),
+             # The features are the same for both configurations, so they are defined once above
+             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+         annotations_dir = dl_manager.download_and_extract(_ANNOTATIONS_URL)
+
+         return [
+             datasets.SplitGenerator(
+                 name="all",
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "annotations_dir": annotations_dir
+                 },
+             ),
+             # TODO: add split handling
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.TRAIN,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={
+             #         "filepath": os.path.join(data_dir, "test.jsonl"),
+             #         "split": "test"
+             #     },
+             # ),
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.TEST,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={
+             #         "filepath": os.path.join(data_dir, "test.jsonl"),
+             #         "split": "test"
+             #     },
+             # ),
+             # datasets.SplitGenerator(
+             #     name=datasets.Split.VALIDATION,
+             #     # These kwargs will be passed to _generate_examples
+             #     gen_kwargs={
+             #         "filepath": os.path.join(data_dir, "dev.jsonl"),
+             #         "split": "dev",
+             #     },
+             # ),
+         ]
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, data_dir, annotations_dir):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+
+         if self.config.name == "close_channel":
+             channel_ext = "-1"
+         else:
+             channel_ext = "-2"
+
+         for annotation_filename in os.listdir(annotations_dir):
+
+             annotations_filepath = os.path.join(annotations_dir, annotation_filename)
+             with open(annotations_filepath, "r") as f:
+                 annotation = json.load(f)
+
+             for recording in annotation["val_recordings"]:
+                 # channel_ext can be either "-1" or "-2",
+                 # so if the file is "123456.wav",
+                 # the close channel file is "123456-1.wav"
+                 # and the distant channel file is "123456-2.wav"
+                 rel_filepath = f'se/{annotation["pid"]}/{annotation["pid"]}_{recording["file"]}'.replace(".wav", f"{channel_ext}.wav")
+                 audio_filepath = f"{data_dir}/{rel_filepath}"
+                 if os.path.exists(audio_filepath):
+                     with open(audio_filepath, "rb") as f:
+                         audio_bytes = f.read()
+                     result = {
+                         "info": annotation["info"],
+                         "metadata": annotation["metadata"],
+                         "pid": annotation["pid"],
+                         "session": annotation["session"],
+                         "system": annotation["system"],
+                         "val_recordings": annotation["val_recordings"],
+                         "client_id": annotation["info"]["Speaker_ID"],
+                         'path': rel_filepath,
+                         'audio': {"path": rel_filepath, "bytes": audio_bytes},
+                         'sentence': recording["text"],
+                         'up_votes': 0,
+                         'down_votes': 0,
+                         'age': annotation["info"]["Age"],
+                         'gender': annotation["info"]["Sex"],
+                         'accent': "",
+                         'locale': "sv",
+                         'segment': ""
+                     }
+
+                     yield rel_filepath, result
+
+
+
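For context, here is a minimal usage sketch of the new loading script. It assumes the nst_sv.py file added above is available in the working directory and that the datasets library is installed; loading triggers the download and extraction of the audio and annotation archives from nb.no, which are large.

    import datasets

    # Pick one of the two configurations defined in BUILDER_CONFIGS;
    # the script currently exposes a single split named "all".
    ds = datasets.load_dataset("nst_sv.py", "close_channel")

    example = ds["all"][0]
    print(example["sentence"])   # manually annotated transcription
    print(example["client_id"])  # speaker id taken from the annotation's "info" block

The same call with "distant_channel" loads the distant-microphone recordings instead.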