carlosdanielhernandezmena commited on
Commit
4ae7deb
1 Parent(s): 470b43a

Adding audio files to the repo

Browse files
corpus/files/metadata_dev.tsv ADDED
The diff for this file is too large to render. See raw diff
 
corpus/files/metadata_test.tsv ADDED
The diff for this file is too large to render. See raw diff
 
corpus/files/metadata_train.tsv ADDED
The diff for this file is too large to render. See raw diff
 
corpus/files/tars_dev.paths ADDED
@@ -0,0 +1 @@
 
 
1
+ corpus/speech/dev.tar.gz
corpus/files/tars_test.paths ADDED
@@ -0,0 +1 @@
 
 
1
+ corpus/speech/test.tar.gz
corpus/files/tars_train.paths ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ corpus/speech/train/train_part_01.tar.gz
2
+ corpus/speech/train/train_part_02.tar.gz
corpus/speech/dev.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fb8743db0d67640b2d7e2c657d12e421e2964f1e16ad073c198b7bcef5e2f35
3
+ size 163656214
corpus/speech/test.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4ecff5d70e1834812fcbe47fe70ed983084e87bf1c743031041a880924e7e3d
3
+ size 662690894
corpus/speech/train/train_part_01.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ce05e9131dc7c8830e0f62518046b8945ecde545627635abeecd2a0d5d41113
3
+ size 2925678508
corpus/speech/train/train_part_02.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c7d633bf3cc0f30e1deec43fc93f57b572768022e0f69dcfe51dc3f1c7c3bd8
3
+ size 2834443742
malromur_asr.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict
2
+ import os
3
+ import json
4
+ import csv
5
+
6
+ import datasets
7
+
8
+ _NAME="malromur_asr"
9
+ _VERSION="1.0.0"
10
+ _AUDIO_EXTENSIONS=".flac"
11
+
12
+ _DESCRIPTION = """
13
+ The Málrómur corpus is an open source corpus of Icelandic voice samples.
14
+ """
15
+
16
+ _CITATION = """
17
+ @inproceedings{steingrimsson2017malromur,
18
+ title={Málrómur: A manually verified corpus of recorded Icelandic speech},
19
+ author={Steingrímsson, Steinþór and Guðnason, Jón and Helgadóttir, Sigrún and Rögnvaldsson, Eiríkur},
20
+ booktitle={Proceedings of the 21st Nordic Conference on Computational Linguistics},
21
+ pages={237--240},
22
+ year={2017}
23
+ }
24
+ """
25
+
26
+ _HOMEPAGE = "https://clarin.is/en/resources/malromur/"
27
+
28
+ _LICENSE = "CC-BY-4.0, See https://creativecommons.org/licenses/by/4.0/"
29
+
30
+ _BASE_DATA_DIR = "corpus/"
31
+ _METADATA_TRAIN = os.path.join(_BASE_DATA_DIR,"files","metadata_train.tsv")
32
+ _METADATA_TEST = os.path.join(_BASE_DATA_DIR,"files", "metadata_test.tsv")
33
+ _METADATA_DEV = os.path.join(_BASE_DATA_DIR,"files", "metadata_dev.tsv")
34
+
35
+ _TARS_TRAIN = os.path.join(_BASE_DATA_DIR,"files","tars_train.paths")
36
+ _TARS_TEST = os.path.join(_BASE_DATA_DIR,"files", "tars_test.paths")
37
+ _TARS_DEV = os.path.join(_BASE_DATA_DIR,"files", "tars_dev.paths")
38
+
39
+ class MalromurAsrConfig(datasets.BuilderConfig):
40
+ """BuilderConfig for The Málrómur Corpus"""
41
+
42
+ def __init__(self, name, **kwargs):
43
+ name=_NAME
44
+ super().__init__(name=name, **kwargs)
45
+
46
+ class MalromurAsr(datasets.GeneratorBasedBuilder):
47
+ """The Málrómur Corpus"""
48
+
49
+ VERSION = datasets.Version(_VERSION)
50
+ BUILDER_CONFIGS = [
51
+ MalromurAsrConfig(
52
+ name=_NAME,
53
+ version=datasets.Version(_VERSION),
54
+ )
55
+ ]
56
+
57
+ def _info(self):
58
+ features = datasets.Features(
59
+ {
60
+ "audio_id": datasets.Value("string"),
61
+ "audio": datasets.Audio(sampling_rate=16000),
62
+ "speaker_id": datasets.Value("string"),
63
+ "gender": datasets.Value("string"),
64
+ "age": datasets.Value("string"),
65
+ "duration": datasets.Value("float32"),
66
+ "normalized_text": datasets.Value("string"),
67
+ }
68
+ )
69
+ return datasets.DatasetInfo(
70
+ description=_DESCRIPTION,
71
+ features=features,
72
+ homepage=_HOMEPAGE,
73
+ license=_LICENSE,
74
+ citation=_CITATION,
75
+ )
76
+
77
+ def _split_generators(self, dl_manager):
78
+
79
+ metadata_train=dl_manager.download_and_extract(_METADATA_TRAIN)
80
+ metadata_test=dl_manager.download_and_extract(_METADATA_TEST)
81
+ metadata_dev=dl_manager.download_and_extract(_METADATA_DEV)
82
+
83
+ tars_train=dl_manager.download_and_extract(_TARS_TRAIN)
84
+ tars_test=dl_manager.download_and_extract(_TARS_TEST)
85
+ tars_dev=dl_manager.download_and_extract(_TARS_DEV)
86
+
87
+ hash_tar_files=defaultdict(dict)
88
+ with open(tars_train,'r') as f:
89
+ hash_tar_files['train']=[path.replace('\n','') for path in f]
90
+
91
+ with open(tars_test,'r') as f:
92
+ hash_tar_files['test']=[path.replace('\n','') for path in f]
93
+
94
+ with open(tars_dev,'r') as f:
95
+ hash_tar_files['dev']=[path.replace('\n','') for path in f]
96
+
97
+ hash_meta_paths={"train":metadata_train,"test":metadata_test,"dev":metadata_dev}
98
+ audio_paths = dl_manager.download(hash_tar_files)
99
+
100
+ splits=["train","dev","test"]
101
+ local_extracted_audio_paths = (
102
+ dl_manager.extract(audio_paths) if not dl_manager.is_streaming else
103
+ {
104
+ split:[None] * len(audio_paths[split]) for split in splits
105
+ }
106
+ )
107
+
108
+ return [
109
+ datasets.SplitGenerator(
110
+ name=datasets.Split.TRAIN,
111
+ gen_kwargs={
112
+ "audio_archives":[dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
113
+ "local_extracted_archives_paths": local_extracted_audio_paths["train"],
114
+ "metadata_paths": hash_meta_paths["train"],
115
+ }
116
+ ),
117
+ datasets.SplitGenerator(
118
+ name=datasets.Split.VALIDATION,
119
+ gen_kwargs={
120
+ "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["dev"]],
121
+ "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
122
+ "metadata_paths": hash_meta_paths["dev"],
123
+ }
124
+ ),
125
+ datasets.SplitGenerator(
126
+ name=datasets.Split.TEST,
127
+ gen_kwargs={
128
+ "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["test"]],
129
+ "local_extracted_archives_paths": local_extracted_audio_paths["test"],
130
+ "metadata_paths": hash_meta_paths["test"],
131
+ }
132
+ ),
133
+ ]
134
+
135
+ def _generate_examples(self, audio_archives, local_extracted_archives_paths, metadata_paths):
136
+
137
+ features = ["speaker_id","gender","age","duration","normalized_text"]
138
+
139
+ with open(metadata_paths) as f:
140
+ metadata = {x["audio_id"]: x for x in csv.DictReader(f, delimiter="\t")}
141
+
142
+ for audio_archive, local_extracted_archive_path in zip(audio_archives, local_extracted_archives_paths):
143
+ for audio_filename, audio_file in audio_archive:
144
+ audio_id = audio_filename.split(os.sep)[-1].split(_AUDIO_EXTENSIONS)[0]
145
+ path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path else audio_filename
146
+
147
+ yield audio_id, {
148
+ "audio_id": audio_id,
149
+ **{feature: metadata[audio_id][feature] for feature in features},
150
+ "audio": {"path": path, "bytes": audio_file.read()},
151
+ }