cahya commited on
Commit
fbcaf70
1 Parent(s): b2c6f00

add the dataset

Browse files
Files changed (4) hide show
  1. languages.py +10 -0
  2. librivox-indonesia.py +157 -0
  3. librivox-indonesia.tgz +3 -0
  4. release_stats.py +18 -0
languages.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
# Mapping from language code (as used in the transcription CSV and the
# builder config names) to a human-readable language name.
# "all" is a pseudo-code that selects every language at once.
LANGUAGES = dict(
    ace="Acehnese",
    bal="Balinese",
    bug="Bugisnese",
    id="Indonesian",
    min="Minangkabau",
    jav="Javanese",
    sun="Sundanese",
    all="All",
)
librivox-indonesia.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LibriVox-Indonesia Dataset"""
16
+
17
+
18
+ import csv
19
+ import os
20
+
21
+ import datasets
22
+ from datasets.utils.py_utils import size_str
23
+
24
+ from languages import LANGUAGES
25
+ from release_stats import STATS
26
+
27
+ _CITATION = """\
28
+ """
29
+
30
+ _HOMEPAGE = "https://huggingface.co/indonesian-nlp/librivox-indonesia"
31
+
32
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
33
+
34
+ _AUDIO_URL = "https://huggingface.co/datasets/cahya/librivox-indonesia/resolve/main/audio.tgz"
35
+
36
+
37
class LibriVoxIndonesiaConfig(datasets.BuilderConfig):
    """BuilderConfig for LibriVoxIndonesia.

    Args:
        name: Config name — a language code from ``languages.LANGUAGES``
            (or ``"all"`` for every language).
        version: Version string, e.g. ``"1.0.0"``.
        **kwargs: Per-language release statistics (``language``,
            ``release_date``, ``num_clips``, ``num_speakers``,
            ``validated_hr``, ``total_hr``, ``size_bytes``) are popped off;
            anything left is forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, name, version, **kwargs):
        # Release statistics for this language; every field is optional.
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        # BUILDER_CONFIGS may legitimately pass size_bytes=None; guard the
        # conversion so a missing size cannot raise inside size_str.
        self.size_human = size_str(self.size_bytes) if self.size_bytes is not None else None
        description = (
            f"LibriVox-Indonesia speech to text dataset in {self.language} released on {self.release_date}. "
            f"The dataset comprises {self.validated_hr} hours of transcribed speech data"
        )
        super().__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )
59
+
60
+
61
class LibriVoxIndonesia(datasets.GeneratorBasedBuilder):
    """Speech dataset built from LibriVox recordings in languages of Indonesia."""

    DEFAULT_CONFIG_NAME = "all"

    # One config per locale listed in release_stats.STATS (codes plus "all").
    BUILDER_CONFIGS = [
        LibriVoxIndonesiaConfig(
            name=lang,
            version=STATS["version"],
            language=LANGUAGES[lang],
            release_date=STATS["date"],
            num_clips=lang_stats["clips"],
            num_speakers=lang_stats["users"],
            total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
            size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
        )
        for lang, lang_stats in STATS["locales"].items()
    ]

    def _info(self):
        """Return the DatasetInfo: features, description, license and version."""
        total_languages = len(STATS["locales"])
        total_hours = self.config.total_hr
        description = (
            "LibriVox-Indonesia is a speech dataset generated from LibriVox with only languages from Indonesia."
            f"The dataset currently consists of {total_hours} hours of speech "
            f"in {total_languages} languages, but more voices and languages are always added."
        )
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "language": datasets.Value("string"),
                "reader": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                # Audio is decoded lazily from raw bytes at 48 kHz.
                "audio": datasets.features.Audio(sampling_rate=48_000)
            }
        )

        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators (a single TRAIN split over the audio archive)."""
        # Strip URL query parameters so download caching keys on the clean URL.
        dl_manager.download_config.ignore_url_params = True

        archive_path = dl_manager.download(_AUDIO_URL)
        # Extraction is skipped in streaming mode; _generate_examples then
        # reads everything (CSV included) directly out of the archive.
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
        path_to_clips = "audio"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive": local_extracted_archive,
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "metadata_filepath": "audio_transcription.csv",
                    "path_to_clips": path_to_clips,
                },
            ),
        ]

    def _generate_examples(
        self,
        local_extracted_archive,
        archive_iterator,
        metadata_filepath,
        path_to_clips,
    ):
        """Yields examples keyed by the clip path.

        Previously the transcription CSV path was hard-coded as
        ``local_extracted_archive + "/audio/audio_transcription.csv"``, which
        raises TypeError in streaming mode (``local_extracted_archive`` is
        None) and ignored the ``metadata_filepath``/``path_to_clips``
        arguments.  The CSV is now read from disk when an extracted copy
        exists and parsed straight from the tar stream otherwise.
        """
        data_fields = list(self._info().features.keys())
        metadata = {}

        def _collect(rows):
            # Keep only rows for the selected language ("all" keeps everything)
            # and register them under their in-archive clip path.
            for row in rows:
                if self.config.name == "all" or self.config.name == row["language"]:
                    row["path"] = os.path.join(path_to_clips, row["path"])
                    # if data is incomplete, fill with empty values
                    for field in data_fields:
                        if field not in row:
                            row[field] = ""
                    metadata[row["path"]] = row

        if local_extracted_archive:
            # Non-streaming: the CSV is available on disk after extraction.
            csv_path = os.path.join(local_extracted_archive, path_to_clips, metadata_filepath)
            with open(csv_path, encoding="utf-8") as f:
                _collect(csv.DictReader(f))

        # Tar members always use "/" separators, regardless of platform.
        csv_member = path_to_clips + "/" + metadata_filepath
        for path, f in archive_iterator:
            if not local_extracted_archive and path == csv_member:
                # Streaming: parse the CSV out of the archive itself.
                # NOTE(review): assumes the CSV member precedes the audio
                # members in the tgz — confirm when regenerating the archive.
                _collect(csv.DictReader(line.decode("utf-8") for line in f))
            elif path in metadata:
                result = dict(metadata[path])
                # set the audio feature and the path to the extracted file
                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                result["audio"] = {"path": path, "bytes": f.read()}
                # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
                result["path"] = path if local_extracted_archive else None

                yield path, result
librivox-indonesia.tgz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb20e579f5813b019c871064e10b0cc0781848283f90f243a29c2f6f7ffba1a3
3
+ size 321049598
release_stats.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Per-locale statistics are placeholders and identical for every language,
# so they are generated from one template instead of being written out
# eight times.
_LOCALE_CODES = ("ace", "bal", "bug", "id", "min", "jav", "sun", "all")
_LOCALE_TEMPLATE = {
    "reportedSentences": 1,
    "duration": 1,
    "clips": 1,
    "users": 416,
    "size": 1,
    "avgDurationSecs": 1,
    "totalHrs": 1,
}

STATS = {
    "name": "Librivox-Indonesia",
    "bundleURLTemplate": "https://huggingface.co/datasets/cahya/test01/resolve/main/audio.tgz",
    "version": "1.0.0",
    "date": "",
    "locales": {code: dict(_LOCALE_TEMPLATE) for code in _LOCALE_CODES},
    "totalDuration": 1,
    "totalHrs": 1,
}
18
+