Datasets:
vivos
Languages:
Vietnamese
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Annotations Creators:
expert-generated
Source Datasets:
original
Tags:
License:
cc-by-sa-4.0
Commit 5da959c committed by Quentin Lhoest (1 parent: 0f5b577)

Release: 1.18.1

Commit from https://github.com/huggingface/datasets/commit/218e496519ff14b4bc69ea559616af6f2ef89e57

Files changed (1):
  1. vivos.py +138 -138
vivos.py CHANGED
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datasets


# Citation for the dataset, from the dataset repo/website
_CITATION = """\
@InProceedings{vivos:2016,
    address = {Ho Chi Minh, Vietnam},
    title = {VIVOS: 15 hours of recording speech prepared for Vietnamese Automatic Speech Recognition},
    author = {Prof. Vu Hai Quan},
    year = {2016}
}
"""

_DESCRIPTION = """\
VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recorded speech prepared for
the Vietnamese Automatic Speech Recognition task.
The corpus was prepared by AILAB, a computer science lab of VNUHCM - University of Science, headed by Prof. Vu Hai Quan.
We publish this corpus in the hope of attracting more scientists to work on Vietnamese speech recognition problems.
"""

_HOMEPAGE = "https://ailab.hcmus.edu.vn/vivos"

_LICENSE = "cc-by-sa-4.0"

_DATA_URL = "https://ailab.hcmus.edu.vn/assets/vivos.tar.gz"

_PROMPTS_URLS = {
    "train": "https://s3.amazonaws.com/datasets.huggingface.co/vivos/train/prompts.txt",
    "test": "https://s3.amazonaws.com/datasets.huggingface.co/vivos/test/prompts.txt",
}
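
# Each line of prompts.txt pairs an utterance ID with its transcript. A hypothetical
# line, with the format inferred from the parsing in _generate_examples below:
#     VIVOSSPK01_R001 <transcript text>
# The speaker ID is the portion of the utterance ID before the first underscore.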


class VivosDataset(datasets.GeneratorBasedBuilder):
    """VIVOS is a free Vietnamese speech corpus consisting of 15 hours of recorded speech prepared for
    the Vietnamese Automatic Speech Recognition task."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
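
    # A fully built example looks like this (hypothetical values, for illustration only):
    #     {"speaker_id": "VIVOSSPK01",
    #      "path": "vivos/train/waves/VIVOSSPK01/VIVOSSPK01_R001.wav",
    #      "audio": {"path": "vivos/train/waves/VIVOSSPK01/VIVOSSPK01_R001.wav", "bytes": b"..."},
    #      "sentence": "<transcript text>"}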

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
        # By default, archives are extracted and a path to a cached folder where they are extracted is returned instead of the archive.
        prompts_paths = dl_manager.download(_PROMPTS_URLS)
        archive = dl_manager.download(_DATA_URL)
        train_dir = "vivos/train"
        test_dir = "vivos/test"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "prompts_path": prompts_paths["train"],
                    "path_to_clips": train_dir + "/waves",
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "prompts_path": prompts_paths["test"],
                    "path_to_clips": test_dir + "/waves",
                    "audio_files": dl_manager.iter_archive(archive),
                },
            ),
        ]
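
    # Note: dl_manager.iter_archive streams (path, file-object) pairs from the tar
    # archive without extracting it to disk. Each call above creates an independent
    # iterator, so the train and test generators each scan the archive from the start.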

    def _generate_examples(self, prompts_path, path_to_clips, audio_files):
        """Yields examples as (key, example) tuples."""
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
        examples = {}
        with open(prompts_path, encoding="utf-8") as f:
            for row in f:
                data = row.strip().split(" ", 1)
                speaker_id = data[0].split("_")[0]
                audio_path = "/".join([path_to_clips, speaker_id, data[0] + ".wav"])
                examples[audio_path] = {
                    "speaker_id": speaker_id,
                    "path": audio_path,
                    "sentence": data[1],
                }
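        # The early-exit below relies on the archive listing files grouped by directory:
        # once we have entered and then left path_to_clips, no further members can match.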
        inside_clips_dir = False
        id_ = 0
        for path, f in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    audio = {"path": path, "bytes": f.read()}
                    yield id_, {**examples[path], "audio": audio}
                    id_ += 1
            elif inside_clips_dir:
                break
 
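For reference, a minimal usage sketch (assumptions, not part of the commit: that the script is resolvable by load_dataset under the name "vivos", and that the download URLs above are still reachable):

    from datasets import load_dataset

    # Build both splits by running the loading script above.
    train = load_dataset("vivos", split="train")

    sample = train[0]
    print(sample["speaker_id"], sample["sentence"])
    # The Audio feature decodes the stored bytes on access; the sampling rate is
    # 16000, matching datasets.Audio(sampling_rate=16_000) declared in _info.
    print(sample["audio"]["sampling_rate"])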