Seosnaps committed on
Commit f69bf6a
1 Parent(s): d923ee8

Create common_voice_16_0.py

Files changed (1):
  1. common_voice_16_0.py +158 -0
common_voice_16_0.py ADDED
@@ -0,0 +1,158 @@
import csv
import json
import os

import datasets
from datasets.utils.py_utils import size_str
from tqdm import tqdm

_CITATION = """\
@inproceedings{commonvoice:2020,
  author = {Ardila, R. and Branson, M. and Davis, K. and Henretty, M. and Kohler, M. and Meyer, J. and Morais, R. and Saunders, L. and Tyers, F. M. and Weber, G.},
  title = {Common Voice: A Massively-Multilingual Speech Corpus},
  booktitle = {Proceedings of the 12th Conference on Language Resources and Evaluation (LREC 2020)},
  pages = {4211--4215},
  year = 2020
}
"""

_HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"

_LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"

# TODO: change "streaming" to "main" after merge!
_BASE_URL = "https://huggingface.co/datasets/reach-vb/common_voice_16_0/resolve/main/"

_AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"

_TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"

_N_SHARDS_URL = _BASE_URL + "n_shards.json"

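# For illustration: with lang="ha" (the subset hard-coded in
# _split_generators below), the templates above expand to, e.g.,
#   audio/ha/train/ha_train_0.tar    -- audio shard 0 of the train split
#   transcript/ha/train.tsv          -- tab-separated transcripts
# n_shards.json is expected to map language -> split -> shard count,
# e.g. {"ha": {"train": 1, "dev": 1, ...}} (structure inferred from its
# use in _split_generators).
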
class CommonVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for CommonVoice."""

    def __init__(self, name, version, **kwargs):
        self.language = kwargs.pop("language", None)
        self.release_date = kwargs.pop("release_date", None)
        self.num_clips = kwargs.pop("num_clips", None)
        self.num_speakers = kwargs.pop("num_speakers", None)
        self.validated_hr = kwargs.pop("validated_hr", None)
        self.total_hr = kwargs.pop("total_hr", None)
        self.size_bytes = kwargs.pop("size_bytes", None)
        self.size_human = size_str(self.size_bytes)
        description = (
            f"Common Voice speech to text dataset in {self.language} released on {self.release_date}. "
            f"The dataset comprises {self.validated_hr} hours of validated transcribed speech data "
            f"out of {self.total_hr} hours in total from {self.num_speakers} speakers. "
            f"The dataset contains {self.num_clips} audio clips and has a size of {self.size_human}."
        )
        super(CommonVoiceConfig, self).__init__(
            name=name,
            version=datasets.Version(version),
            description=description,
            **kwargs,
        )

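# Note: the script registers no BUILDER_CONFIGS, so the builder falls back
# to a single default config and CommonVoiceConfig is currently unused.
# A per-language config would typically be registered like this
# (illustrative values only):
#
# BUILDER_CONFIGS = [
#     CommonVoiceConfig(
#         name="ha",
#         version="16.0.0",
#         language="ha",
#         release_date="<release date>",
#     )
# ]
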
class CommonVoice(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 1000

    def _info(self):
        description = (
            "Common Voice is Mozilla's initiative to help teach machines how real people speak. "
            "The dataset currently consists of validated hours of speech in one language, "
            "but more voices and languages are always added."
        )
        features = datasets.Features(
            {
                "client_id": datasets.Value("string"),
                "path": datasets.Value("string"),
                # Clips are mp3s at 48 kHz; the Audio feature decodes them
                # lazily when the "audio" column is accessed.
                "audio": datasets.features.Audio(sampling_rate=48_000),
                "sentence": datasets.Value("string"),
                "up_votes": datasets.Value("int64"),
                "down_votes": datasets.Value("int64"),
                "age": datasets.Value("string"),
                "gender": datasets.Value("string"),
                "accent": datasets.Value("string"),
                "locale": datasets.Value("string"),
                "segment": datasets.Value("string"),
                "variant": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=datasets.Version("1.0.0"),
        )

    def _split_generators(self, dl_manager):
        lang = "ha"  # Assuming Hausa language
        # n_shards.json records how many audio tar shards each split has.
        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
        with open(n_shards_path, encoding="utf-8") as f:
            n_shards = json.load(f)

        audio_urls = {}
        splits = ("train", "dev", "test", "other", "invalidated")
        for split in splits:
            audio_urls[split] = [
                _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
            ]
        archive_paths = dl_manager.download(audio_urls)
        # In streaming mode the tar shards are iterated directly, so they are
        # only extracted to disk for non-streaming use.
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}

        meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
        meta_paths = dl_manager.download_and_extract(meta_urls)

        split_generators = []
        # "other" and "invalidated" keep their raw names; the rest map to the
        # canonical datasets splits.
        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        for split in splits:
            split_generators.append(
                datasets.SplitGenerator(
                    name=split_names.get(split, split),
                    gen_kwargs={
                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
                        "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
                        "meta_path": meta_paths[split],
                    },
                ),
            )

        return split_generators

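    # Each yielded example is keyed by its audio path and looks roughly like
    # (illustrative values):
    #   {"client_id": "...", "path": "ha_train_0/common_voice_ha_123.mp3",
    #    "audio": {"path": "...", "bytes": b"..."}, "sentence": "...", ...}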
    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
        data_fields = list(self._info().features.keys())
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for row in tqdm(reader, desc="Reading metadata..."):
                if not row["path"].endswith(".mp3"):
                    row["path"] += ".mp3"
                # Newer Common Voice TSVs name the column "accents"; the
                # features in _info expect "accent".
                if "accents" in row:
                    row["accent"] = row["accents"]
                    del row["accents"]
                # Backfill any feature missing from the TSV with an empty string.
                for field in data_fields:
                    if field not in row:
                        row[field] = ""
                metadata[row["path"]] = row

        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                _, filename = os.path.split(path)
                if filename in metadata:
                    result = dict(metadata[filename])
                    # Point at the extracted file when available; in streaming
                    # mode keep the in-archive path.
                    path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
                    result["audio"] = {"path": path, "bytes": file.read()}
                    result["path"] = path
                    yield path, result
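
For reference, a minimal usage sketch (assuming the script is saved locally as common_voice_16_0.py; recent datasets releases also require trust_remote_code=True to run loading scripts):

import datasets

# Stream the Hausa train split without downloading all tar shards up front.
cv = datasets.load_dataset(
    "common_voice_16_0.py",  # path to this loading script
    split="train",
    streaming=True,
    trust_remote_code=True,
)

sample = next(iter(cv))
print(sample["sentence"])
print(sample["audio"]["sampling_rate"])  # 48000, per the Audio feature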