dcjack committed
Commit 2e4ece6
1 Parent(s): 7b5cd5e

Upload 7 files
audio/zh-CN/train/ningbo.tar ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8f7bc9892432324b1ec726fcd022fb6382f016b91f6fe1ec5a3e606a088d0a61
+ size 542720
count_n_shards.py ADDED
@@ -0,0 +1,22 @@
+ from pathlib import Path
+ import json
+ 
+ 
+ splits = ["train", "dev", "test", "other", "invalidated"]
+ 
+ if __name__ == "__main__":
+     n_files = {}
+     lang_dirs = [d for d in Path("audio").iterdir() if d.is_dir()]
+     for lang_dir in lang_dirs:
+         lang = lang_dir.name
+         n_files[lang] = {}
+         for split in splits:
+             split_dir = lang_dir / split
+             if split_dir.exists():
+                 n_files_per_split = len(list(split_dir.glob("*.tar")))
+             else:
+                 n_files_per_split = 0
+             n_files[lang][split] = n_files_per_split
+ 
+     with open("n_shards.json", "w") as f:
+         json.dump(dict(sorted(n_files.items(), key=lambda x: x[0])), f, ensure_ascii=False, indent=4)
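
count_n_shards.py is a maintenance helper: run from the dataset root (python count_n_shards.py), it counts the *.tar shards under audio/<lang>/<split> and rewrites n_shards.json, which the loading script fetches at build time. A minimal sketch of consuming its output, assuming the repo layout in this commit:

import json

# After running count_n_shards.py from the dataset root, read the counts back;
# "zh-CN"/"train" should be 1 for the single shard uploaded in this commit.
with open("n_shards.json", encoding="utf-8") as f:
    n_shards = json.load(f)
print(n_shards["zh-CN"]["train"])  # -> 1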
languages.py ADDED
@@ -0,0 +1 @@
+ LANGUAGES = {'ab': 'Abkhaz', 'ace': 'Acehnese', 'ady': 'Adyghe', 'af': 'Afrikaans', 'am': 'Amharic', 'an': 'Aragonese', 'ar': 'Arabic', 'arn': 'Mapudungun', 'as': 'Assamese', 'ast': 'Asturian', 'az': 'Azerbaijani', 'ba': 'Bashkir', 'bas': 'Basaa', 'be': 'Belarusian', 'bg': 'Bulgarian', 'bn': 'Bengali', 'br': 'Breton', 'bs': 'Bosnian', 'bxr': 'Buryat', 'ca': 'Catalan', 'cak': 'Kaqchikel', 'ckb': 'Central Kurdish', 'cnh': 'Hakha Chin', 'co': 'Corsican', 'cs': 'Czech', 'cv': 'Chuvash', 'cy': 'Welsh', 'da': 'Danish', 'de': 'German', 'dsb': 'Sorbian, Lower', 'dv': 'Dhivehi', 'dyu': 'Dioula', 'el': 'Greek', 'en': 'English', 'eo': 'Esperanto', 'es': 'Spanish', 'et': 'Estonian', 'eu': 'Basque', 'fa': 'Persian', 'ff': 'Fulah', 'fi': 'Finnish', 'fo': 'Faroese', 'fr': 'French', 'fy-NL': 'Frisian', 'ga-IE': 'Irish', 'gl': 'Galician', 'gn': 'Guarani', 'gom': 'Goan Konkani', 'ha': 'Hausa', 'he': 'Hebrew', 'hi': 'Hindi', 'hil': 'Hiligaynon', 'hr': 'Croatian', 'hsb': 'Sorbian, Upper', 'ht': 'Haitian', 'hu': 'Hungarian', 'hy-AM': 'Armenian', 'hyw': 'Armenian Western', 'ia': 'Interlingua', 'id': 'Indonesian', 'ie': 'Interlingue', 'ig': 'Igbo', 'is': 'Icelandic', 'it': 'Italian', 'izh': 'Izhorian', 'ja': 'Japanese', 'jbo': 'Lojban', 'ka': 'Georgian', 'kaa': 'Karakalpak', 'kab': 'Kabyle', 'kbd': 'Kabardian', 'ki': 'Kikuyu', 'kk': 'Kazakh', 'km': 'Khmer', 'kmr': 'Kurmanji Kurdish', 'kn': 'Kannada', 'knn': 'Konkani (Devanagari)', 'ko': 'Korean', 'kpv': 'Komi-Zyrian', 'kw': 'Cornish', 'ky': 'Kyrgyz', 'lb': 'Luxembourgish', 'lg': 'Luganda', 'lij': 'Ligurian', 'ln': 'Lingala', 'lo': 'Lao', 'lt': 'Lithuanian', 'lv': 'Latvian', 'mai': 'Maithili', 'mdf': 'Moksha', 'mg': 'Malagasy', 'mhr': 'Meadow Mari', 'mk': 'Macedonian', 'ml': 'Malayalam', 'mn': 'Mongolian', 'mni': 'Meetei Lon', 'mos': 'Mossi', 'mr': 'Marathi', 'mrj': 'Hill Mari', 'ms': 'Malay', 'mt': 'Maltese', 'my': 'Burmese', 'myv': 'Erzya', 'nan-tw': 'Taiwanese (Minnan)', 'nb-NO': 'Norwegian Bokmål', 'nd': 'IsiNdebele (North)', 'ne-NP': 'Nepali', 'nia': 'Nias', 'nl': 'Dutch', 'nn-NO': 'Norwegian Nynorsk', 'nr': 'IsiNdebele (South)', 'nso': 'Northern Sotho', 'nyn': 'Runyankole', 'oc': 'Occitan', 'om': 'Afaan Ormoo', 'or': 'Odia', 'pa-IN': 'Punjabi', 'pap-AW': 'Papiamento (Aruba)', 'pl': 'Polish', 'ps': 'Pashto', 'pt': 'Portuguese', 'quc': "K'iche'", 'quy': 'Quechua Chanka', 'rm-sursilv': 'Romansh Sursilvan', 'rm-vallader': 'Romansh Vallader', 'ro': 'Romanian', 'ru': 'Russian', 'rw': 'Kinyarwanda', 'sah': 'Sakha', 'sat': 'Santali (Ol Chiki)', 'sc': 'Sardinian', 'scn': 'Sicilian', 'sdh': 'Southern Kurdish', 'shi': 'Shilha', 'si': 'Sinhala', 'sk': 'Slovak', 'skr': 'Saraiki', 'sl': 'Slovenian', 'snk': 'Soninke', 'so': 'Somali', 'sq': 'Albanian', 'sr': 'Serbian', 'ss': 'Siswati', 'st': 'Southern Sotho', 'sv-SE': 'Swedish', 'sw': 'Swahili', 'syr': 'Syriac', 'ta': 'Tamil', 'te': 'Telugu', 'tg': 'Tajik', 'th': 'Thai', 'ti': 'Tigrinya', 'tig': 'Tigre', 'tk': 'Turkmen', 'tl': 'Tagalog', 'tn': 'Setswana', 'tok': 'Toki Pona', 'tr': 'Turkish', 'ts': 'Xitsonga', 'tt': 'Tatar', 'tw': 'Twi', 'ty': 'Tahitian', 'uby': 'Ubykh', 'udm': 'Udmurt', 'ug': 'Uyghur', 'uk': 'Ukrainian', 'ur': 'Urdu', 'uz': 'Uzbek', 've': 'Tshivenda', 'vec': 'Venetian', 'vi': 'Vietnamese', 'vot': 'Votic', 'xh': 'Xhosa', 'yi': 'Yiddish', 'yo': 'Yoruba', 'yue': 'Cantonese', 'zgh': 'Tamazight', 'zh-CN': 'Chinese (China)', 'zh-HK': 'Chinese (Hong Kong)', 'zh-TW': 'Chinese (Taiwan)', 'zu': 'Zulu'}
n_shards.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "zh-CN": {
+         "train": 1,
+         "dev": 0,
+         "test": 0,
+         "other": 0,
+         "invalidated": 0
+     }
+ }
ningbo_voice.py ADDED
@@ -0,0 +1,191 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Common Voice Dataset"""
+ 
+ 
+ import csv
+ import os
+ import json
+ 
+ import datasets
+ from datasets.utils.py_utils import size_str
+ from tqdm import tqdm
+ 
+ from .languages import LANGUAGES
+ from .release_stats import STATS
+ 
+ 
+ _CITATION = """
+ """
+ 
+ # NOTE: _HOMEPAGE and _LICENSE are used in _info() but were missing from this
+ # script; the values below are assumed from the upstream Common Voice script.
+ _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
+ _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
+ 
+ # TODO: change "streaming" to "main" after merge!
+ _BASE_URL = "https://huggingface.co/datasets/dcjack/ningbo_voice_0/resolve/main/"
+ 
+ _AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
+ 
+ _TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
+ 
+ _N_SHARDS_URL = _BASE_URL + "n_shards.json"
+ 
+ 
+ class CommonVoiceConfig(datasets.BuilderConfig):
+     """BuilderConfig for CommonVoice."""
+ 
+     def __init__(self, name, version, **kwargs):
+         self.language = kwargs.pop("language", None)
+         self.release_date = kwargs.pop("release_date", None)
+         self.num_clips = kwargs.pop("num_clips", None)
+         self.num_speakers = kwargs.pop("num_speakers", None)
+         self.validated_hr = kwargs.pop("validated_hr", None)
+         self.total_hr = kwargs.pop("total_hr", None)
+         self.size_bytes = kwargs.pop("size_bytes", None)
+         self.size_human = size_str(self.size_bytes)
+         description = (
+             f"Common Voice speech to text dataset in {self.language} released on {self.release_date}. "
+             f"The dataset comprises {self.validated_hr} hours of validated transcribed speech data "
+             f"out of {self.total_hr} hours in total from {self.num_speakers} speakers. "
+             f"The dataset contains {self.num_clips} audio clips and has a size of {self.size_human}."
+         )
+         super(CommonVoiceConfig, self).__init__(
+             name=name,
+             version=datasets.Version(version),
+             description=description,
+             **kwargs,
+         )
+ 
+ 
+ class CommonVoice(datasets.GeneratorBasedBuilder):
+     DEFAULT_WRITER_BATCH_SIZE = 1000
+ 
+     BUILDER_CONFIGS = [
+         CommonVoiceConfig(
+             name=lang,
+             version=STATS["version"],
+             language=LANGUAGES[lang],
+             release_date=STATS["date"],
+             num_clips=lang_stats["clips"],
+             num_speakers=lang_stats["users"],
+             validated_hr=float(lang_stats["validHrs"]) if lang_stats["validHrs"] else None,
+             total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
+             size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
+         )
+         for lang, lang_stats in STATS["locales"].items()
+     ]
+ 
+     def _info(self):
+         total_languages = len(STATS["locales"])
+         total_valid_hours = STATS["totalValidHrs"]
+         description = (
+             "Common Voice is Mozilla's initiative to help teach machines how real people speak. "
+             f"The dataset currently consists of {total_valid_hours} validated hours of speech "
+             f"in {total_languages} languages, but more voices and languages are always added."
+         )
+         features = datasets.Features(
+             {
+                 "client_id": datasets.Value("string"),
+                 "path": datasets.Value("string"),
+                 "audio": datasets.features.Audio(sampling_rate=48_000),
+                 "sentence": datasets.Value("string"),
+                 "up_votes": datasets.Value("int64"),
+                 "down_votes": datasets.Value("int64"),
+                 "age": datasets.Value("string"),
+                 "gender": datasets.Value("string"),
+                 "accent": datasets.Value("string"),
+                 "locale": datasets.Value("string"),
+                 "segment": datasets.Value("string"),
+             }
+         )
+ 
+         return datasets.DatasetInfo(
+             description=description,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             version=self.config.version,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         lang = self.config.name
+         n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
+         with open(n_shards_path, encoding="utf-8") as f:
+             n_shards = json.load(f)
+ 
+         audio_urls = {}
+         splits = ("train", "dev", "test", "other", "invalidated")
+         for split in splits:
+             audio_urls[split] = [
+                 _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(n_shards[lang][split])
+             ]
+         archive_paths = dl_manager.download(audio_urls)
+         local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+ 
+         meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
+         meta_paths = dl_manager.download_and_extract(meta_urls)
+ 
+         split_generators = []
+         split_names = {
+             "train": datasets.Split.TRAIN,
+             "dev": datasets.Split.VALIDATION,
+             "test": datasets.Split.TEST,
+         }
+         for split in splits:
+             split_generators.append(
+                 datasets.SplitGenerator(
+                     name=split_names.get(split, split),
+                     gen_kwargs={
+                         "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+                         "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+                         "meta_path": meta_paths[split],
+                     },
+                 ),
+             )
+ 
+         return split_generators
+ 
+     def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+         data_fields = list(self._info().features.keys())
+         metadata = {}
+         with open(meta_path, encoding="utf-8") as f:
+             reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+             for row in tqdm(reader, desc="Reading metadata..."):
+                 if not row["path"].endswith(".mp3"):
+                     row["path"] += ".mp3"
+                 # accent -> accents in CV 8.0
+                 if "accents" in row:
+                     row["accent"] = row["accents"]
+                     del row["accents"]
+                 # if data is incomplete, fill with empty values
+                 for field in data_fields:
+                     if field not in row:
+                         row[field] = ""
+                 metadata[row["path"]] = row
+ 
+         for i, audio_archive in enumerate(archives):
+             for path, file in audio_archive:
+                 _, filename = os.path.split(path)
+                 if filename in metadata:
+                     result = dict(metadata[filename])
+                     # set the audio feature and the path to the extracted file
+                     path = os.path.join(local_extracted_archive_paths[i], path) if local_extracted_archive_paths else path
+                     result["audio"] = {"path": path, "bytes": file.read()}
+                     result["path"] = path
+                     yield path, result
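
For reference, a minimal loading sketch, assuming this file serves as the repo's loading script and that the repo id dcjack/ningbo_voice_0 (taken from _BASE_URL) is correct. Note that _AUDIO_URL resolves shards named {lang}_{split}_{shard_idx}.tar (e.g. zh-CN_train_0.tar), so the ningbo.tar archive uploaded above would need to follow that naming pattern for the loader to find it.

from datasets import load_dataset

# Stream the train split; "zh-CN" is the only config, per release_stats.py.
# Newer `datasets` versions may also require trust_remote_code=True.
ds = load_dataset("dcjack/ningbo_voice_0", "zh-CN", split="train", streaming=True)
sample = next(iter(ds))
print(sample["sentence"], sample["audio"]["sampling_rate"])  # 48000, per the Audio feature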
release_stats.py ADDED
@@ -0,0 +1 @@
+ STATS = {'bundleURLTemplate': 'cv-corpus-11.0-2022-09-21/cv-corpus-11.0-2022-09-21-{locale}.tar.gz', 'date': '2022-09-21', 'name': 'Common Voice Corpus 11.0', 'multilingual': True, 'locales': {'zh-CN': {'duration': 11152496587, 'buckets': {'dev': 0, 'invalidated': 0, 'other': 0, 'reported': 0, 'test': 0, 'train': 22, 'validated': 0}, 'reportedSentences': 22, 'clips': 1500, 'splits': {'accent': {'': 1}, 'age': {'': 0.0, 'twenties': 0.0, 'sixties': 0.0, 'thirties': 1.0, 'teens': 0.0, 'seventies': 0.0, 'fourties': 0.0, 'fifties': 0.0, 'eighties': 0, 'nineties': 0}, 'gender': {'': 0.0, 'male': 0.0, 'female': 1.0, 'other': 0.0}}, 'users': 1, 'size': 937788, 'checksum': '', 'avgDurationSecs': 5.159, 'validDurationSecs': 0, 'totalHrs': 0.2, 'validHrs': 0.0}}, 'totalDuration': 87156202251, 'totalValidDurationSecs': 59087456, 'totalHrs': 24210, 'totalValidHrs': 16413, 'version': '11.0.0'}
transcript/zh-CN/train.tsv ADDED
@@ -0,0 +1,23 @@
+ client_id path sentence up_votes down_votes age gender accents locale segment
+ 0 0.mp3 玉米. 2 0 thirties female zh-CN
+ 1 1.mp3 再见. 2 0 thirties female zh-CN
+ 2 2.mp3 你今天吃过油条吗? 7 0 thirties female zh-CN
+ 3 3.mp3 非常棒! 3 0 thirties female zh-CN
+ 4 4.mp3 你好, 2 0 thirties female zh-CN
+ 5 5.mp3 我们 2 0 thirties female zh-CN
+ 6 6.mp3 淋巴肉不能吃,有毒的. 9 0 thirties female zh-CN
+ 7 7.mp3 羊和猪是最好的姐妹花. 10 0 thirties female zh-CN
+ 8 8.mp3 长得很漂亮. 5 0 thirties female zh-CN
+ 9 9.mp3 话真够多的. 5 0 thirties female zh-CN
+ 10 10.mp3 眼镜. 2 0 thirties female zh-CN
+ 11 11.mp3 钥匙掉了. 4 0 thirties female zh-CN
+ 12 12.mp3 饭吃了没有,今天天气很好,我带你逛街买零食去. 20 0 thirties female zh-CN
+ 13 13.mp3 你怎么了,脸怎么黑了? 9 0 thirties female zh-CN
+ 14 14.mp3 非常赞. 3 0 thirties female zh-CN
+ 15 15.mp3 一家人在山上玩. 7 0 thirties female zh-CN
+ 16 16.mp3 一直会说过去的. 7 0 thirties female zh-CN
+ 17 17.mp3 我跟你说,这两天猪颈肉涨价了. 13 0 thirties female zh-CN
+ 18 18.mp3 青蛙 2 0 thirties female zh-CN
+ 19 19.mp3 猜灯谜 3 0 thirties female zh-CN
+ 20 20.mp3 气球 2 0 thirties female zh-CN
+ 21 21.mp3 他们 2 0 thirties female zh-CN