Convert dataset to Parquet #5
opened by albertvillanova

Files changed:
- README.md +25 -19
- main/train-00000-of-00027.parquet +3 -0
- main/train-00001-of-00027.parquet +3 -0
- main/train-00002-of-00027.parquet +3 -0
- main/train-00003-of-00027.parquet +3 -0
- main/train-00004-of-00027.parquet +3 -0
- main/train-00005-of-00027.parquet +3 -0
- main/train-00006-of-00027.parquet +3 -0
- main/train-00007-of-00027.parquet +3 -0
- main/train-00008-of-00027.parquet +3 -0
- main/train-00009-of-00027.parquet +3 -0
- main/train-00010-of-00027.parquet +3 -0
- main/train-00011-of-00027.parquet +3 -0
- main/train-00012-of-00027.parquet +3 -0
- main/train-00013-of-00027.parquet +3 -0
- main/train-00014-of-00027.parquet +3 -0
- main/train-00015-of-00027.parquet +3 -0
- main/train-00016-of-00027.parquet +3 -0
- main/train-00017-of-00027.parquet +3 -0
- main/train-00018-of-00027.parquet +3 -0
- main/train-00019-of-00027.parquet +3 -0
- main/train-00020-of-00027.parquet +3 -0
- main/train-00021-of-00027.parquet +3 -0
- main/train-00022-of-00027.parquet +3 -0
- main/train-00023-of-00027.parquet +3 -0
- main/train-00024-of-00027.parquet +3 -0
- main/train-00025-of-00027.parquet +3 -0
- main/train-00026-of-00027.parquet +3 -0
- vctk.py +0 -133
README.md
CHANGED
@@ -9,7 +9,6 @@ license:
 - cc-by-4.0
 multilinguality:
 - monolingual
-pretty_name: VCTK
 size_categories:
 - 10K<n<100K
 source_datasets:
@@ -20,21 +19,9 @@ task_categories:
 - text-to-audio
 task_ids: []
 paperswithcode_id: vctk
-train-eval-index:
-- config: main
-  task: automatic-speech-recognition
-  task_id: speech_recognition
-  splits:
-    train_split: train
-  col_mapping:
-    file: path
-    text: text
-  metrics:
-  - type: wer
-    name: WER
-  - type: cer
-    name: CER
+pretty_name: VCTK
 dataset_info:
+  config_name: main
   features:
   - name: speaker_id
     dtype: string
@@ -58,13 +45,32 @@ dataset_info:
     dtype: string
   - name: comment
     dtype: string
-  config_name: main
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 13246600829.632
     num_examples: 88156
-  download_size:
-  dataset_size:
+  download_size: 11715680639
+  dataset_size: 13246600829.632
+configs:
+- config_name: main
+  data_files:
+  - split: train
+    path: main/train-*
+  default: true
+train-eval-index:
+- config: main
+  task: automatic-speech-recognition
+  task_id: speech_recognition
+  splits:
+    train_split: train
+  col_mapping:
+    file: path
+    text: text
+  metrics:
+  - type: wer
+    name: WER
+  - type: cer
+    name: CER
 ---
 
 # Dataset Card for VCTK
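With the new `configs` mapping in the card, the `main` config resolves directly to the Parquet shards under `main/train-*`, so no loading script runs at load time. A minimal sketch of loading the converted data with `datasets` (the repo id `"vctk"` is an assumption; substitute the dataset's actual Hub id, and `streaming=True` avoids downloading the full ~11.7 GB up front):

from datasets import load_dataset

# The "main" config is now backed by main/train-*.parquet instead of vctk.py.
# "vctk" is an assumed repository id; use the dataset's real Hub id.
ds = load_dataset("vctk", "main", split="train", streaming=True)
row = next(iter(ds))
print(row["speaker_id"], row["text"])  # speaker id and its transcription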
main/train-00000-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:832409b80208069614650af1438b242351d1ea6cc338547a39fd2617fbadef2d
+size 471632807

main/train-00001-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:349648997920a878fe5a82048587236b35f06b284a03eb84431e72278b219e0f
+size 415387720

main/train-00002-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dec94b112af35981f2046feedb7f290234ed304f3e234260cc4b9adff97b6866
+size 441013052

main/train-00003-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7fb3428e58384acc29a29fc16741144d7a25c75e9349b016ad7377a1be2c13bb
+size 456796730

main/train-00004-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67a152467b3e09529d988e723979bdc04e83f37a8edd4f554a8e5afb019faf4b
+size 447028518

main/train-00005-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9abc09da5aaa4a389e6da1aff2073e0ac6c6867e0160f63bc384941ddd1b2577
+size 439078904

main/train-00006-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67bee0ebb4dab788337f4f023f393277c18f664c64c3cc822ed2e43f54598328
+size 448929241

main/train-00007-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c80e2e782bbb6d321b19c92ebbea7794cd11c0278891e6e3cceceb0d98aa6a6
+size 421190487

main/train-00008-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44142f42b36970a117f4b23d4925fe5700f29bb338946df3f09dd9a91253b203
+size 430892942

main/train-00009-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:456f37796630e65e718a1565d9dcba07698f980cb6344fa8dee7bffa3ecba7f7
+size 454240101

main/train-00010-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bbb17b7778690f69aa3be4a4ab23efbbf4c8865632799f2874918cb3b43b9ca7
+size 468908412

main/train-00011-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2019fd0f995eb311773aaca8ab6a3383d78bef52f0259b163bced1c372f9ae3b
+size 433448483

main/train-00012-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82ae53d1361507e30bfaeac080fd4c054f9a866243dc87cb0ab65513b8ee05a2
+size 447928238

main/train-00013-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac0323524e39bcce97b14f85f23330a1c899a4446d14fb1fb4e622b031283743
+size 425435243

main/train-00014-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a3def40fd1ab4f993b77634c49ca804a0f8bd5ab7e10d6f38962cb35d4cef41
+size 408578405

main/train-00015-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:077b3d1f2a6db67cdd8201a55d8f406ca5d49dc56147cebfd1e6e944260c0b76
+size 425807561

main/train-00016-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:faa8f13a2a2132818bce1c6997e016389e06b7c9539521ed470674b36a8759fc
+size 453612283

main/train-00017-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21d3c5b5c8c0ff22d615f08046da73696b7a38904840107824b0dae2d2dea270
+size 496703992

main/train-00018-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20a98a952dec559aa58f6133082a28be414f94d913878d2f739e35349021d098
+size 487519230

main/train-00019-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f40e878095e1a2844a0289ec1f4347c6b4129d3c39b8a20837f5e3338aa4ea74
+size 421850585

main/train-00020-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc444ec93574cb8f25bed83b1999f5c7245d9737b08f5303571402b9a695ad2f
+size 416165865

main/train-00021-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bfedf41ed788ef28a1b45f6aff1b22e4559f17b819039fb3dd207a712b27543
+size 432364498

main/train-00022-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9a718c4071c6a179a4023589ef4ef51174e29d10cac8b514f12c995f4f3394e
+size 419557215

main/train-00023-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c525cebac285d7dcac8d4428900984523e7fd866a51e238efa6b5ff8c257235
+size 386412884

main/train-00024-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c67d95a3d1d21b6ff0d927b8200090ce2784218196a5022e5265efb49497e79
+size 408793993

main/train-00025-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91eb1570e96b4855da8198df91532aa1a2dd897d109c9cd2736a1e814f18c652
+size 349888540

main/train-00026-of-00027.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5d6a12b26d0c616d95536266d9e74832b6b74ab50aa918df56061f5bd417776
+size 406514710
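Each of the 27 files above is a Git LFS pointer: the repository itself stores only the version/oid/size triplet, while the actual Parquet bytes live in LFS storage. A small sketch of how one might verify a downloaded shard against its pointer (the local paths are hypothetical; the expected sha256 and byte count come straight from the pointer text):

import hashlib
import os

def matches_lfs_pointer(pointer_path, blob_path):
    """Return True if blob_path has the sha256 oid and size recorded in the LFS pointer."""
    with open(pointer_path, encoding="utf-8") as f:
        fields = dict(line.strip().split(" ", 1) for line in f if line.strip())
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

# Hypothetical local paths: the pointer as stored in the git tree vs. the fetched shard.
print(matches_lfs_pointer("pointers/train-00000-of-00027.parquet", "data/train-00000-of-00027.parquet"))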
vctk.py
DELETED
@@ -1,133 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""VCTK dataset."""
-
-
-import os
-import re
-
-import datasets
-from datasets.tasks import AutomaticSpeechRecognition
-
-
-_CITATION = """\
-@inproceedings{Veaux2017CSTRVC,
-  title = {CSTR VCTK Corpus: English Multi-speaker Corpus for CSTR Voice Cloning Toolkit},
-  author = {Christophe Veaux and Junichi Yamagishi and Kirsten MacDonald},
-  year = 2017
-}
-"""
-
-_DESCRIPTION = """\
-The CSTR VCTK Corpus includes speech data uttered by 110 English speakers with various accents.
-"""
-
-_URL = "https://datashare.ed.ac.uk/handle/10283/3443"
-_DL_URL = "https://datashare.is.ed.ac.uk/bitstream/handle/10283/3443/VCTK-Corpus-0.92.zip"
-
-
-class VCTK(datasets.GeneratorBasedBuilder):
-    """VCTK dataset."""
-
-    VERSION = datasets.Version("0.9.2")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="main", version=VERSION, description="VCTK dataset"),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "speaker_id": datasets.Value("string"),
-                    "audio": datasets.features.Audio(sampling_rate=48_000),
-                    "file": datasets.Value("string"),
-                    "text": datasets.Value("string"),
-                    "text_id": datasets.Value("string"),
-                    "age": datasets.Value("string"),
-                    "gender": datasets.Value("string"),
-                    "accent": datasets.Value("string"),
-                    "region": datasets.Value("string"),
-                    "comment": datasets.Value("string"),
-                }
-            ),
-            supervised_keys=("file", "text"),
-            homepage=_URL,
-            citation=_CITATION,
-            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
-        )
-
-    def _split_generators(self, dl_manager):
-        root_path = dl_manager.download_and_extract(_DL_URL)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"root_path": root_path}),
-        ]
-
-    def _generate_examples(self, root_path):
-        """Generate examples from the VCTK corpus root path."""
-
-        meta_path = os.path.join(root_path, "speaker-info.txt")
-        txt_root = os.path.join(root_path, "txt")
-        wav_root = os.path.join(root_path, "wav48_silence_trimmed")
-        # NOTE: "comment" is handled separately in logic below
-        fields = ["speaker_id", "age", "gender", "accent", "region"]
-
-        key = 0
-        with open(meta_path, encoding="utf-8") as meta_file:
-            _ = next(iter(meta_file))
-            for line in meta_file:
-                data = {}
-                line = line.strip()
-                search = re.search(r"\(.*\)", line)
-                if search is None:
-                    data["comment"] = ""
-                else:
-                    start, _ = search.span()
-                    data["comment"] = line[start:]
-                    line = line[:start]
-                values = line.split()
-                for i, field in enumerate(fields):
-                    if field == "region":
-                        data[field] = " ".join(values[i:])
-                    else:
-                        data[field] = values[i] if i < len(values) else ""
-                speaker_id = data["speaker_id"]
-                speaker_txt_path = os.path.join(txt_root, speaker_id)
-                speaker_wav_path = os.path.join(wav_root, speaker_id)
-                # NOTE: p315 does not have text
-                if not os.path.exists(speaker_txt_path):
-                    continue
-                for txt_file in sorted(os.listdir(speaker_txt_path)):
-                    filename, _ = os.path.splitext(txt_file)
-                    _, text_id = filename.split("_")
-                    for i in [1, 2]:
-                        wav_file = os.path.join(speaker_wav_path, f"{filename}_mic{i}.flac")
-                        # NOTE: p280 does not have mic2 files
-                        if not os.path.exists(wav_file):
-                            continue
-                        with open(os.path.join(speaker_txt_path, txt_file), encoding="utf-8") as text_file:
-                            text = text_file.readline().strip()
-                        more_data = {
-                            "file": wav_file,
-                            "audio": wav_file,
-                            "text": text,
-                            "text_id": text_id,
-                        }
-                        yield key, {**data, **more_data}
-                        key += 1
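With vctk.py removed, nothing dataset-specific runs at load time; each shard is an ordinary Parquet file that generic tools can read. A minimal inspection sketch using pyarrow (column names follow `dataset_info.features` in the card; the expectation that the `audio` column is serialized as a struct of `bytes` and `path` reflects how `datasets` typically encodes its Audio feature, not anything stated in this PR):

import pyarrow.parquet as pq

# Open a single shard without pulling the (large) audio bytes into memory.
pf = pq.ParquetFile("main/train-00000-of-00027.parquet")
print(pf.metadata.num_rows)  # rows in this shard; the 27 shards together hold 88156 examples
print(pf.schema_arrow)       # expected: speaker_id, audio (struct<bytes, path>), file, text, ...

# Read only the lightweight text columns.
table = pf.read(columns=["speaker_id", "text_id", "text"])
print(table.slice(0, 3).to_pydict())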