parquet-converter committed on
Commit
2a294e9
1 Parent(s): 3f0baad

Update parquet files

Browse files
librispeech_train_other_only.py DELETED
@@ -1,152 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """Librispeech automatic speech recognition dataset."""
18
-
19
-
20
- import os
21
-
22
- import datasets
23
- from datasets.tasks import AutomaticSpeechRecognition
24
-
25
-
26
- _CITATION = """\
27
- @inproceedings{panayotov2015librispeech,
28
- title={Librispeech: an ASR corpus based on public domain audio books},
29
- author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
30
- booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
31
- pages={5206--5210},
32
- year={2015},
33
- organization={IEEE}
34
- }
35
- """
36
-
37
- _DESCRIPTION = """\
38
- LibriSpeech is a corpus of approximately 1000 hours of read English speech with sampling rate of 16 kHz,
39
- prepared by Vassil Panayotov with the assistance of Daniel Povey. The data is derived from read
40
- audiobooks from the LibriVox project, and has been carefully segmented and aligned.87
41
- """
42
-
43
- _URL = "http://www.openslr.org/12"
44
- _DL_URL = "http://www.openslr.org/resources/12/"
45
-
46
-
47
- _DL_URLS = {
48
- "other": {
49
- "train.500": _DL_URL + "train-other-500.tar.gz",
50
- },
51
- "all": {
52
- "train.500": _DL_URL + "train-other-500.tar.gz",
53
- },
54
- }
55
-
56
-
57
class LibrispeechASRConfig(datasets.BuilderConfig):
    """Configuration for the LibriSpeech ASR builder.

    Pins the dataset version to 2.1.0 and forwards every other keyword
    argument unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        """Create a builder config with the version fixed at 2.1.0.

        Args:
            **kwargs: keyword arguments forwarded to the parent
                ``datasets.BuilderConfig`` (e.g. ``name``, ``description``).
        """
        super().__init__(version=datasets.Version("2.1.0", ""), **kwargs)
70
-
71
-
72
class LibrispeechASR(datasets.GeneratorBasedBuilder):
    """Librispeech dataset builder (train-other-500 split only)."""

    # Flush examples to disk in batches of 256 rows (audio rows are large).
    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [LibrispeechASRConfig(name="other", description="'Other', more challenging, speech.")]

    def _info(self):
        """Declare the dataset schema, supervision keys and card metadata."""
        features = datasets.Features(
            {
                "file": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
                "text": datasets.Value("string"),
                "speaker_id": datasets.Value("int64"),
                "chapter_id": datasets.Value("int64"),
                "id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        """Download the archive and build the single "train.500" split."""
        archive_path = dl_manager.download(_DL_URLS[self.config.name])
        # Outside streaming mode the archive is extracted so examples can
        # reference real local audio files; when streaming, members are
        # read directly from the tarball instead.
        local_extracted_archive = {} if dl_manager.is_streaming else dl_manager.extract(archive_path)

        if self.config.name in ("other", "all"):
            train_splits = [
                datasets.SplitGenerator(
                    name="train.500",
                    gen_kwargs={
                        "local_extracted_archive": local_extracted_archive.get("train.500"),
                        "files": dl_manager.iter_archive(archive_path["train.500"]),
                    },
                )
            ]

        return train_splits

    def _generate_examples(self, files, local_extracted_archive):
        """Yield ``(key, example)`` pairs from a streamed LibriSpeech archive.

        Audio bytes and transcript rows arrive as separate tar members, so
        both are buffered per chapter and emitted together once every
        buffered transcript has matching audio.
        """
        example_idx = 0
        flac_bytes = {}  # utterance id -> raw FLAC bytes buffered so far
        pending = []  # transcript rows waiting for their audio bytes
        for member_path, member_file in files:
            if member_path.endswith(".flac"):
                utt_id = member_path.split("/")[-1][: -len(".flac")]
                flac_bytes[utt_id] = member_file.read()
            elif member_path.endswith(".trans.txt"):
                for raw_line in member_file:
                    if not raw_line:
                        continue
                    utt_id, text = raw_line.decode("utf-8").strip().split(" ", 1)
                    # Utterance ids look like "<speaker>-<chapter>-<index>".
                    speaker_id, chapter_id = (int(part) for part in utt_id.split("-")[:2])
                    audio_file = f"{utt_id}.flac"
                    if local_extracted_archive:
                        audio_file = os.path.join(local_extracted_archive, audio_file)
                    pending.append(
                        {
                            "id": utt_id,
                            "speaker_id": speaker_id,
                            "chapter_id": chapter_id,
                            "file": audio_file,
                            "text": text,
                        }
                    )
            # A chapter is complete when each pending transcript has audio.
            if flac_bytes and len(flac_bytes) == len(pending):
                for row in pending:
                    yield example_idx, {
                        "audio": {"path": row["file"], "bytes": flac_bytes[row["id"]]},
                        **row,
                    }
                    example_idx += 1
                flac_bytes = {}
                pending = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
other/partial-train.500/0000.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc125ef6135040a1a7f06e85d53d5ea774aa604020f475fb8e7a84a027970534
3
+ size 494205521
other/partial-train.500/0001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6cd09a37cc5ac8bc28fa88d0fce1818d2bdbcf71f4e672ca4949e6dced76067a
3
+ size 495084707
other/partial-train.500/0002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99805b1eb828a169e3c082e2ab19a75e78aa94c9d4ee291be3ee5259a74f1be9
3
+ size 501097078
other/partial-train.500/0003.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e203f8a1cf9cc4a87f013c5223397ad89d6028ab35db0b9fed2b7cb5ba600897
3
+ size 498950157
other/partial-train.500/0004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95a545106c72c313e7649a4240728273794fb4f7947eda01dc7c97308a3a4160
3
+ size 496104070
other/partial-train.500/0005.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eaac50b5f44bc0f4979e24e2681da8b0abe75102b53a3cc2d27b1dcfdc7ee282
3
+ size 488051484
other/partial-train.500/0006.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91e5344472f8854bf1f8f54678a6673561658e0eee11a1d318a36512bc31c254
3
+ size 487579938
other/partial-train.500/0007.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c96df5a05d70047ec0559e98a1dcc7a7b334266a258318de70b675b36523f46
3
+ size 495371974
other/partial-train.500/0008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c489756a600794a671bd7e0ab0553e220f6de7a513cda5192ed269c01a49dde
3
+ size 488682030
other/partial-train.500/0009.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fccf9f6526a8269a46bab7358568828d5f46aa3dbde493562dacede007b45098
3
+ size 404048123