polinaeterna (HF staff) committed
Commit c3c6e9d
1 Parent(s): 2ee3ad8

refactor script, add test and val configs

Files changed (1):
  peoples_speech.py (+54 -62)
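For context, the two new standalone configs added here make it possible to pull only the evaluation data. A minimal usage sketch, assuming the standard `datasets` client; the config names are exactly those added in this commit:

    from datasets import load_dataset

    # The new "validation" and "test" configs download only that split's shards,
    # instead of the full train/validation/test trio of the other configs:
    val = load_dataset("MLCommons/peoples_speech", "validation", split="validation", streaming=True)
    print(next(iter(val)))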
peoples_speech.py CHANGED
@@ -67,25 +67,33 @@ _BASE_URL = "https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/ma
 _DATA_URL = _BASE_URL + "{split}/{config}/{config}_{archive_id:06d}.tar"
 
 # relative path to file containing number of audio archives inside dataset's repo
-_N_FILES_URL = _BASE_URL + "{split}/{config}/n_files.txt"
+_N_SHARDS_URL = _BASE_URL + "n_shards.json"
 
 # relative path to metadata inside dataset's repo
 _MANIFEST_URL = _BASE_URL + "{split}/{config}.json"
 
 
+class PeoplesSpeechConfig(datasets.BuilderConfig):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+
 class PeoplesSpeech(datasets.GeneratorBasedBuilder):
     """The People's Speech dataset."""
 
     VERSION = datasets.Version("1.1.0")
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="microset", version=VERSION, description="Small subset of clean data for example purposes."),
-        datasets.BuilderConfig(name="clean", version=VERSION, description="Clean, CC-BY licensed subset."),
-        datasets.BuilderConfig(name="dirty", version=VERSION, description="Dirty, CC-BY licensed subset."),
-        datasets.BuilderConfig(name="clean_sa", version=VERSION, description="Clean, CC-BY-SA licensed subset."),
-        datasets.BuilderConfig(name="dirty_sa", version=VERSION, description="Dirty, CC-BY-SA licensed subset."),
+        PeoplesSpeechConfig(name="microset", version=VERSION, description="Small subset of clean data for example purposes."),
+        PeoplesSpeechConfig(name="clean", version=VERSION, description="Clean, CC-BY licensed subset."),
+        PeoplesSpeechConfig(name="dirty", version=VERSION, description="Dirty, CC-BY licensed subset."),
+        PeoplesSpeechConfig(name="clean_sa", version=VERSION, description="Clean, CC-BY-SA licensed subset."),
+        PeoplesSpeechConfig(name="dirty_sa", version=VERSION, description="Dirty, CC-BY-SA licensed subset."),
+        PeoplesSpeechConfig(name="test", version=VERSION, description="Only test data."),
+        PeoplesSpeechConfig(name="validation", version=VERSION, description="Only validation data."),
     ]
     DEFAULT_CONFIG_NAME = "clean"
-    DEFAULT_WRITER_BATCH_SIZE = 1
+    DEFAULT_WRITER_BATCH_SIZE = 512
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -99,19 +107,11 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
                 }
             ),
            task_templates=[AutomaticSpeechRecognition()],
-            supervised_keys=("file", "text"),
            homepage=_HOMEPAGE,
            license="/".join(_LICENSE),  # license must be a string
            citation=_CITATION,
        )
 
-    def _get_n_files(self, dl_manager, split, config):
-        n_files_url = _N_FILES_URL.format(split=split, config=config)
-        n_files_path = dl_manager.download_and_extract(n_files_url)
-
-        with open(n_files_path, encoding="utf-8") as f:
-            return int(f.read().strip())
-
     def _split_generators(self, dl_manager):
 
         if self.config.name == "microset":
@@ -134,29 +134,33 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
                ),
            ]
 
-        n_files_train = self._get_n_files(dl_manager, split="train", config=self.config.name)
-        n_files_validation = self._get_n_files(dl_manager, split="validation", config="validation")
-        n_files_test = self._get_n_files(dl_manager, split="test", config="test")
+        n_shards_path = dl_manager.download_and_extract(_N_SHARDS_URL)
+        with open(n_shards_path, encoding="utf-8") as f:
+            n_shards = json.load(f)
+
+        if self.config.name in ["validation", "test"]:
+            splits_to_configs = {self.config.name: self.config.name}
+        else:
+            splits_to_configs = {
+                "train": self.config.name,
+                "validation": "validation",
+                "test": "test"
+            }
 
-        urls = {
-            "train": [_DATA_URL.format(split="train", config=self.config.name, archive_id=i) for i in range(n_files_train)],
-            "validation": [_DATA_URL.format(split="validation", config="validation", archive_id=i) for i in range(n_files_validation)],
-            "test": [_DATA_URL.format(split="test", config="test", archive_id=i) for i in range(n_files_test)],
+        audio_urls = {
+            split: [
+                _DATA_URL.format(split=split, config=config, archive_id=i) for i in range(n_shards[split])
+            ] for split, config in splits_to_configs.items()
         }
-        archive_paths = dl_manager.download(urls)
+        audio_archive_paths = dl_manager.download(audio_urls)
 
         # In non-streaming mode, we extract the archives to have the data locally:
-        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else \
-            {
-                "train": [None] * len(archive_paths),
-                "validation": [None] * len(archive_paths),
-                "test": [None] * len(archive_paths),
-            }
+        local_extracted_archive_paths = dl_manager.extract(audio_archive_paths) \
+            if not dl_manager.is_streaming else \
+            {split: [None] * len(audio_archive_paths[split]) for split in splits_to_configs}
 
         manifest_urls = {
-            "train": _MANIFEST_URL.format(split="train", config=self.config.name),
-            "validation": _MANIFEST_URL.format(split="validation", config="validation"),
-            "test": _MANIFEST_URL.format(split="test", config="test"),
+            split: _MANIFEST_URL.format(split=split, config=config) for split, config in splits_to_configs.items()
         }
         manifest_paths = dl_manager.download_and_extract(manifest_urls)
 
@@ -169,36 +173,25 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
        #
        # The iter_archive method returns an iterable of (path_within_archive, file_obj) for every
        # file in a TAR archive.
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "local_extracted_archive_paths": local_extracted_archive_paths["train"],
-                    # use iter_archive here to access the files in the TAR archives:
-                    "archives": [dl_manager.iter_archive(path) for path in archive_paths["train"]],
-                    "manifest_path": manifest_paths["train"],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "local_extracted_archive_paths": local_extracted_archive_paths["validation"],
-                    # use iter_archive here to access the files in the TAR archives:
-                    "archives": [dl_manager.iter_archive(path) for path in archive_paths["validation"]],
-                    "manifest_path": manifest_paths["validation"],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "local_extracted_archive_paths": local_extracted_archive_paths["validation"],
-                    # use iter_archive here to access the files in the TAR archives:
-                    "archives": [dl_manager.iter_archive(path) for path in archive_paths["test"]],
-                    "manifest_path": manifest_paths["test"],
-                },
-            ),
-        ]
+        splits_to_names = {
+            "train": datasets.Split.TRAIN,
+            "validation": datasets.Split.VALIDATION,
+            "test": datasets.Split.TEST,
+        }
+        split_generators = []
+        for split in splits_to_configs:
+            split_generators.append(
+                datasets.SplitGenerator(
+                    name=splits_to_names[split],
+                    gen_kwargs={
+                        "local_extracted_archive_paths": local_extracted_archive_paths[split],
+                        # use iter_archive here to access the files in the TAR archives:
+                        "archives": [dl_manager.iter_archive(path) for path in audio_archive_paths[split]],
+                        "manifest_path": manifest_paths[split],
+                    }
+                )
+            )
+        return split_generators
 
     def _generate_examples(self, local_extracted_archive_paths, archives, manifest_path):
        meta = dict()
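The comment kept in the last hunk documents the contract this refactor relies on: `dl_manager.iter_archive` streams `(path_within_archive, file_obj)` pairs straight out of a TAR shard, which is why `_generate_examples` receives `archives` as a list of iterators rather than directory paths. A rough standard-library stand-in (not the `datasets` implementation) behaves like this:

    import tarfile

    def iter_archive(tar_path):
        # Rough stand-in for dl_manager.iter_archive: stream each regular member's
        # in-archive path together with a readable file object, without unpacking.
        with tarfile.open(tar_path) as tar:
            for member in tar:
                if member.isfile():
                    yield member.name, tar.extractfile(member)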
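A note on the new shard bookkeeping: the removed per-split `n_files.txt` counters cost one download per split and config, while `n_shards.json` is fetched once. Its layout is not shown in this commit; from the `n_shards[split]` lookup in `_split_generators`, a plausible shape is a split-to-count mapping. In the sketch below the counts are invented for illustration, and the base URL is assumed to be the standard `resolve/main` prefix that appears truncated in the first hunk's context:

    import json

    # Hypothetical n_shards.json contents -- shape inferred from n_shards[split],
    # values invented for illustration:
    n_shards = json.loads('{"train": 804, "validation": 12, "test": 13}')

    # With a count in hand, the archive URLs expand shard by shard
    # (base URL assumed, see the lead-in above):
    _BASE_URL = "https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/main/"
    _DATA_URL = _BASE_URL + "{split}/{config}/{config}_{archive_id:06d}.tar"
    for i in range(n_shards["validation"]):
        url = _DATA_URL.format(split="validation", config="validation", archive_id=i)
    # e.g. .../main/validation/validation/validation_000000.tar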