Fix dataset script #6
by mariosasko - opened

VerbaLex_voice.py CHANGED (+8 -3)
@@ -95,12 +95,18 @@ class VerbaLexVoiceDataset(datasets.GeneratorBasedBuilder):
             }
         split_generators = []
         for split in splits:
+            split_local_extract_archive_paths = local_extracted_archive_paths.get(split)
+            if not isinstance(split_local_extract_archive_paths, list):
+                split_local_extract_archive_paths = [split_local_extract_archive_paths]
+            split_archives = archive_paths.get(split)
+            if not isinstance(split_archives, list):
+                split_archives = [split_archives]
             split_generators.append(
                 datasets.SplitGenerator(
                     name=split_names.get(split, split),
                     gen_kwargs={
-                        "local_extracted_archive_paths":
-                        "archives": [dl_manager.iter_archive(path) for path in
+                        "local_extracted_archive_paths": split_local_extract_archive_paths,
+                        "archives": [dl_manager.iter_archive(path) for path in split_archives],
                         "meta_path": meta_paths[split]
                     }
                 )
@@ -109,7 +115,6 @@ class VerbaLexVoiceDataset(datasets.GeneratorBasedBuilder):
         return split_generators
 
     def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
-        datasets.logging.set_verbosity_debug()
         data_fields = list(self._info().features.keys())
         metadata = {}
         with open(meta_path, encoding="UTF-8") as f:
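
For readers skimming the diff: the change pre-normalizes the per-split download-manager outputs to lists before they go into gen_kwargs, so _generate_examples always receives list-valued local_extracted_archive_paths and archives even when a split has only a single archive. A minimal sketch of that normalization pattern, using made-up paths_by_split data for illustration (not the dataset's real download output):

    # Hypothetical per-split paths; a split may map to one path or to several.
    paths_by_split = {
        "train": ["train_0.tar", "train_1.tar"],
        "test": "test_0.tar",
    }

    for split, value in paths_by_split.items():
        # Normalize to a list so downstream code can always iterate over it.
        archives = value if isinstance(value, list) else [value]
        print(split, archives)  # e.g. test -> ['test_0.tar']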