Convert the dataset to streaming mode.

Files changed:
- MASSIVE.py (+16, -11)
- test_MASSIVE.py (+1, -1)
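The core of the change is visible in the MASSIVE.py hunks below: instead of extracting the archive to disk, the script now only downloads it and iterates over its members lazily, which is what makes streaming possible. A minimal sketch of the two patterns, assuming the removed code used `dl_manager.download_and_extract` (the removed side of this diff is only partially visible, so the "before" line is an assumption):

    # Inside _split_generators(self, dl_manager) of a datasets loading script,
    # where _URL is the archive URL defined at module level.

    # Before (assumed): extract the whole archive to local disk, which blocks streaming.
    # data_dir = dl_manager.download_and_extract(_URL)

    # After: download only, and hand each split a lazy iterator over the archive
    # members; dl_manager.iter_archive yields (path_inside_archive, file_object) pairs.
    archive = dl_manager.download(_URL)
    files = dl_manager.iter_archive(archive)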
MASSIVE.py (CHANGED)

@@ -322,13 +322,13 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
-
+        archive = dl_manager.download(_URL)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "
+                    "files": dl_manager.iter_archive(archive),
                     "split": "train",
                     "lang": self.config.name,
                 }
@@ -336,7 +336,7 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "
+                    "files": dl_manager.iter_archive(archive),
                     "split": "dev",
                     "lang": self.config.name,
                 }
@@ -344,7 +344,7 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "
+                    "files": dl_manager.iter_archive(archive),
                     "split": "test",
                     "lang": self.config.name,
                 }
@@ -400,25 +400,30 @@ class MASSIVE(datasets.GeneratorBasedBuilder):
 
         return tokens, tags
 
-    def _generate_examples(self,
+    def _generate_examples(self, files, split, lang):
 
         key_ = 0
 
         if lang == "all":
-            lang = _LANGUAGE_PAIRS
+            lang = _LANGUAGE_PAIRS.copy()
         else:
             lang = [lang]
 
         logger.info("⏳ Generating examples from = %s", ", ".join(lang))
 
-        for
+        for path, f in files:
 
-
+            l = path.split("1.0/data/")[-1].split(".jsonl")[0]
+
+            if not lang:
+                break
+            elif l in lang:
+                lang.remove(l)
+            else:
+                continue
 
             # Read the file
-
-            lines = f.read().split("\n")
-            f.close()
+            lines = f.read().decode(encoding="utf-8").split("\n")
 
             for line in lines:
 
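In the streaming version, `_generate_examples` receives the `(path, file_object)` pairs from `dl_manager.iter_archive`, picks out the per-language `*.jsonl` members by their path inside the archive, and decodes the raw bytes before splitting into lines. Below is a self-contained sketch of that consumption pattern; the helper name `iter_jsonl_members` and the in-memory stand-in for the archive are illustrative only, and the per-line `json.loads` is the usual way to read `.jsonl` rather than something shown in this hunk:

import io
import json


def iter_jsonl_members(files, langs):
    """Yield parsed JSON records from archive members whose language is in `langs`.

    `files` is an iterable of (path, file_object) pairs, as produced by
    dl_manager.iter_archive; the file objects yield bytes, so decode to UTF-8.
    """
    remaining = list(langs)
    for path, f in files:
        lang = path.split("1.0/data/")[-1].split(".jsonl")[0]
        if not remaining:
            break
        elif lang in remaining:
            remaining.remove(lang)
        else:
            continue
        for line in f.read().decode("utf-8").split("\n"):
            if line.strip():
                yield json.loads(line)


# Tiny demo with in-memory stand-ins for the archive members.
fake_files = [
    ("1.0/data/en-US.jsonl", io.BytesIO(b'{"id": "0", "utt": "wake me up"}\n')),
    ("1.0/data/fr-FR.jsonl", io.BytesIO(b'{"id": "1", "utt": "reveille-moi"}\n')),
]
for record in iter_jsonl_members(fake_files, ["en-US"]):
    print(record)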
test_MASSIVE.py (CHANGED)

@@ -19,7 +19,7 @@ dataset = load_dataset(source, "all")
 print(dataset)
 
 # print(dataset[0])
-f = dataset["
+f = dataset["validation"][0]
 print(f)
 
 # tags = []
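With the loader converted, the same script can also be read lazily by passing `streaming=True` to `load_dataset`. Note that a streaming (iterable) dataset cannot be indexed like `dataset["validation"][0]`, so the first example has to be taken from an iterator. A small usage sketch, assuming `source` points at this MASSIVE.py script as in test_MASSIVE.py:

from datasets import load_dataset

# `source` is assumed to be the path to this loading script, as in test_MASSIVE.py.
source = "./MASSIVE.py"

# Streaming mode: the archive is not extracted; examples are read on the fly.
dataset = load_dataset(source, "all", streaming=True)
print(dataset)

# Iterable datasets cannot be indexed, so take the first validation example by iterating.
f = next(iter(dataset["validation"]))
print(f)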