Datasets:
Tasks:
Fill-Mask
Sub-tasks:
masked-language-modeling
Languages:
English
Size:
10M<n<100M
ArXiv:
License:
Peter Henderson
committed on
Commit
•
8006849
1
Parent(s):
5326fd6
Fix script to load chunked data source
Browse files- pile-of-law.py +5 -3
pile-of-law.py
CHANGED
@@ -75,9 +75,11 @@ class PileOfLaw(datasets.GeneratorBasedBuilder):
|
|
75 |
else:
|
76 |
data_sources = [self.config.name]
|
77 |
for split in ["train", "validation"]:
|
78 |
-
data_urls[split] = [
|
79 |
-
|
80 |
-
|
|
|
|
|
81 |
train_downloaded_files = dl_manager.download(data_urls["train"])
|
82 |
validation_downloaded_files = dl_manager.download(data_urls["validation"])
|
83 |
return [
|
|
|
75 |
else:
|
76 |
data_sources = [self.config.name]
|
77 |
for split in ["train", "validation"]:
|
78 |
+
data_urls[split] = []
|
79 |
+
for source in data_sources:
|
80 |
+
for chunk in _DATA_URL[source][split]:
|
81 |
+
data_urls[split].append(chunk)
|
82 |
+
|
83 |
train_downloaded_files = dl_manager.download(data_urls["train"])
|
84 |
validation_downloaded_files = dl_manager.download(data_urls["validation"])
|
85 |
return [
|