pierreguillou committed on
Commit
048e3df
1 Parent(s): e6b0f00

Update DocLayNet-large.py

Browse files
Files changed (1) hide show
  1. DocLayNet-large.py +30 -28
DocLayNet-large.py CHANGED
@@ -164,47 +164,49 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
164
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
165
 
166
  archive_path = dl_manager.download_and_extract(_URLs)
167
-
168
- downloaded_file = dl_manager.download_and_extract(archive_path["part_dataset_0"])
169
- downloaded_file = dl_manager.download_and_extract("part_dataset_0")
170
- # downloaded_file = dl_manager.download_and_extract("https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_2.zip")
171
- # downloaded_file = dl_manager.download_and_extract("https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_3.zip")
172
-
173
- return [
174
- datasets.SplitGenerator(
175
  name=datasets.Split.TRAIN,
176
  # These kwargs will be passed to _generate_examples
177
  gen_kwargs={
178
- "filepath_0": f"{archive_path["part_dataset_0"]}/large_dataset/train/",
179
- "filepath_1": f"{archive_path["part_dataset_1"]}/large_dataset/train/",
180
- "filepath_2": f"{archive_path["part_dataset_2"]}/large_dataset/train/",
181
- "filepath_3": f"{archive_path["part_dataset_3"]}/large_dataset/train/",
182
  "split": "train",
183
  },
184
- ),
185
- datasets.SplitGenerator(
 
186
  name=datasets.Split.VALIDATION,
187
  # These kwargs will be passed to _generate_examples
188
  gen_kwargs={
189
- "filepath_0": f"{archive_path["part_dataset_0"]}/large_dataset/val/",
190
- "filepath_1": f"{archive_path["part_dataset_1"]}/large_dataset/val/",
191
- "filepath_2": f"{archive_path["part_dataset_2"]}/large_dataset/val/",
192
- "filepath_3": f"{archive_path["part_dataset_3"]}/large_dataset/val/",
193
- "split": "dev",
194
  },
195
- ),
196
- datasets.SplitGenerator(
 
197
  name=datasets.Split.TEST,
198
  # These kwargs will be passed to _generate_examples
199
  gen_kwargs={
200
- "filepath_0": f"{archive_path["part_dataset_0"]}/large_dataset/test/",
201
- "filepath_1": f"{archive_path["part_dataset_1"]}/large_dataset/test/",
202
- "filepath_2": f"{archive_path["part_dataset_2"]}/large_dataset/test/",
203
- "filepath_3": f"{archive_path["part_dataset_3"]}/large_dataset/test/",
204
- "split": "test"
205
  },
206
- ),
207
- ]
 
 
 
 
208
 
209
  def _generate_examples(self, filepath_0, filepath_1, filepath_2, filepath_3, split):
210
  filepath = (filepath_0, filepath_1, filepath_2, filepath_3)
 
164
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
165
 
166
  archive_path = dl_manager.download_and_extract(_URLs)
167
+ splits = []
168
+ for split in self.config.splits:
169
+ if split == "train":
170
+ dataset = datasets.SplitGenerator(
 
 
 
 
171
  name=datasets.Split.TRAIN,
172
  # These kwargs will be passed to _generate_examples
173
  gen_kwargs={
174
+ "filepath_0": os.path.join(archive_path["part_dataset_0"], "part_dataset_0/train/"),
175
+ "filepath_1": os.path.join(archive_path["part_dataset_1"], "part_dataset_1/train/"),
176
+ "filepath_2": os.path.join(archive_path["part_dataset_2"], "part_dataset_2/train/"),
177
+ "filepath_3": os.path.join(archive_path["part_dataset_3"], "part_dataset_3/train/"),
178
  "split": "train",
179
  },
180
+ )
181
+ elif split in ["val", "valid", "validation", "dev"]:
182
+ dataset = datasets.SplitGenerator(
183
  name=datasets.Split.VALIDATION,
184
  # These kwargs will be passed to _generate_examples
185
  gen_kwargs={
186
+ "filepath_0": os.path.join(archive_path["part_dataset_0"], "part_dataset_0/val/"),
187
+ "filepath_1": os.path.join(archive_path["part_dataset_1"], "part_dataset_1/val/"),
188
+ "filepath_2": os.path.join(archive_path["part_dataset_2"], "part_dataset_2/val/"),
189
+ "filepath_3": os.path.join(archive_path["part_dataset_3"], "part_dataset_3/val/"),
190
+ "split": "val",
191
  },
192
+ )
193
+ elif split in ["test"]:
194
+ dataset = datasets.SplitGenerator(
195
  name=datasets.Split.TEST,
196
  # These kwargs will be passed to _generate_examples
197
  gen_kwargs={
198
+ "filepath_0": os.path.join(archive_path["part_dataset_0"], "part_dataset_0/test/"),
199
+ "filepath_1": os.path.join(archive_path["part_dataset_1"], "part_dataset_1/test/"),
200
+ "filepath_2": os.path.join(archive_path["part_dataset_2"], "part_dataset_2/test/"),
201
+ "filepath_3": os.path.join(archive_path["part_dataset_3"], "part_dataset_3/test/"),
202
+ "split": "test",
203
  },
204
+ )
205
+ else:
206
+ continue
207
+
208
+ splits.append(dataset)
209
+ return splits
210
 
211
  def _generate_examples(self, filepath_0, filepath_1, filepath_2, filepath_3, split):
212
  filepath = (filepath_0, filepath_1, filepath_2, filepath_3)