gabrielaltay and albertvillanova committed
Commit 063509a
1 Parent(s): 27b7ef9

Fix ValueError: Cannot seek streaming HTTP file (#2)

- Host data files (e3e29cd6cae75512a7a03ac94cf38518ba471d97)
- Update loading script with hosted data files (5c9738f78d1315640e90e264e9c07b67dba1acef)


Co-authored-by: Albert Villanova <albertvillanova@users.noreply.huggingface.co>

Files changed (4)
  1. bionlp_st_2011_ge.py +33 -29
  2. data/devel.zip +3 -0
  3. data/test.zip +3 -0
  4. data/train.zip +3 -0
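
Why this fixes the error: in streaming mode, dl_manager.download_and_extract does not return a local directory but a chained URL into the remote archive, so the old code, which wrapped the result in pathlib.Path and globbed it, needed random access that a streamed HTTP archive cannot provide. The updated script below hosts the archives in the repository and walks them with dl_manager.iter_files, which works in both regular and streaming mode. A minimal, self-contained sketch of that pattern; the class name, feature set, and data/train.zip layout are illustrative assumptions, not the real loader:

import datasets


class StreamingFriendlyDemo(datasets.GeneratorBasedBuilder):
    """Illustrative builder showing the download_and_extract + iter_files pattern."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"document_id": datasets.Value("string"), "text": datasets.Value("string")}
            )
        )

    def _split_generators(self, dl_manager):
        # Works in both regular and streaming mode: repo-hosted, repo-relative
        # archives never need to be treated as a local directory.
        data_files = dl_manager.download_and_extract({"train": "data/train.zip"})
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # iter_files yields file paths one by one (local paths when
                # downloaded, chained URLs such as zip://...::https://... when streaming).
                gen_kwargs={"data_files": dl_manager.iter_files(data_files["train"])},
            )
        ]

    def _generate_examples(self, data_files):
        for guid, path in enumerate(data_files):
            if not path.endswith(".txt"):
                continue
            # In streaming mode the datasets library patches open() inside loading
            # scripts, so reading a chained URL here works without seeking.
            with open(path, encoding="utf-8") as f:
                yield guid, {"document_id": path, "text": f.read()}
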
bionlp_st_2011_ge.py CHANGED
@@ -72,8 +72,9 @@ _HOMEPAGE = "https://sites.google.com/site/bionlpst/bionlp-shared-task-2011/geni
 _LICENSE = 'Creative Commons Attribution 3.0 Unported'
 
 _URLs = {
-    "source": "https://github.com/openbiocorpora/bionlp-st-2011-ge/archive/refs/heads/master.zip",
-    "bigbio_kb": "https://github.com/openbiocorpora/bionlp-st-2011-ge/archive/refs/heads/master.zip",
+    "train": "data/train.zip",
+    "validation": "data/devel.zip",
+    "test": "data/test.zip",
 }
 
 _SUPPORTED_TASKS = [
@@ -215,26 +216,20 @@ class bionlp_st_2011_ge(datasets.GeneratorBasedBuilder):
         self, dl_manager: datasets.DownloadManager
     ) -> List[datasets.SplitGenerator]:
 
-        my_urls = _URLs[self.config.schema]
-        data_dir = Path(dl_manager.download_and_extract(my_urls))
-        data_files = {
-            "train": data_dir / f"bionlp-st-2011-ge-master" / "original-data" / "train",
-            "dev": data_dir / f"bionlp-st-2011-ge-master" / "original-data" / "devel",
-            "test": data_dir / f"bionlp-st-2011-ge-master" / "original-data" / "test",
-        }
+        data_files = dl_manager.download_and_extract(_URLs)
 
         return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
-                gen_kwargs={"data_files": data_files["train"]},
+                gen_kwargs={"data_files": dl_manager.iter_files(data_files["train"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
-                gen_kwargs={"data_files": data_files["dev"]},
+                gen_kwargs={"data_files": dl_manager.iter_files(data_files["validation"])},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
-                gen_kwargs={"data_files": data_files["test"]},
+                gen_kwargs={"data_files": dl_manager.iter_files(data_files["test"])},
            ),
        ]
@@ -248,20 +243,29 @@ class bionlp_st_2011_ge(datasets.GeneratorBasedBuilder):
         return kb_example
 
     def _generate_examples(self, data_files: Path):
-        if self.config.schema == "source":
-            txt_files = list(data_files.glob("*txt"))
-            for guid, txt_file in enumerate(txt_files):
-                example = parse_brat_file(txt_file)
-                example["id"] = str(guid)
-                yield guid, example
-        elif self.config.schema == "bigbio_kb":
-            txt_files = list(data_files.glob("*txt"))
-            for guid, txt_file in enumerate(txt_files):
-                example = brat_parse_to_bigbio_kb(
-                    parse_brat_file(txt_file)
-                )
-                example = self._standardize_arguments_roles(example)
-                example["id"] = str(guid)
-                yield guid, example
-        else:
-            raise ValueError(f"Invalid config: {self.config.name}")
+
+        if self.config.schema == "source":
+            guid = 0
+            for data_file in data_files:
+                txt_file = Path(data_file)
+                if txt_file.suffix != ".txt":
+                    continue
+                example = parse_brat_file(txt_file)
+                example["id"] = str(guid)
+                yield guid, example
+                guid += 1
+        elif self.config.schema == "bigbio_kb":
+            guid = 0
+            for data_file in data_files:
+                txt_file = Path(data_file)
+                if txt_file.suffix != ".txt":
+                    continue
+                example = brat_parse_to_bigbio_kb(
+                    parse_brat_file(txt_file)
+                )
+                example = self._standardize_arguments_roles(example)
+                example["id"] = str(guid)
+                yield guid, example
+                guid += 1
+        else:
+            raise ValueError(f"Invalid config: {self.config.name}")

data/devel.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bec828262ad26b572fbcfe6349ac0da925b4518fac1376bee037132db2f38ce5
+size 1150649

data/test.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b0bb6a3fedc5a6aa1a890defc541c10cffd65f118854f8d9cb21266db78c598
+size 458328

data/train.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e4537170061741b025d3118dc257da42207375517dcbf9ebcbcba7a1db542b6
+size 1601419
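
With the split archives hosted as data/*.zip and iterated through dl_manager.iter_files, the loader should now also work when streamed. A minimal usage sketch; the repo id and config name below are illustrative assumptions, not taken from this commit:

from datasets import load_dataset

ds = load_dataset(
    "bigbio/bionlp_st_2011_ge",       # hypothetical repo id
    name="bionlp_st_2011_ge_source",  # hypothetical config name for the "source" schema
    split="train",
    streaming=True,                   # previously failed with "Cannot seek streaming HTTP file"
)
print(next(iter(ds))["id"])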