Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
gabrielaltay albertvillanova HF staff committed on
Commit
38dbd24
·
1 Parent(s): 84dd01e

Fix FileNotFoundError (#2)

Browse files

- Add data file (535377bcdbd49c2342decfc61d0b96e9ffda6a4e)
- Update URL to data file (18ad13762d5437820ccf82872c8db62de20c736f)
- Simplify code (26d3d6986260fa7363382e464953ba9e713625bd)


Co-authored-by: Albert Villanova <albertvillanova@users.noreply.huggingface.co>

Files changed (2) hide show
  1. data/crossvalidation_data.zip +3 -0
  2. progene.py +21 -22
data/crossvalidation_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8745d6b112b5c02510331ae598451edceb014d76edeef63d6fe420ed4d6de5c0
3
+ size 21690976
progene.py CHANGED
@@ -24,7 +24,7 @@ from .bigbiohub import kb_features
24
  from .bigbiohub import BigBioConfig
25
  from .bigbiohub import Tasks
26
 
27
- _LANGUAGES = ['English']
28
  _PUBMED = True
29
  _LOCAL = False
30
  _CITATION = """\
@@ -59,10 +59,10 @@ The corpus was developed in the context of the StemNet project (http://www.stemn
59
 
60
  _HOMEPAGE = "https://zenodo.org/record/3698568#.YlVHqdNBxeg"
61
 
62
- _LICENSE = 'Creative Commons Attribution 4.0 International'
63
 
64
  # using custom url: original distribution includes trained models (>25GB) and original dataset license allow for redistribution
65
- _URLS = "https://huggingface.co/datasets/bigscience-biomedical/progene/resolve/main/crossvalidation_data.zip"
66
 
67
  _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
68
 
@@ -128,25 +128,24 @@ class ProgeneDataset(datasets.GeneratorBasedBuilder):
128
  urls = _URLS
129
  dl_dir = dl_manager.download_and_extract(urls)
130
  dataset_dir = os.path.join(dl_dir, "crossvalidation_data")
131
- dataset_dir = pathlib.Path(dataset_dir)
132
- splits = []
133
- for split_num in range(0, 10):
134
- for file in dataset_dir.joinpath(f"flairSplit{split_num}").iterdir():
135
- if file.name == "train.txt":
136
- split_id = f"split_{split_num}_{datasets.Split.TRAIN}"
137
- elif file.name == "dev.txt":
138
- split_id = f"split_{split_num}_{datasets.Split.VALIDATION}"
139
- else:
140
- split_id = f"split_{split_num}_{datasets.Split.TEST}"
141
-
142
- splits.append(
143
- datasets.SplitGenerator(
144
- name=split_id,
145
- gen_kwargs={"filepath": file, "split_id": split_id},
146
- )
147
- )
148
-
149
- return splits
150
 
151
  def _generate_examples(self, filepath, split_id: str) -> Tuple[int, Dict]:
152
  """Yields examples as (key, example) tuples."""
 
24
  from .bigbiohub import BigBioConfig
25
  from .bigbiohub import Tasks
26
 
27
+ _LANGUAGES = ["English"]
28
  _PUBMED = True
29
  _LOCAL = False
30
  _CITATION = """\
 
59
 
60
  _HOMEPAGE = "https://zenodo.org/record/3698568#.YlVHqdNBxeg"
61
 
62
+ _LICENSE = "Creative Commons Attribution 4.0 International"
63
 
64
  # using custom url: original distribution includes trained models (>25GB) and original dataset license allow for redistribution
65
+ _URLS = "data/crossvalidation_data.zip"
66
 
67
  _SUPPORTED_TASKS = [Tasks.NAMED_ENTITY_RECOGNITION]
68
 
 
128
  urls = _URLS
129
  dl_dir = dl_manager.download_and_extract(urls)
130
  dataset_dir = os.path.join(dl_dir, "crossvalidation_data")
131
+ split_filenames = {
132
+ "train": "train.txt",
133
+ "validation": "dev.txt",
134
+ "test": "test.txt",
135
+ }
136
+ return [
137
+ datasets.SplitGenerator(
138
+ name=f"split_{split_num}_{split}",
139
+ gen_kwargs={
140
+ "filepath": os.path.join(
141
+ dataset_dir, f"flairSplit{split_num}", filename
142
+ ),
143
+ "split_id": f"split_{split_num}_{split}",
144
+ },
145
+ )
146
+ for split_num in range(0, 10)
147
+ for split, filename in split_filenames.items()
148
+ ]
 
149
 
150
  def _generate_examples(self, filepath, split_id: str) -> Tuple[int, Dict]:
151
  """Yields examples as (key, example) tuples."""