albertvillanova (HF staff) committed
Commit 26d3d69
1 parent: 18ad137

Simplify code

Files changed (1)
  1. progene.py +20 -21
progene.py CHANGED
@@ -24,7 +24,7 @@ from .bigbiohub import kb_features
 from .bigbiohub import BigBioConfig
 from .bigbiohub import Tasks
 
-_LANGUAGES = ['English']
+_LANGUAGES = ["English"]
 _PUBMED = True
 _LOCAL = False
 _CITATION = """\
@@ -59,7 +59,7 @@ The corpus was developed in the context of the StemNet project (http://www.stemn
 
 _HOMEPAGE = "https://zenodo.org/record/3698568#.YlVHqdNBxeg"
 
-_LICENSE = 'Creative Commons Attribution 4.0 International'
+_LICENSE = "Creative Commons Attribution 4.0 International"
 
 # using custom url: original distribution includes trained models (>25GB) and original dataset license allow for redistribution
 _URLS = "data/crossvalidation_data.zip"
@@ -128,25 +128,24 @@ class ProgeneDataset(datasets.GeneratorBasedBuilder):
         urls = _URLS
         dl_dir = dl_manager.download_and_extract(urls)
         dataset_dir = os.path.join(dl_dir, "crossvalidation_data")
-        dataset_dir = pathlib.Path(dataset_dir)
-        splits = []
-        for split_num in range(0, 10):
-            for file in dataset_dir.joinpath(f"flairSplit{split_num}").iterdir():
-                if file.name == "train.txt":
-                    split_id = f"split_{split_num}_{datasets.Split.TRAIN}"
-                elif file.name == "dev.txt":
-                    split_id = f"split_{split_num}_{datasets.Split.VALIDATION}"
-                else:
-                    split_id = f"split_{split_num}_{datasets.Split.TEST}"
-
-                splits.append(
-                    datasets.SplitGenerator(
-                        name=split_id,
-                        gen_kwargs={"filepath": file, "split_id": split_id},
-                    )
-                )
-
-        return splits
+        split_filenames = {
+            "train": "train.txt",
+            "validation": "dev.txt",
+            "test": "test.txt",
+        }
+        return [
+            datasets.SplitGenerator(
+                name=f"split_{split_num}_{split}",
+                gen_kwargs={
+                    "filepath": os.path.join(
+                        dataset_dir, f"flairSplit{split_num}", filename
+                    ),
+                    "split_id": f"split_{split_num}_{split}",
+                },
+            )
+            for split_num in range(0, 10)
+            for split, filename in split_filenames.items()
+        ]
 
     def _generate_examples(self, filepath, split_id: str) -> Tuple[int, Dict]:
         """Yields examples as (key, example) tuples."""