Chris Oswald
committed
Commit 02961c5
1 Parent(s): d004e7b
added custom config options
SPIDER.py
CHANGED
@@ -18,7 +18,7 @@
 import csv
 import json
 import os
-from typing import Dict, List, Optional, Set, Tuple
+from typing import Dict, List, Mapping, Optional, Set, Sequence, Tuple, Union
 
 import numpy as np
 
@@ -62,14 +62,27 @@ _LICENSE = """Creative Commons Attribution 4.0 International License \
 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
-    "
-
-
-
-    "gradings":"https://zenodo.org/records/10159290/files/radiological_gradings.csv",
-    }
+    "images":"https://zenodo.org/records/10159290/files/images.zip",
+    "masks":"https://zenodo.org/records/10159290/files/masks.zip",
+    "overview":"https://zenodo.org/records/10159290/files/overview.csv",
+    "gradings":"https://zenodo.org/records/10159290/files/radiological_gradings.csv",
 }
 
+class CustomBuilderConfig(datasets.BuilderConfig):
+
+    def __init__(
+        self,
+        name: str = 'default',
+        version: str = '0.0.0',
+        data_dir: Optional[str] = None,
+        data_files: Optional[Union[str, Sequence, Mapping]] = None,
+        description: Optional[str] = None,
+        scan_types: List[str] = ['t1', 't2', 't2_SPACE'],
+    ):
+        super().__init__(name, version, data_dir, data_files, description)
+        self.scan_types = scan_types
+
+
 class SPIDER(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
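One note on the `CustomBuilderConfig` added above: the `super().__init__(name, version, data_dir, data_files, description)` call relies on the positional parameter order of `datasets.BuilderConfig`, and `scan_types` uses a mutable list as its default value. A slightly more defensive variant of the same class (a sketch, assuming `datasets.BuilderConfig` accepts these fields as keyword arguments) would be:

    from typing import List, Mapping, Optional, Sequence, Union

    import datasets


    class CustomBuilderConfig(datasets.BuilderConfig):
        """Builder config carrying the extra scan_types option."""

        def __init__(
            self,
            name: str = "default",
            version: str = "0.0.0",
            data_dir: Optional[str] = None,
            data_files: Optional[Union[str, Sequence, Mapping]] = None,
            description: Optional[str] = None,
            scan_types: Optional[List[str]] = None,
        ):
            # Keyword arguments avoid depending on BuilderConfig's positional order.
            super().__init__(
                name=name,
                version=version,
                data_dir=data_dir,
                data_files=data_files,
                description=description,
            )
            # Avoid a shared mutable default; None means "use all scan types".
            self.scan_types = scan_types if scan_types is not None else ["t1", "t2", "t2_SPACE"]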
@@ -81,38 +94,54 @@ class SPIDER(datasets.GeneratorBasedBuilder):
 
     # If you need to make complex sub-parts in the datasets with configurable options
     # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-
+    BUILDER_CONFIG_CLASS = CustomBuilderConfig
 
     # You will be able to load one or the other configurations in the following list with
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
-
-
+        CustomBuilderConfig(
+            name="all_scan_types",
+            version=VERSION,
+            description="Use images of all scan types (t1, t2, t2 SPACE)",
+            scan_types=['t1', 't2', 't2_SPACE'],
+        ),
+        CustomBuilderConfig(
+            name="t1_scan_types",
+            version=VERSION,
+            description="Use images of t1 scan types only",
+            scan_types=['t1'],
+        ),
+        CustomBuilderConfig(
+            name="t2_scan_types",
+            version=VERSION,
+            description="Use images of t2 scan types only",
+            scan_types=['t2'],
+        ),
+        CustomBuilderConfig(
+            name="t2_SPACE_scan_types",
+            version=VERSION,
+            description="Use images of t2 SPACE scan types only",
+            scan_types=['t2_SPACE'],
+        ),
     ]
 
-    DEFAULT_CONFIG_NAME = "
+    DEFAULT_CONFIG_NAME = "all_scan_types"
 
     def _info(self):
-
-
-
-
-
-
-
-
-
-
-
-
-
-                "sentence": datasets.Value("string"),
-                "option2": datasets.Value("string"),
-                "second_domain_answer": datasets.Value("string")
-                # These are the features of your dataset like images, labels ...
-            }
-        )
+        """
+        This method specifies the datasets.DatasetInfo object which contains
+        informations and typings for the dataset.
+        """
+        features = datasets.Features(
+            {
+                "sentence": datasets.Value("string"),
+                "option1": datasets.Value("string"),
+                "answer": datasets.Value("string")
+                # These are the features of your dataset like images, labels ...
+            }
+        )
+
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
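With `BUILDER_CONFIG_CLASS`, the four named configs, and `DEFAULT_CONFIG_NAME` in place, the scan-type option added by this commit is selected at load time. A minimal usage sketch (the script path is a placeholder, and forwarding an ad-hoc `scan_types` keyword assumes `load_dataset` passes unknown kwargs through to the builder config, which is not shown in this commit):

    import datasets

    # Default config ("all_scan_types", per DEFAULT_CONFIG_NAME above).
    ds_all = datasets.load_dataset("path/to/SPIDER.py")

    # One of the named configs from BUILDER_CONFIGS.
    ds_t1 = datasets.load_dataset("path/to/SPIDER.py", "t1_scan_types")

    # Ad-hoc override via config kwargs (assumed to reach CustomBuilderConfig.scan_types).
    ds_t2 = datasets.load_dataset("path/to/SPIDER.py", scan_types=["t2", "t2_SPACE"])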
@@ -130,15 +159,16 @@ class SPIDER(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        # TODO: This method is tasked with downloading/extracting the data
-        #
+        # TODO: This method is tasked with downloading/extracting the data
+        # and defining the splits depending on the configuration
+        # If several configurations are possible (listed in BUILDER_CONFIGS),
+        # the configuration selected by the user is in self.config.name
 
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-
-
-
+        paths_dict = dl_manager.download_and_extract(_URLS)
+        scan_types = self.config.scan_types
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
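Note on the two lines added above: `download_and_extract` preserves the structure of `_URLS`, so `paths_dict` is a plain dict with the same four keys, where the zip archives map to extracted directories and the CSV files map to downloaded file paths. Illustrative shape only (actual locations depend on the local datasets cache):

    # Illustrative result of dl_manager.download_and_extract(_URLS); paths are placeholders.
    paths_dict = {
        "images": "/cache/extracted/<hash>/images",    # from images.zip
        "masks": "/cache/extracted/<hash>/masks",      # from masks.zip
        "overview": "/cache/downloads/<hash>.csv",     # overview.csv
        "gradings": "/cache/downloads/<hash>.csv",     # radiological_gradings.csv
    }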
@@ -146,6 +176,7 @@ class SPIDER(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "paths_dict": paths_dict,
                     "split": "train",
+                    "scan_types": scan_types,
                 },
             ),
             datasets.SplitGenerator(
@@ -153,7 +184,8 @@ class SPIDER(datasets.GeneratorBasedBuilder):
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "paths_dict": paths_dict,
-                    "split": "
+                    "split": "validate",
+                    "scan_types": scan_types,
                 },
             ),
             datasets.SplitGenerator(
@@ -161,7 +193,8 @@ class SPIDER(datasets.GeneratorBasedBuilder):
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "paths_dict": paths_dict,
-                    "split": "test"
+                    "split": "test",
+                    "scan_types": scan_types,
                 },
             ),
         ]
@@ -170,8 +203,8 @@ class SPIDER(datasets.GeneratorBasedBuilder):
     def _generate_examples(
         self,
         paths_dict: Dict[str, str],
+        split: str = 'train',
         scan_types: List[str] = ['t1', 't2', 't2_SPACE'],
-        split: str = 'train',
         validate_share: float = 0.3,
         test_share: float = 0.2,
         raw_image: bool = True,
@@ -186,15 +219,24 @@ class SPIDER(datasets.GeneratorBasedBuilder):
         (tfds) and is not important in itself, but must be unique for each example.
 
         Args
-            paths_dict
-
-
-
-
-
-
-
-
+            paths_dict: mapping of data element name to temporary file location
+            split: specify training, validation, or testing set;
+                options = 'train', 'validate', OR 'test'
+            scan_types: list of sagittal scan types to use in examples;
+                options = ['t1', 't2', 't2_SPACE']
+            validate_share: float indicating share of data to use for validation;
+                must be in range (0.0, 1.0); note that training share is
+                calculated as (1 - validate_share - test_share)
+            test_share: float indicating share of data to use for testing;
+                must be in range (0.0, 1.0); note that training share is
+                calculated as (1 - validate_share - test_share)
+            raw_image: indicates whether to include .mha image file in example
+            numeric_array: indicates whether to include numpy numeric array of
+                image in example
+            metadata: indicates whether to include patient and scanner metadata
+                with image example
+            rad_gradings: indicates whether to include patient's radiological
+                gradings with image example
 
         Yields
             Tuple (unique patient-scan ID, dict of
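The docstring above documents `validate_share` and `test_share`, but the splitting logic itself is outside this diff. For orientation only, one way `_generate_examples` could turn those shares into a deterministic patient-level split is sketched below; the helper name, the seed, and the use of NumPy shuffling are assumptions, not code from SPIDER.py:

    from typing import Dict, List

    import numpy as np


    def assign_splits(
        patient_ids: List[int],
        validate_share: float = 0.3,
        test_share: float = 0.2,
        seed: int = 9999,
    ) -> Dict[int, str]:
        """Illustrative patient-level split; train share is 1 - validate_share - test_share."""
        assert 0.0 < validate_share < 1.0 and 0.0 < test_share < 1.0
        assert validate_share + test_share < 1.0

        ids = np.array(sorted(patient_ids))
        rng = np.random.default_rng(seed)
        rng.shuffle(ids)

        n_test = int(round(test_share * len(ids)))
        n_validate = int(round(validate_share * len(ids)))

        split_map = {}
        for i, pid in enumerate(ids):
            if i < n_test:
                split_map[int(pid)] = "test"
            elif i < n_test + n_validate:
                split_map[int(pid)] = "validate"
            else:
                split_map[int(pid)] = "train"
        return split_map

Assigning whole patients (rather than individual scans) to a split keeps every scan from one patient in the same subset, which is the usual precaution for medical imaging datasets.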