Chris Oswald committed on
Commit
61fd3fa
1 Parent(s): b64741a

initial commit

Files changed (1)
  1. SPIDER.py +236 -0
SPIDER.py ADDED
@@ -0,0 +1,236 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Address all TODOs and remove all explanatory comments
"""TODO: Add a description here."""


import csv
import json
import os
from typing import Dict, List, Optional, Set

import numpy as np

import datasets

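# NOTE: the images and masks referenced below are .mha (MetaImage) volumes;
# actually reading them would require an image I/O library such as SimpleITK,
# which this script does not import or use yet.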

# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

# TODO: Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

_HOMEPAGE = "https://zenodo.org/records/10159290"

_LICENSE = """Creative Commons Attribution 4.0 International License \
(https://creativecommons.org/licenses/by/4.0/legalcode)"""

# TODO: Add link to the official dataset URLs here
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS = {
    "first_domain": {
        "images": "https://zenodo.org/records/10159290/files/images.zip",
        "masks": "https://zenodo.org/records/10159290/files/masks.zip",
        "overview": "https://zenodo.org/records/10159290/files/overview.csv",
        "gradings": "https://zenodo.org/records/10159290/files/radiological_gradings.csv",
    }
}

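# The _URLS entries above point to four Zenodo files: images.zip and masks.zip
# (the .mha image volumes and their segmentation masks), plus overview.csv and
# radiological_gradings.csv (the patient/scanner data and radiological gradings
# read in _generate_examples below).
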
class SPIDER(datasets.GeneratorBasedBuilder):
    """TODO: Short description of my dataset."""

    VERSION = datasets.Version("1.1.0")

    # This is an example of a dataset with multiple configurations.
    # If you don't want/need to define several sub-sets in your dataset,
    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.

    # If you need to make complex sub-parts in the datasets with configurable options,
    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    # You will be able to load one or the other configuration in the following list with
    # data = datasets.load_dataset('my_dataset', 'first_domain')
    # data = datasets.load_dataset('my_dataset', 'second_domain')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
        datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

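    # NOTE: only "first_domain" is defined in _URLS above, so selecting the
    # "second_domain" config would fail with a KeyError in _split_generators.
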
    def _info(self):
        # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
        if self.config.name == "first_domain":  # This is the name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "option1": datasets.Value("string"),
                    "answer": datasets.Value("string")
                    # These are the features of your dataset like images, labels ...
                }
            )
        else:  # This is an example to show how to have different features for "first_domain" and "second_domain"
            features = datasets.Features(
                {
                    "sentence": datasets.Value("string"),
                    "option2": datasets.Value("string"),
                    "second_domain_answer": datasets.Value("string")
                    # These are the features of your dataset like images, labels ...
                }
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

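    # NOTE: the feature schema above is still the template placeholder
    # (sentence/option1/answer); it does not yet describe the image, mask,
    # and CSV metadata fields that _generate_examples below works with.
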
    def _split_generators(self, dl_manager):
        # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
        # It can accept any type or nested list/dict and will give back the same structure with the URLs replaced with paths to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        urls = _URLS[self.config.name]
        paths_dict = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "paths_dict": paths_dict,
                    "split": "test",
                },
            ),
        ]

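    # After download_and_extract, `paths_dict` mirrors the _URLS structure:
    # the "images" and "masks" zips are replaced by paths to their extracted
    # folders, and "overview"/"gradings" by paths to the downloaded CSV files.
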
    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, paths_dict, split):
        # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.

        # Generate train/validate/test partitions of patient IDs
        np.random.seed(9999)
        N_PATIENTS = 257  # TODO: make hardcoded values dynamic
        VALIDATE_SHARE = 0.3
        TEST_SHARE = 0.2
        TRAIN_SHARE = (1.0 - VALIDATE_SHARE - TEST_SHARE)
        partition = np.random.choice(
            ['train', 'dev', 'test'],
            p=[TRAIN_SHARE, VALIDATE_SHARE, TEST_SHARE],
            size=N_PATIENTS,
        )
        patient_ids = (np.arange(N_PATIENTS) + 1)
        train_ids = set(patient_ids[partition == 'train'])
        validate_ids = set(patient_ids[partition == 'dev'])
        test_ids = set(patient_ids[partition == 'test'])
        assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS
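
        # Because the RNG is seeded above, the same patient-level partition is
        # reproduced on every call, so the train/dev/test generators operate on
        # disjoint, consistent sets of patient IDs.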

        # Import patient/scanner data and radiological gradings data
        overview_data = import_csv_data(paths_dict['overview'])
        grades_data = import_csv_data(paths_dict['gradings'])

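        # NOTE: overview_data and grades_data are loaded here but are not yet
        # joined to the image examples below.
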
        # Import image and mask data
        image_files = [
            file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
            if file.endswith('.mha')
        ]
        assert len(image_files) > 0, "No image files found--check directory path."

        mask_files = [
            file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
            if file.endswith('.mha')
        ]
        assert len(mask_files) > 0, "No mask files found--check directory path."
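
        # Both listings assume the extracted archives contain inner `images/`
        # and `masks/` subfolders holding the .mha volumes.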

        images = []
        masks = []

        # Select the patient IDs belonging to the requested split; note that
        # the VALIDATION split is generated with split == "dev" (see
        # _split_generators above)
        if split == 'train':
            split_ids = train_ids
        elif split == 'dev':
            split_ids = validate_ids
        elif split == 'test':
            split_ids = test_ids
        else:
            raise ValueError(f"Unrecognized split name: {split}")

        # TODO: load the image and mask volumes for each patient in `split_ids`
        # and yield (key, example) tuples matching the features declared in _info


def import_csv_data(filepath: str) -> List[Dict[str, str]]:
    """Import all rows of CSV file."""
    results = []
    with open(filepath, encoding='utf-8') as f:
        reader = csv.DictReader(f)
        for line in reader:
            results.append(line)
    return results

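
# Example usage (hypothetical path): import_csv_data("overview.csv") returns a
# list with one dict per CSV row, keyed by the column names in the header line,
# with every value kept as a string.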

# Leftover example-yielding code from the dataset-script template (commented
# out: `self`, `filepath`, and `split` are undefined at module level).
# with open(filepath, encoding="utf-8") as f:
#     for key, row in enumerate(f):
#         data = json.loads(row)
#         if self.config.name == "first_domain":
#             # Yields examples as (key, example) tuples
#             yield key, {
#                 "sentence": data["sentence"],
#                 "option1": data["option1"],
#                 "answer": "" if split == "test" else data["answer"],
#             }
#         else:
#             yield key, {
#                 "sentence": data["sentence"],
#                 "option2": data["option2"],
#                 "second_domain_answer": "" if split == "test" else data["second_domain_answer"],
#             }