# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HuggingFace Datasets loading script for the SPIDER spine MRI dataset.

SPIDER provides sagittal spine MRI scans (t1, t2, and t2 SPACE) for 257
patients, together with segmentation masks, patient/scanner metadata, and
radiological gradings, all hosted on Zenodo.
"""
# Import packages
import csv
import os
from typing import Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
import datasets
import SimpleITK as sitk
# Define functions
def import_csv_data(filepath: str) -> List[Dict[str, str]]:
"""Import all rows of CSV file."""
results = []
with open(filepath, encoding='utf-8') as f:
reader = csv.DictReader(f)
for line in reader:
results.append(line)
return results
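# Example (illustrative path, not a real file): each row of a CSV becomes a
# dict keyed by its column names, so the overview file can be read as
#   rows = import_csv_data('/tmp/overview.csv')
#   rows[0]['new_file_name']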
# Define constants
N_PATIENTS = 257
# TODO: Add the BibTeX citation for the SPIDER dataset
# (see the Zenodo record listed in _HOMEPAGE).
_CITATION = ""
_DESCRIPTION = """\
SPIDER is a dataset of sagittal spine MRI scans from 257 patients, available in
three scan types (t1, t2, and t2 SPACE), with corresponding segmentation masks,
patient/scanner metadata, and radiological gradings. This script downloads the
images, masks, and CSV files from Zenodo and exposes them as train, validation,
and test splits.
"""
_HOMEPAGE = "https://zenodo.org/records/10159290"
_LICENSE = """Creative Commons Attribution 4.0 International License \
(https://creativecommons.org/licenses/by/4.0/legalcode)"""
# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
_URLS = {
"images":"https://zenodo.org/records/10159290/files/images.zip",
"masks":"https://zenodo.org/records/10159290/files/masks.zip",
"overview":"https://zenodo.org/records/10159290/files/overview.csv",
"gradings":"https://zenodo.org/records/10159290/files/radiological_gradings.csv",
}
class CustomBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig that records which sagittal scan types to include."""
    def __init__(
        self,
        name: str = 'default',
        version: str = '0.0.0',
        data_dir: Optional[str] = None,
        data_files: Optional[Union[str, Sequence, Mapping]] = None,
        description: Optional[str] = None,
        scan_types: Optional[List[str]] = None,
    ):
        super().__init__(name, version, data_dir, data_files, description)
        # Default to all scan types; avoid a mutable default argument.
        self.scan_types = (
            scan_types if scan_types is not None else ['t1', 't2', 't2_SPACE']
        )
class SPIDER(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
    # This dataset defines several configurations, one per choice of sagittal
    # scan types, using the CustomBuilderConfig class defined above.
    BUILDER_CONFIG_CLASS = CustomBuilderConfig
    # A specific configuration can be loaded with, for example:
    # data = datasets.load_dataset('SPIDER', 'all_scan_types')
    # data = datasets.load_dataset('SPIDER', 't1_scan_types')
BUILDER_CONFIGS = [
CustomBuilderConfig(
name="all_scan_types",
version=VERSION,
description="Use images of all scan types (t1, t2, t2 SPACE)",
scan_types=['t1', 't2', 't2_SPACE'],
),
CustomBuilderConfig(
name="t1_scan_types",
version=VERSION,
description="Use images of t1 scan types only",
scan_types=['t1'],
),
CustomBuilderConfig(
name="t2_scan_types",
version=VERSION,
description="Use images of t2 scan types only",
scan_types=['t2'],
),
CustomBuilderConfig(
name="t2_SPACE_scan_types",
version=VERSION,
description="Use images of t2 SPACE scan types only",
scan_types=['t2_SPACE'],
),
]
DEFAULT_CONFIG_NAME = "all_scan_types"
    def _info(self):
        """
        This method specifies the datasets.DatasetInfo object, which contains
        the description, feature types, and other metadata for the dataset.
        """
        # NOTE: the declared features must mirror the keys yielded by
        # `_generate_examples`: patient_id, scan_type, and (when enabled)
        # raw_image, numeric_array, metadata, and rad_gradings. The types of
        # the image array, metadata, and gradings depend on the scan
        # dimensions and CSV schemas and still need to be declared here.
        features = datasets.Features(
            {
                "patient_id": datasets.Value("string"),
                "scan_type": datasets.Value("string"),
            }
        )
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
            features=features,
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""
This method is tasked with downloading/extracting the data
and defining the splits depending on the configuration
If several configurations are possible (listed in BUILDER_CONFIGS),
the configuration selected by the user is in self.config.name
"""
        # dl_manager is a datasets.download.DownloadManager used to download and extract the _URLS entries.
        # It accepts any nested list/dict of URLs and returns the same structure with each URL replaced by a local path.
        # By default, archives are extracted and the path to the cached extraction folder is returned instead of the archive itself.
paths_dict = dl_manager.download_and_extract(_URLS)
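        # The returned mapping mirrors _URLS, with each URL replaced by a local
        # path. Illustrative shape (cache paths are placeholders, not literal
        # values):
        #   {
        #       "images":   "<cache>/extracted/<hash>",    # contains an images/ folder of .mha files
        #       "masks":    "<cache>/extracted/<hash>",    # contains a masks/ folder of .mha files
        #       "overview": "<cache>/downloads/<hash>",    # overview.csv
        #       "gradings": "<cache>/downloads/<hash>",    # radiological_gradings.csv
        #   }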
scan_types = self.config.scan_types
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"paths_dict": paths_dict,
"split": "train",
"scan_types": scan_types,
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"paths_dict": paths_dict,
"split": "validate",
"scan_types": scan_types,
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"paths_dict": paths_dict,
"split": "test",
"scan_types": scan_types,
},
),
]
def _generate_examples(
self,
paths_dict: Dict[str, str],
split: str = 'train',
scan_types: List[str] = ['t1', 't2', 't2_SPACE'],
validate_share: float = 0.3,
test_share: float = 0.2,
raw_image: bool = True,
numeric_array: bool = True,
metadata: bool = True,
rad_gradings: bool = True,
random_seed: int = 9999,
    ) -> Iterator[Tuple[str, Dict]]:
"""
This method handles input defined in _split_generators to yield
(key, example) tuples from the dataset. The `key` is for legacy reasons
(tfds) and is not important in itself, but must be unique for each example.
Args
paths_dict: mapping of data element name to temporary file location
split: specify training, validation, or testing set;
options = 'train', 'validate', OR 'test'
scan_types: list of sagittal scan types to use in examples;
options = ['t1', 't2', 't2_SPACE']
validate_share: float indicating share of data to use for validation;
must be in range (0.0, 1.0); note that training share is
calculated as (1 - validate_share - test_share)
test_share: float indicating share of data to use for testing;
must be in range (0.0, 1.0); note that training share is
calculated as (1 - validate_share - test_share)
raw_image: indicates whether to include .mha image file in example
numeric_array: indicates whether to include numpy numeric array of
image in example
metadata: indicates whether to include patient and scanner metadata
with image example
rad_gradings: indicates whether to include patient's radiological
gradings with image example
            random_seed: seed for the numpy random number generator used to
                partition patients into splits and shuffle scan order
        Yields
            Tuple of (unique scan ID, dict of the requested example fields)
        """
# Set constants
train_share = (1.0 - validate_share - test_share)
np.random.seed(int(random_seed))
# Validate params
for item in scan_types:
if item not in ['t1', 't2', 't2_SPACE']:
                raise ValueError(
                    f'Scan type "{item}" not recognized as valid scan type. '
                    'Verify scan type argument.'
                )
if split not in ['train', 'validate', 'test']:
raise ValueError(
f'Split argument "{split}" is not recognized. \
Please enter one of ["train", "validate", "test"]'
)
if train_share <= 0.0:
raise ValueError(
f'Training share is calculated as (1 - validate_share - test_share) \
and must be greater than 0. Current calculated value is \
{round(train_share, 3)}. Adjust validate_share and/or \
test_share parameters.'
)
if validate_share > 1.0 or validate_share < 0.0:
raise ValueError(
f'Validation share must be between (0, 1). Current value is \
{validate_share}.'
)
if test_share > 1.0 or test_share < 0.0:
raise ValueError(
f'Testing share must be between (0, 1). Current value is \
{test_share}.'
)
# Generate train/validate/test partitions of patient IDs
partition = np.random.choice(
['train', 'dev', 'test'],
p=[train_share, validate_share, test_share],
size=N_PATIENTS,
)
patient_ids = (np.arange(N_PATIENTS) + 1)
train_ids = set(patient_ids[partition == 'train'])
validate_ids = set(patient_ids[partition == 'dev'])
test_ids = set(patient_ids[partition == 'test'])
assert len(train_ids.union(validate_ids, test_ids)) == N_PATIENTS
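        # Because the seed above is fixed, each call to _generate_examples
        # (one per split) re-derives the same patient-level partition, so the
        # train/validate/test subsets stay mutually exclusive across splits.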
# Import patient/scanner data and radiological gradings data
overview_data = import_csv_data(paths_dict['overview'])
grades_data = import_csv_data(paths_dict['gradings'])
# Convert overview data list of dicts to dict of dicts
overview_dict = {}
for item in overview_data:
key = item['new_file_name']
overview_dict[key] = item
# Merge patient records for radiological gradings data
grades_dict = {}
for patient_id in patient_ids:
patient_grades = [
x for x in grades_data if x['Patient'] == str(patient_id)
]
if patient_grades:
grades_dict[str(patient_id)] = patient_grades
# Import image and mask data
image_files = [
file for file in os.listdir(os.path.join(paths_dict['images'], 'images'))
if file.endswith('.mha')
]
assert len(image_files) > 0, "No image files found--check directory path."
mask_files = [
file for file in os.listdir(os.path.join(paths_dict['masks'], 'masks'))
if file.endswith('.mha')
]
assert len(mask_files) > 0, "No mask files found--check directory path."
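        # File names appear to follow '<patient_id>_<scan_type>.mha'
        # (e.g. '42_t2_SPACE.mha' -- an illustrative name, not a real file);
        # the scan-type and patient-ID filters below rely on this convention.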
# Filter image and mask data based on scan types
image_files = [
file for file in image_files
if any(scan_type in file for scan_type in scan_types)
]
mask_files = [
file for file in mask_files
if any(scan_type in file for scan_type in scan_types)
]
# Subset train/validation/test partition images and mask files
if split == 'train':
subset_ids = train_ids
elif split == 'validate':
subset_ids = validate_ids
elif split == 'test':
subset_ids = test_ids
        # Match on the full patient-ID prefix (exact match rather than
        # substring) so that e.g. patient 2 does not also pull in files for
        # patients 21, 25, 203, etc.
        subset_id_strs = {str(patient_id) for patient_id in subset_ids}
        image_files = [
            file for file in image_files
            if file.split('_')[0] in subset_id_strs
        ]
        mask_files = [
            file for file in mask_files
            if file.split('_')[0] in subset_id_strs
        ]
        assert len(image_files) == len(mask_files), (
            "The number of image files does not match the number of mask "
            "files--verify subsetting operation."
        )
# Shuffle order of patient scans
# (note that only images need to be shuffled since masks and metadata
# will be linked to the selected image)
np.random.shuffle(image_files)
## Generate next example
# ----------------------
for example in image_files:
# Extract linking data
scan_id = example.replace('.mha', '')
patient_id = scan_id.split('_')[0]
scan_type = '_'.join(scan_id.split('_')[1:])
# Load .mha file
image_path = os.path.join(paths_dict['images'], 'images', example)
image = sitk.ReadImage(image_path)
# Convert .mha image to numeric array
image_array = sitk.GetArrayFromImage(image)
# Extract overview data corresponding to image
image_overview = overview_dict[scan_id]
# Extract patient radiological gradings corresponding to patient
patient_grades_dict = {}
            # Use .get() so a patient without grading rows yields an empty
            # gradings dict instead of raising a KeyError
            for item in grades_dict.get(patient_id, []):
key = f'IVD{item["IVD label"]}'
value = {
k:v for k,v in item.items()
if k not in ['Patient', 'IVD label']
}
patient_grades_dict[key] = value
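            # Illustrative structure of patient_grades_dict (grading column
            # names depend on the radiological_gradings.csv schema):
            #   {'IVD1': {<grading column>: <value>, ...}, 'IVD2': {...}, ...}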
# Prepare example return dict
return_dict = {'patient_id':patient_id, 'scan_type':scan_type}
if raw_image:
return_dict['raw_image'] = image
if numeric_array:
return_dict['numeric_array'] = image_array
if metadata:
return_dict['metadata'] = image_overview
if rad_gradings:
return_dict['rad_gradings'] = patient_grades_dict
# Yield example
yield (scan_id, return_dict)
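# Minimal developer smoke test -- a sketch, not part of the dataset API. It
# assumes a `datasets` version that still supports script-based builders and
# accepts the `config_name` keyword, and it downloads several GB of data from
# Zenodo on first run.
if __name__ == "__main__":
    builder = SPIDER(config_name="t1_scan_types")
    dl_manager = datasets.DownloadManager()
    local_paths = dl_manager.download_and_extract(_URLS)
    # Pull a single (key, example) pair straight from the generator, bypassing
    # the Arrow writer so this works even before `features` is fully declared.
    scan_id, example = next(
        builder._generate_examples(paths_dict=local_paths, split="train")
    )
    print(scan_id, example["patient_id"], example["scan_type"])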