Tasks: Image Classification
Sub-tasks: multi-label-image-classification
Languages: English
Size: 100B<n<1T
License:
import io
import random
import re

import datasets
import gdown
import numpy as np
import pydicom
import s3fs
from PIL import Image
# Download the s5cmd manifests that list the DICOM series hosted on AWS S3.
example_manifest_url = "https://drive.google.com/uc?id=1JBkQTXeieyN9_6BGdTF_DDlFFyZrGyU6"
example_manifest_file = gdown.download(example_manifest_url, "manifest_file.s5cmd", quiet=False)
full_manifest_url = "https://drive.google.com/uc?id=1KP6qxcQoPF4MJdEPNwW7J6BlL_sUJ17j"
full_manifest_file = gdown.download(full_manifest_url, "full_manifest_file.s5cmd", quiet=False)
# The IDC bucket is public, so anonymous S3 access is sufficient.
fs = s3fs.S3FileSystem(anon=True)
_DESCRIPTION = "Colon cancer CT series from the National Cancer Institute Imaging Data Commons (IDC), with selected DICOM metadata for each image."
_HOMEPAGE = "https://imaging.datacommons.cancer.gov/"
_LICENSE = "https://fairsharing.org/FAIRsharing.0b5a1d"
_CITATION = "National Cancer Institute Imaging Data Commons (IDC) Collections was accessed on DATE from https://registry.opendata.aws/nci-imaging-data-commons"
class ColonCancerCTDataset(datasets.GeneratorBasedBuilder):
    """Colon cancer CT DICOM images from the NCI Imaging Data Commons."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="example", version=VERSION, description="A subset of the full dataset for demonstration purposes"),
        datasets.BuilderConfig(name="full_data", version=VERSION, description="The complete dataset"),
    ]

    DEFAULT_CONFIG_NAME = "example"
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "ImageType": datasets.Sequence(datasets.Value("string")),
                    "StudyDate": datasets.Value("string"),
                    "SeriesDate": datasets.Value("string"),
                    "Manufacturer": datasets.Value("string"),
                    "StudyDescription": datasets.Value("string"),
                    "SeriesDescription": datasets.Value("string"),
                    "PatientSex": datasets.Value("string"),
                    "PatientAge": datasets.Value("string"),
                    "PregnancyStatus": datasets.Value("string"),
                    "BodyPartExamined": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Parse the s5cmd manifest for the selected configuration, list every
        # DICOM object in the referenced series on S3, and split the object
        # paths into train/validation/test sets.
        s3_series_paths = []
        s3_individual_paths = []
        if self.config.name == "example":
            manifest_file = example_manifest_file
        else:
            manifest_file = full_manifest_file
        with open(manifest_file, "r") as file:
            for line in file:
                match = re.search(r"cp (s3://[\S]+) .", line)
                if match:
                    s3_series_paths.append(match.group(1)[:-2])  # Drop the trailing '/*' so the path points at the series prefix
        for series in s3_series_paths:
            for content in fs.ls(series):
                s3_individual_paths.append(fs.info(content)["Key"])
        # Shuffle with a fixed seed so the splits are reproducible across runs.
        random.seed(42)
        random.shuffle(s3_individual_paths)
        # Define the split sizes: 70% train, 15% validation, 15% test.
        train_size = int(0.7 * len(s3_individual_paths))
        val_size = int(0.15 * len(s3_individual_paths))
        # Split the paths into train, validation, and test sets
        train_paths = s3_individual_paths[:train_size]
        val_paths = s3_individual_paths[train_size:train_size + val_size]
        test_paths = s3_individual_paths[train_size + val_size:]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "paths": train_paths,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "paths": val_paths,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "paths": test_paths,
                    "split": "test",
                },
            ),
        ]
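    # For reference, a hypothetical s5cmd manifest line (bucket and series names
    # here are placeholders, not taken from the actual manifests):
    #   cp s3://<idc-bucket>/<series-uuid>/* .
    # _split_generators() captures the S3 URI with the regex above, strips the
    # trailing '/*', and then lists the individual DICOM objects in that series.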
    def _generate_examples(self, paths, split):
        """Yields one example per DICOM object, keyed by its S3 path."""
        for path in paths:
            key = path
            with fs.open(path, "rb") as f:
                dicom_data = pydicom.dcmread(f)
                pixel_array = dicom_data.pixel_array
                # MONOCHROME1 stores inverted grayscale, so flip it before conversion.
                if dicom_data.PhotometricInterpretation == "MONOCHROME1":
                    pixel_array = np.max(pixel_array) - pixel_array
                # Normalize or scale 16-bit or other depth images to 8-bit.
                if pixel_array.dtype != np.uint8:
                    max_val = np.max(pixel_array)
                    if max_val > 0:
                        pixel_array = (np.divide(pixel_array, max_val) * 255).astype(np.uint8)
                    else:
                        pixel_array = pixel_array.astype(np.uint8)
                # Build a PIL image: grayscale for 2-D arrays, RGB/RGBA for 3- or 4-channel arrays.
                if len(pixel_array.shape) == 2:
                    im = Image.fromarray(pixel_array, mode="L")  # L mode is grayscale
                elif len(pixel_array.shape) == 3 and pixel_array.shape[2] == 3:
                    im = Image.fromarray(pixel_array, mode="RGB")
                elif len(pixel_array.shape) == 3 and pixel_array.shape[2] == 4:
                    im = Image.fromarray(pixel_array, mode="RGBA").convert("RGB")
                else:
                    raise ValueError("Unsupported DICOM image format")
                # Serialize to PNG bytes for the datasets.Image() feature.
                with io.BytesIO() as output:
                    im.save(output, format="PNG")
                    png_image = output.getvalue()
                # Extract DICOM header metadata, defaulting to empty values for missing tags.
                ImageType = list(dicom_data.get("ImageType", []))
                StudyDate = dicom_data.get("StudyDate", "")
                SeriesDate = dicom_data.get("SeriesDate", "")
                Manufacturer = dicom_data.get("Manufacturer", "")
                StudyDescription = dicom_data.get("StudyDescription", "")
                SeriesDescription = dicom_data.get("SeriesDescription", "")
                PatientSex = dicom_data.get("PatientSex", "")
                PatientAge = dicom_data.get("PatientAge", "")
                # Record "None" when the PregnancyStatus tag is absent, "Yes" otherwise.
                PregnancyStatus = dicom_data.get("PregnancyStatus", None)
                PregnancyStatus = "None" if PregnancyStatus is None else "Yes"
                BodyPartExamined = dicom_data.get("BodyPartExamined", "")
                yield key, {
                    "image": png_image,
                    "ImageType": ImageType,
                    "StudyDate": StudyDate,
                    "SeriesDate": SeriesDate,
                    "Manufacturer": Manufacturer,
                    "StudyDescription": StudyDescription,
                    "SeriesDescription": SeriesDescription,
                    "PatientSex": PatientSex,
                    "PatientAge": PatientAge,
                    "PregnancyStatus": PregnancyStatus,
                    "BodyPartExamined": BodyPartExamined,
                }