#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 16:13:56 2024
@author: tominhanh
"""
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random

import numpy as np
import pandas as pd
from PIL import Image as PilImage  # alias avoids clashing with datasets.Image

import datasets
from datasets import (
    ClassLabel,
    DatasetInfo,
    DownloadManager,
    Features,
    GeneratorBasedBuilder,
    Image,
    Sequence,
    Value,
)

_CITATION = """\
https://arxiv.org/abs/2102.09099
"""
_DESCRIPTION = """\
The comprehensive dataset contains over 220,000 single-rater and multi-rater labeled nuclei from breast cancer images
obtained from TCGA, making it one of the largest datasets for nucleus detection, classification, and segmentation in
hematoxylin and eosin-stained digital slides of breast cancer. This version of the dataset is a revised single-rater
dataset, featuring over 125,000 nucleus annotations. These nuclei were annotated through a collaborative effort
involving pathologists, pathology residents, and medical students, using the Digital Slide Archive.
"""
_HOMEPAGE = "https://sites.google.com/view/nucls/home?authuser=0"
_LICENSE = "CC0 1.0 license"
_URL = "https://www.dropbox.com/scl/fi/srq574rdgvp7f5gwr60xw/NuCLS_dataset.zip?rlkey=qjc9q8shgvnqpfy4bktbqybd1&dl=1"

class NuCLSDataset(GeneratorBasedBuilder):
    """The NuCLS dataset."""

    VERSION = datasets.Version("1.1.0")
    def _info(self):
        """Returns the dataset info."""
        # Define the classes for the classifications
        raw_classification = ClassLabel(names=[
            'apoptotic_body', 'ductal_epithelium', 'eosinophil', 'fibroblast', 'lymphocyte',
            'macrophage', 'mitotic_figure', 'myoepithelium', 'neutrophil',
            'plasma_cell', 'tumor', 'unlabeled', 'vascular_endothelium',
        ])
        main_classification = ClassLabel(names=[
            'AMBIGUOUS', 'lymphocyte', 'macrophage', 'nonTILnonMQ_stromal',
            'plasma_cell', 'tumor_mitotic', 'tumor_nonMitotic',
        ])
        super_classification = ClassLabel(names=[
            'AMBIGUOUS', 'nonTIL_stromal', 'sTIL', 'tumor_any',
        ])
        # Named `annotation_type` to avoid shadowing the built-in `type`.
        annotation_type = ClassLabel(names=['rectangle', 'polyline'])

        # Define features
        features = Features({
            # Images are stored undecoded; their sizes vary per field of view
            # and are handled dynamically in the generator function.
            'rgb_image': Image(decode=False),
            'mask_image': Image(decode=False),
            'visualization_image': Image(decode=False),
            # Each field of view contains multiple annotated nuclei (one CSV row
            # per nucleus), so the annotations are stored as a Sequence; polygon
            # coordinates are variable-length float sequences.
            'annotation_coordinates': Sequence({
                'raw_classification': raw_classification,
                'main_classification': main_classification,
                'super_classification': super_classification,
                'type': annotation_type,
                'xmin': Value('int64'),
                'ymin': Value('int64'),
                'xmax': Value('int64'),
                'ymax': Value('int64'),
                'coords_x': Sequence(Value('float32')),
                'coords_y': Sequence(Value('float32')),
            }),
        })

        return DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: DownloadManager):
        # Download and extract the source data
        data_dir = dl_manager.download_and_extract(_URL)

        # Directory paths
        base_dir = os.path.join(data_dir, "NuCLS_dataset")
        rgb_dir = os.path.join(base_dir, "rgb")
        visualization_dir = os.path.join(base_dir, "visualization")
        mask_dir = os.path.join(base_dir, "mask")
        csv_dir = os.path.join(base_dir, "csv")

        # Generate a list of unique filenames (without extensions);
        # os.listdir order is platform-dependent, so sort for determinism.
        unique_filenames = sorted(os.path.splitext(f)[0] for f in os.listdir(rgb_dir))

        # Shuffle with a fixed seed so the 80/20 train/test split is
        # reproducible across runs, then split.
        random.Random(42).shuffle(unique_filenames)
        split_idx = int(0.8 * len(unique_filenames))
        train_filenames = unique_filenames[:split_idx]
        test_filenames = unique_filenames[split_idx:]

        # Map filenames to file paths for each split
        train_filepaths = self._map_filenames_to_paths(train_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)
        test_filepaths = self._map_filenames_to_paths(test_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)

        # Create the split generators
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_filepaths},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": test_filepaths},
            ),
        ]
    def _map_filenames_to_paths(self, filenames, rgb_dir, visualization_dir, mask_dir, csv_dir):
        """Maps filenames to file paths for each split."""
        filepaths = {}
        for filename in filenames:
            filepaths[filename] = {
                'fov': os.path.join(rgb_dir, filename + '.png'),
                'visualization': os.path.join(visualization_dir, filename + '.png'),
                'mask': os.path.join(mask_dir, filename + '.png'),
                'csv': os.path.join(csv_dir, filename + '.csv'),
            }
        return filepaths
    def _generate_examples(self, filepaths):
        """Yield examples as (key, example) tuples."""
        for key, paths in filepaths.items():
            # Build the example dictionary
            example = {
                'rgb_image': self._read_image_file(paths['fov']),
                'mask_image': self._read_image_file(paths['mask']),
                'visualization_image': self._read_image_file(paths['visualization']),
                'annotation_coordinates': self._read_csv_file(paths['csv']),
            }
            yield key, example
    def _read_image_file(self, file_path: str) -> np.ndarray:
        """Reads an image file and returns its pixel data as a NumPy array."""
        try:
            with open(file_path, 'rb') as f:
                image = PilImage.open(f)
                # Convert inside the `with` block so PIL fully loads the pixel
                # data before the file handle is closed.
                return np.array(image)
        except Exception as e:
            print(f"Error reading image file {file_path}: {e}")
            return None
    def _read_csv_file(self, file_path: str):
        """Reads a CSV file and returns the contents in the expected format."""
        try:
            csv_df = pd.read_csv(file_path)
            if csv_df.empty:
                print(f"Warning: CSV file {file_path} is empty.")
                return None
            # Convert the DataFrame into the structure that matches the
            # `annotation_coordinates` feature.
            return self._process_csv_data(csv_df)
        except Exception as e:
            print(f"Error reading CSV file {file_path}: {e}")
            return None
    def _process_csv_data(self, csv_df):
        """Converts the annotation DataFrame into the `annotation_coordinates` format.

        Assumes the NuCLS CSV layout: one row per nucleus, with `coords_x` and
        `coords_y` stored as comma-separated coordinate strings (empty for
        rectangular annotations).
        """
        def parse_coords(value):
            # Missing or empty coordinate strings (rectangles) become empty lists.
            if pd.isna(value) or str(value) == '':
                return []
            return [float(v) for v in str(value).split(',')]

        scalar_columns = ['raw_classification', 'main_classification', 'super_classification',
                          'type', 'xmin', 'ymin', 'xmax', 'ymax']
        record = {col: csv_df[col].tolist() for col in scalar_columns}
        record['coords_x'] = [parse_coords(v) for v in csv_df['coords_x']]
        record['coords_y'] = [parse_coords(v) for v in csv_df['coords_y']]
        return record
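

# A minimal usage sketch (illustrative, not part of the original script): with this
# file saved locally as NuCLS_dataset.py, the builder can be exercised end to end
# via `datasets.load_dataset`. Note that recent versions of the `datasets` library
# require `trust_remote_code=True` to run dataset scripts.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, trust_remote_code=True)
    print(dataset)
    # Peek at the first training example's annotation labels (ClassLabel ids).
    example = dataset["train"][0]
    print(example["annotation_coordinates"]["raw_classification"][:5])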