#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 16:13:56 2024
@author: tominhanh
"""
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from typing import Optional

import pandas as pd
from PIL import Image as PilImage  # alias avoids clashing with datasets.Image

import datasets
from datasets import (
    ClassLabel,
    DatasetInfo,
    DownloadManager,
    Features,
    GeneratorBasedBuilder,
    Image,
    Sequence,
    Value,
)
_CITATION = """\
https://arxiv.org/abs/2102.09099
"""
_DESCRIPTION = """\
The comprehensive dataset contains over 220,000 single-rater and multi-rater labeled nuclei from breast cancer images
obtained from TCGA, making it one of the largest datasets for nucleus detection, classification, and segmentation in hematoxylin and eosin-stained
digital slides of breast cancer. This version of the dataset is a revised single-rater dataset, featuring over 125,000 nucleus annotations.
These nuclei were annotated through a collaborative effort involving pathologists, pathology residents, and medical students, using the Digital Slide Archive.
"""
_HOMEPAGE = "https://sites.google.com/view/nucls/home?authuser=0"
_LICENSE = "CC0 1.0 license"
_URL = "https://www.dropbox.com/scl/fi/srq574rdgvp7f5gwr60xw/NuCLS_dataset.zip?rlkey=qjc9q8shgvnqpfy4bktbqybd1&dl=1"
class NuCLSDataset(GeneratorBasedBuilder):
"""The NuCLS dataset."""
VERSION = datasets.Version("1.1.0")
def _info(self):
"""Returns the dataset info."""
# Define the classes for the classifications
        raw_classification = ClassLabel(names=[
            'apoptotic_body', 'ductal_epithelium', 'eosinophil', 'fibroblast', 'lymphocyte',
            'macrophage', 'mitotic_figure', 'myoepithelium', 'neutrophil',
            'plasma_cell', 'tumor', 'unlabeled', 'vascular_endothelium',
        ])
        main_classification = ClassLabel(names=[
            'AMBIGUOUS', 'lymphocyte', 'macrophage', 'nonTILnonMQ_stromal',
            'plasma_cell', 'tumor_mitotic', 'tumor_nonMitotic',
        ])
        super_classification = ClassLabel(names=[
            'AMBIGUOUS', 'nonTIL_stromal', 'sTIL', 'tumor_any',
        ])
        annotation_type = ClassLabel(names=['rectangle', 'polyline'])  # renamed from `type` to avoid shadowing the built-in
# Define features
features = Features({
            # Images are stored undecoded (decode=False) and decoded lazily on access,
            # which accommodates the varying image sizes
'rgb_image': Image(decode=False),
'mask_image': Image(decode=False),
'visualization_image': Image(decode=False),
            # Annotation coordinates: each image carries a variable number of
            # nucleus annotations, so the per-nucleus fields are wrapped in a Sequence
            'annotation_coordinates': Sequence(Features({
                'raw_classification': raw_classification,
                'main_classification': main_classification,
                'super_classification': super_classification,
                'type': annotation_type,
                'xmin': Value('int64'),
                'ymin': Value('int64'),
                'xmax': Value('int64'),
                'ymax': Value('int64'),
                'coords_x': Sequence(Value('float32')),
                'coords_y': Sequence(Value('float32')),
            })),
})
return DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
    def _split_generators(self, dl_manager: DownloadManager):
        # Download and extract the source data
        data_dir = dl_manager.download_and_extract(_URL)
        # Directory paths
        rgb_dir = os.path.join(data_dir, "rgb")
        visualization_dir = os.path.join(data_dir, "visualization")
        mask_dir = os.path.join(data_dir, "mask")
        csv_dir = os.path.join(data_dir, "csv")
        # Generate a sorted list of unique filenames (without extensions)
        unique_filenames = sorted(os.path.splitext(f)[0] for f in os.listdir(rgb_dir))
        # Shuffle with a fixed seed (42 is arbitrary) so the 80/20
        # train/test split is reproducible across loads
        random.Random(42).shuffle(unique_filenames)
        split_idx = int(0.8 * len(unique_filenames))
        train_filenames = unique_filenames[:split_idx]
        test_filenames = unique_filenames[split_idx:]
# Map filenames to file paths for each split
train_filepaths = self._map_filenames_to_paths(train_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)
test_filepaths = self._map_filenames_to_paths(test_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)
# Create the split generators
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepaths": train_filepaths}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepaths": test_filepaths}
),
]
def _map_filenames_to_paths(self, filenames, rgb_dir, visualization_dir, mask_dir, csv_dir):
"""Maps filenames to file paths for each split."""
filepaths = {}
for filename in filenames:
filepaths[filename] = {
'fov': os.path.join(rgb_dir, filename + '.png'),
'visualization': os.path.join(visualization_dir, filename + '.png'),
'mask': os.path.join(mask_dir, filename + '.png'),
'csv': os.path.join(csv_dir, filename + '.csv'),
}
return filepaths
def _generate_examples(self, filepaths):
"""Yield examples as (key, example) tuples."""
for key, paths in filepaths.items():
# Initialize an example dictionary
example = {
'rgb_image': self._read_image_file(paths['fov']),
'mask_image': self._read_image_file(paths['mask']),
'visualization_image': self._read_image_file(paths['visualization']),
'annotation_coordinates': self._read_csv_file(paths['csv']),
}
yield key, example
    def _read_image_file(self, file_path: str) -> Optional[PilImage.Image]:
        """Reads an image file and returns it as a fully loaded PIL Image object."""
        try:
            with open(file_path, 'rb') as f:
                image = PilImage.open(f)
                # Force a full read: PIL opens images lazily, and the underlying
                # file handle closes as soon as the with-block exits
                image.load()
                return image
        except Exception as e:
            print(f"Error reading image file {file_path}: {e}")
            return None
def _read_csv_file(self, file_path: str):
"""Reads a CSV file and returns the contents in the expected format."""
try:
csv_df = pd.read_csv(file_path)
if csv_df.empty:
print(f"Warning: CSV file {file_path} is empty.")
return None
else:
                # Convert the DataFrame into the structure declared for
                # `annotation_coordinates` in `_info`
return self._process_csv_data(csv_df)
except Exception as e:
print(f"Error reading CSV file {file_path}: {e}")
return None
    def _process_csv_data(self, csv_df):
        """Converts the annotation DataFrame into the `annotation_coordinates` structure.

        A minimal sketch: it assumes the CSV columns use the same names as the
        features declared in `_info`, and that `coords_x`/`coords_y` hold
        comma-separated coordinate strings (empty for rectangle annotations).
        """
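        def parse_coords(value):
            # Comma-separated string -> list of floats; missing/empty values -> empty list
            if pd.isna(value) or str(value) == '':
                return []
            return [float(v) for v in str(value).split(',')]

        scalar_columns = [
            'raw_classification', 'main_classification', 'super_classification',
            'type', 'xmin', 'ymin', 'xmax', 'ymax',
        ]
        processed = {col: csv_df[col].tolist() for col in scalar_columns}
        processed['coords_x'] = [parse_coords(v) for v in csv_df['coords_x']]
        processed['coords_y'] = [parse_coords(v) for v in csv_df['coords_y']]
        return processed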
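
# A minimal smoke-test sketch: builds the dataset directly from this script.
# Assumes a recent `datasets` release, in which loading a dataset from a local
# script requires `trust_remote_code=True`.
if __name__ == "__main__":
    from datasets import load_dataset

    nucls = load_dataset(__file__, trust_remote_code=True)
    print(nucls)
    print(nucls["train"][0]["annotation_coordinates"])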