Commit
•
51b6c32
1
Parent(s):
f6c2460
Delete NuCLS_dataset.py
Browse files- NuCLS_dataset.py +0 -283
NuCLS_dataset.py
DELETED
@@ -1,283 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
# -*- coding: utf-8 -*-
|
3 |
-
"""
|
4 |
-
Created on Tue Mar 12 16:13:56 2024
|
5 |
-
|
6 |
-
@author: tominhanh
|
7 |
-
"""
|
8 |
-
|
9 |
-
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
10 |
-
#
|
11 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
12 |
-
# you may not use this file except in compliance with the License.
|
13 |
-
# You may obtain a copy of the License at
|
14 |
-
#
|
15 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
16 |
-
#
|
17 |
-
# Unless required by applicable law or agreed to in writing, software
|
18 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
19 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
20 |
-
# See the License for the specific language governing permissions and
|
21 |
-
# limitations under the License.
|
22 |
-
|
23 |
-
import csv
|
24 |
-
import json
|
25 |
-
import os
|
26 |
-
import logging
|
27 |
-
import pandas as pd
|
28 |
-
import numpy as np
|
29 |
-
from PIL import Image
|
30 |
-
import datasets
|
31 |
-
from datasets import DatasetBuilder, GeneratorBasedBuilder, DownloadManager, DatasetInfo, Features, Image, ClassLabel, Value, Sequence, load_dataset, SplitGenerator
|
32 |
-
import io
|
33 |
-
from typing import List, Tuple, Dict
|
34 |
-
import random
|
35 |
-
import zipfile
|
36 |
-
|
37 |
-
_CITATION = """\
|
38 |
-
https://arxiv.org/abs/2102.09099
|
39 |
-
"""
|
40 |
-
|
41 |
-
_DESCRIPTION = """\
|
42 |
-
The comprehensive dataset contains over 220,000 single-rater and multi-rater labeled nuclei from breast cancer images
|
43 |
-
obtained from TCGA, making it one of the largest datasets for nucleus detection, classification, and segmentation in hematoxylin and eosin-stained
|
44 |
-
digital slides of breast cancer. This version of the dataset is a revised single-rater dataset, featuring over 125,000 nucleus annotations.
|
45 |
-
These nuclei were annotated through a collaborative effort involving pathologists, pathology residents, and medical students, using the Digital Slide Archive.
|
46 |
-
"""
|
47 |
-
|
48 |
-
_HOMEPAGE = "https://sites.google.com/view/nucls/home?authuser=0"
|
49 |
-
|
50 |
-
_GITHUB_REPO = "https://github.com/PathologyDataScience/NuCLS"
|
51 |
-
|
52 |
-
_LICENSE = "CC0 1.0 license"
|
53 |
-
|
54 |
-
_URLS_GD = {
|
55 |
-
'visualization': 'https://drive.google.com/uc?export=download&id=1LxngBja8yga2PDE4nLh-sDt7zylbU6LX',
|
56 |
-
'fov': 'https://drive.google.com/uc?export=download&id=1WRlgzL2y3A4Vw2qrgZMeSrG2QLWsSbbt',
|
57 |
-
'annotation': 'https://drive.google.com/uc?export=download&id=1ac6lwgjDgukYbabSJXYGFUp1XC6hrTq0',
|
58 |
-
'mask': 'https://drive.google.com/uc?export=download&id=1PdLKC_d2W_-58AUYEECknjzf9UUnmXeL',
|
59 |
-
'train_test_splits': 'https://drive.google.com/uc?export=download&id=1Hidb8WsKn2-35-EzyvZdOzJtM7OujVE5',
|
60 |
-
}
|
61 |
-
|
62 |
-
_URLS_DB = {
|
63 |
-
'visualization': 'https://www.dropbox.com/scl/fi/xvh0oe2tf7uw2hr4qooju/visualization-20240214T034323Z-001.zip?rlkey=sabkedpkaxviecfbozjffqeqq&dl=1',
|
64 |
-
'fov': 'https://www.dropbox.com/scl/fi/7ob7mub0kgjcvicrvmlm2/rgb-20240213T171654Z-001.zip?rlkey=jupumuk8emtdls1m4jui2230l&dl=1',
|
65 |
-
'annotation': 'https://www.dropbox.com/scl/fi/co71y09x1sukm41dvjbrb/csv-20240213T170834Z-001.zip?rlkey=sycxq8udvaqbljz0lfoeu9mdi&dl=1',
|
66 |
-
'mask': 'https://www.dropbox.com/scl/fi/14n7c604ontck1tjp4mi9/mask-20240213T171332Z-001.zip?rlkey=12m9967vsxvudk1gcv6t5u7ia&dl=1',
|
67 |
-
}
|
68 |
-
|
69 |
-
_URLS = "https://www.dropbox.com/scl/fi/srq574rdgvp7f5gwr60xw/NuCLS_dataset.zip?rlkey=qjc9q8shgvnqpfy4bktbqybd1&dl=1"
|
70 |
-
|
71 |
-
class NuCLSDataset(GeneratorBasedBuilder):
    """Hugging Face dataset builder for the NuCLS nucleus dataset.

    Downloads field-of-view (FOV) images, segmentation masks, visualization
    overlays and per-image annotation CSVs, partitions the images 80/20 into
    train/test splits, and yields one example per FOV image.
    """

    _DESCRIPTION = """\
The comprehensive dataset contains over 220,000 single-rater and multi-rater labeled nuclei from breast cancer images
obtained from TCGA, making it one of the largest datasets for nucleus detection, classification, and segmentation in hematoxylin and eosin-stained
digital slides of breast cancer. This version of the dataset is a revised single-rater dataset, featuring over 125,000 nucleus annotations.
These nuclei were annotated through a collaborative effort involving pathologists, pathology residents, and medical students, using the Digital Slide Archive.
"""

    _CITATION = """\
https://arxiv.org/abs/2102.09099
"""

    _HOMEPAGE = "https://sites.google.com/view/nucls/home?authuser=0"

    _GITHUB_REPO = "https://github.com/PathologyDataScience/NuCLS"

    _LICENSE = "CC0 1.0 license"

    # Google Drive download URLs, one archive per asset type.
    _URLS = {
        'visualization': 'https://drive.google.com/uc?export=download&id=1LxngBja8yga2PDE4nLh-sDt7zylbU6LX',
        'fov': 'https://drive.google.com/uc?export=download&id=1WRlgzL2y3A4Vw2qrgZMeSrG2QLWsSbbt',
        'annotation': 'https://drive.google.com/uc?export=download&id=1ac6lwgjDgukYbabSJXYGFUp1XC6hrTq0',
        'mask': 'https://drive.google.com/uc?export=download&id=1PdLKC_d2W_-58AUYEECknjzf9UUnmXeL',
        'train_test_splits': 'https://drive.google.com/uc?export=download&id=1Hidb8WsKn2-35-EzyvZdOzJtM7OujVE5',
    }

    VERSION = datasets.Version("1.1.0")

    # Fixed seed so the 80/20 train/test partition is reproducible across runs.
    # (The original shuffled with the unseeded global RNG, producing a
    # different split on every load.)
    _SPLIT_SEED = 42

    def _info(self):
        """Return the ``DatasetInfo`` describing the feature schema."""

        # Closed label sets for the three annotation granularities.
        raw_classification = ClassLabel(names=[
            'apoptotic_body', 'ductal_epithelium', 'eosinophil', 'fibroblast', 'lymphocyte',
            'macrophage', 'mitotic_figure', 'myoepithelium', 'neutrophil',
            'plasma_cell', 'tumor', 'unlabeled', 'vascular_endothelium'
        ])
        main_classification = ClassLabel(names=[
            'AMBIGUOUS', 'lymphocyte', 'macrophage', 'nonTILnonMQ_stromal',
            'plasma_cell', 'tumor_mitotic', 'tumor_nonMitotic',
        ])
        super_classification = ClassLabel(names=[
            'AMBIGUOUS', 'nonTIL_stromal', 'sTIL', 'tumor_any',
        ])
        # Renamed from `type` to avoid shadowing the builtin; the feature key
        # exposed to consumers is still 'type'.
        annotation_type = ClassLabel(names=['rectangle', 'polyline'])

        features = Features({
            # decode=False keeps the raw bytes/path instead of eagerly
            # decoding to a PIL image; sizes vary per example.
            'fov_image': Image(decode=False),
            'mask_image': Image(decode=False),
            'visualization_image': Image(decode=False),

            # Per-nucleus annotation fields read from the CSV.
            'annotation_coordinates': Features({
                'raw_classification': raw_classification,
                'main_classification': main_classification,
                'super_classification': super_classification,
                'type': annotation_type,
                'xmin': Value('int64'),
                'ymin': Value('int64'),
                'xmax': Value('int64'),
                'ymax': Value('int64'),
                'coords_x': Sequence(Value('float32')),
                'coords_y': Sequence(Value('float32')),
            })
        })

        return DatasetInfo(
            description=NuCLSDataset._DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=NuCLSDataset._HOMEPAGE,
            license=NuCLSDataset._LICENSE,
            citation=NuCLSDataset._CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the archives and return train/test split generators."""

        downloaded_files = dl_manager.download_and_extract(self._URLS)

        # download_and_extract normally returns extracted directories, but be
        # defensive: if any entry is still a ZIP file, extract it beside
        # itself into a per-key directory.
        for key, file_path in downloaded_files.items():
            if zipfile.is_zipfile(file_path):
                extract_dir = os.path.join(os.path.dirname(file_path), key)
                os.makedirs(extract_dir, exist_ok=True)
                with zipfile.ZipFile(file_path, 'r') as zip_ref:
                    zip_ref.extractall(extract_dir)
                downloaded_files[key] = extract_dir
            else:
                # Already a directory (or non-zip path); use it as-is.
                downloaded_files[key] = file_path

        # Unique base filenames (no extension) drive the example keys.
        unique_filenames = self._extract_unique_base_filenames(downloaded_files)

        # Deterministic shuffle + 80/20 split by image.
        unique_filenames.sort()  # stable order before seeding the shuffle
        random.Random(self._SPLIT_SEED).shuffle(unique_filenames)
        split_index = int(0.8 * len(unique_filenames))
        train_filenames = unique_filenames[:split_index]
        test_filenames = unique_filenames[split_index:]

        train_filepaths = self._map_filenames_to_paths(train_filenames, downloaded_files)
        test_filepaths = self._map_filenames_to_paths(test_filenames, downloaded_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_filepaths},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": test_filepaths},
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield examples as ``(key, example)`` tuples, one per FOV image."""

        for key, paths in filepaths.items():
            example = {
                'fov_image': self._read_image_file(paths['fov']),
                'mask_image': self._read_image_file(paths['mask']),
                'visualization_image': self._read_image_file(paths['visualization']),
                # NOTE(review): this returns a pandas DataFrame (or None),
                # which does not obviously match the nested Features schema
                # declared in _info — confirm against a real load.
                'annotation_coordinates': self._read_annotation_file(paths['annotation']),
            }
            yield key, example

    def _map_filenames_to_paths(self, filenames, downloaded_files):
        """Map each base filename to its fov/mask/visualization/annotation paths."""
        filepaths = {}
        for filename in filenames:
            filepaths[filename] = {
                'fov': os.path.join(downloaded_files['fov'], filename + '.png'),
                'mask': os.path.join(downloaded_files['mask'], filename + '.png'),
                'visualization': os.path.join(downloaded_files['visualization'], filename + '.png'),
                'annotation': os.path.join(downloaded_files['annotation'], filename + '.csv')
            }
        return filepaths

    def _list_files_in_directory(self, directory):
        """List full paths of the regular files directly inside *directory*."""
        return [
            os.path.join(directory, file)
            for file in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, file))
        ]

    def _extract_unique_base_filenames(self, downloaded_files):
        """Return the unique base filenames (no extension) of the FOV images.

        Bug fix: the original iterated ``downloaded_files['fov']`` directly,
        which is the directory *path string*, so it looped over individual
        characters instead of the files. List the directory contents instead.
        """
        return list({
            os.path.splitext(os.path.basename(path))[0]
            for path in self._list_files_in_directory(downloaded_files['fov'])
        })

    def _read_image_file(self, file_path: str):
        """Read an image file into the form expected by ``Image(decode=False)``.

        Returns a ``{'path': ..., 'bytes': ...}`` dict, or None on read error.

        Bug fix: the original called ``Image.open`` where ``Image`` resolved
        to ``datasets.Image`` (the PIL import at the top of the file is
        shadowed by the ``datasets`` import), so it always raised and returned
        None. Since the schema declares ``decode=False``, raw bytes are what
        is wanted anyway — no PIL round-trip needed.
        """
        try:
            return {'path': file_path, 'bytes': self._read_image_as_bytes(file_path)}
        except OSError as e:
            logging.getLogger(__name__).error("Error reading image file %s: %s", file_path, e)
            return None

    def _read_image_as_bytes(self, file_path: str) -> bytes:
        """Read a file and return its raw bytes."""
        with open(file_path, 'rb') as f:
            return f.read()

    def _read_annotation_file(self, file_path: str):
        """Read an annotation CSV into a DataFrame; return None if empty/unreadable."""
        try:
            annotation_df = pd.read_csv(file_path)
        except Exception as e:
            # Best-effort: a bad/missing CSV should not abort generation.
            logging.getLogger(__name__).error("Error reading annotation file %s: %s", file_path, e)
            return None
        if annotation_df.empty:
            logging.getLogger(__name__).warning("Annotation file %s is empty.", file_path)
            return None
        return annotation_df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|