minhanhto09 committed on
Commit 6be63ab
Parent(s): 51b6c32

Upload NuCLS_dataset.py

Files changed (1)
  1. NuCLS_dataset.py +201 -0
NuCLS_dataset.py ADDED
@@ -0,0 +1,201 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Tue Mar 12 16:13:56 2024
+
+ @author: tominhanh
+ """
+
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ import random
+
+ import pandas as pd
+ from PIL import Image as PilImage  # aliased so it does not clash with datasets.Image
+
+ import datasets
+ from datasets import ClassLabel, DatasetInfo, DownloadManager, Features, GeneratorBasedBuilder, Image, Sequence, Value
+
+ _CITATION = """\
+ https://arxiv.org/abs/2102.09099
+ """
+
+ _DESCRIPTION = """\
+ The comprehensive dataset contains over 220,000 single-rater and multi-rater labeled nuclei from breast cancer
+ images obtained from TCGA, making it one of the largest datasets for nucleus detection, classification, and
+ segmentation in hematoxylin and eosin-stained digital slides of breast cancer. This version is the revised
+ single-rater dataset, featuring over 125,000 nucleus annotations. These nuclei were annotated through a
+ collaborative effort involving pathologists, pathology residents, and medical students, using the Digital
+ Slide Archive.
+ """
+
+ _HOMEPAGE = "https://sites.google.com/view/nucls/home?authuser=0"
+
+ _LICENSE = "CC0 1.0 license"
+
+ _URL = "https://www.dropbox.com/scl/fi/srq574rdgvp7f5gwr60xw/NuCLS_dataset.zip?rlkey=qjc9q8shgvnqpfy4bktbqybd1&dl=1"
+
+ class NuCLSDataset(GeneratorBasedBuilder):
+     """The NuCLS dataset."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         """Returns the dataset info."""
+
+         # Define the classes for the classifications
+         raw_classification = ClassLabel(names=[
+             'apoptotic_body', 'ductal_epithelium', 'eosinophil', 'fibroblast', 'lymphocyte',
+             'macrophage', 'mitotic_figure', 'myoepithelium', 'neutrophil',
+             'plasma_cell', 'tumor', 'unlabeled', 'vascular_endothelium',
+         ])
+         main_classification = ClassLabel(names=[
+             'AMBIGUOUS', 'lymphocyte', 'macrophage', 'nonTILnonMQ_stromal',
+             'plasma_cell', 'tumor_mitotic', 'tumor_nonMitotic',
+         ])
+         super_classification = ClassLabel(names=[
+             'AMBIGUOUS', 'nonTIL_stromal', 'sTIL', 'tumor_any',
+         ])
+         # Named annotation_type to avoid shadowing the built-in type()
+         annotation_type = ClassLabel(names=['rectangle', 'polyline'])
+
+         # Assumed maximum length for polygon coordinates (currently unused;
+         # coords are stored as variable-length sequences below)
+         max_polygon_length = 20
+
+         # Define features
+         features = Features({
+             # Images are stored undecoded (decode=False); their sizes vary,
+             # so consumers decode and handle dimensions on access
+             'rgb_image': Image(decode=False),
+             'mask_image': Image(decode=False),
+             'visualization_image': Image(decode=False),
+
+             # Annotation coordinates
+             'annotation_coordinates': Features({
+                 'raw_classification': raw_classification,
+                 'main_classification': main_classification,
+                 'super_classification': super_classification,
+                 'type': annotation_type,
+                 'xmin': Value('int64'),
+                 'ymin': Value('int64'),
+                 'xmax': Value('int64'),
+                 'ymax': Value('int64'),
+                 'coords_x': Sequence(Value('float32')),
+                 'coords_y': Sequence(Value('float32')),
+             }),
+         })
+
+         return DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+         # Download and extract the source data
+         data_dir = dl_manager.download_and_extract(_URL)
+
+         # Directory paths
+         rgb_dir = os.path.join(data_dir, "rgb")
+         visualization_dir = os.path.join(data_dir, "visualization")
+         mask_dir = os.path.join(data_dir, "mask")
+         csv_dir = os.path.join(data_dir, "csv")
+
+         # Generate a sorted list of unique filenames (without extensions);
+         # os.listdir order is platform-dependent, so sort for determinism
+         unique_filenames = sorted(os.path.splitext(f)[0] for f in os.listdir(rgb_dir))
+
+         # Split filenames 80/20 into training and testing sets, shuffling
+         # with a fixed seed so the split is reproducible across runs
+         random.Random(42).shuffle(unique_filenames)
+         split_idx = int(0.8 * len(unique_filenames))
+         train_filenames = unique_filenames[:split_idx]
+         test_filenames = unique_filenames[split_idx:]
+
+         # Map filenames to file paths for each split
+         train_filepaths = self._map_filenames_to_paths(train_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)
+         test_filepaths = self._map_filenames_to_paths(test_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)
+
+         # Create the split generators
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"filepaths": train_filepaths},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepaths": test_filepaths},
+             ),
+         ]
+
+     def _map_filenames_to_paths(self, filenames, rgb_dir, visualization_dir, mask_dir, csv_dir):
+         """Maps filenames to file paths for each split."""
+         filepaths = {}
+         for filename in filenames:
+             filepaths[filename] = {
+                 'fov': os.path.join(rgb_dir, filename + '.png'),
+                 'visualization': os.path.join(visualization_dir, filename + '.png'),
+                 'mask': os.path.join(mask_dir, filename + '.png'),
+                 'csv': os.path.join(csv_dir, filename + '.csv'),
+             }
+         return filepaths
+
+     def _generate_examples(self, filepaths):
+         """Yield examples as (key, example) tuples."""
+         for key, paths in filepaths.items():
+             # Build the example from the image files and the annotation CSV
+             example = {
+                 'rgb_image': self._read_image_file(paths['fov']),
+                 'mask_image': self._read_image_file(paths['mask']),
+                 'visualization_image': self._read_image_file(paths['visualization']),
+                 'annotation_coordinates': self._read_csv_file(paths['csv']),
+             }
+             yield key, example
+
+     def _read_image_file(self, file_path: str) -> PilImage.Image:
+         """Reads an image file and returns it as a PIL Image object."""
+         try:
+             with open(file_path, 'rb') as f:
+                 image = PilImage.open(f)
+                 # PIL loads lazily; force a full read before the file handle closes
+                 image.load()
+                 return image
+         except Exception as e:
+             print(f"Error reading image file {file_path}: {e}")
+             return None
+
+     def _read_csv_file(self, file_path: str):
+         """Reads a CSV file and returns its contents in the expected format."""
+         try:
+             csv_df = pd.read_csv(file_path)
+             if csv_df.empty:
+                 print(f"Warning: CSV file {file_path} is empty.")
+                 return None
+             # Convert the DataFrame into the structure of the
+             # 'annotation_coordinates' feature defined in _info()
+             return self._process_csv_data(csv_df)
+         except Exception as e:
+             print(f"Error reading CSV file {file_path}: {e}")
+             return None
+
+     # Implement this method to process and convert CSV data into the format
+     # expected by the dataset's 'annotation_coordinates' feature
+     def _process_csv_data(self, csv_df):
+         # Process the DataFrame and return the data in the correct format
+         pass
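+
+     # A minimal sketch of how _process_csv_data could be filled in. Two
+     # assumptions here are NOT confirmed by this commit: (1) each NuCLS CSV
+     # has columns matching the field names of the 'annotation_coordinates'
+     # feature above, and (2) 'coords_x'/'coords_y' are stored as
+     # comma-separated strings. Since the feature schema holds a single
+     # annotation per example, this sketch keeps only the first CSV row.
+     def _process_csv_data_sketch(self, csv_df):
+         row = csv_df.iloc[0]
+
+         def parse_coords(value):
+             # "1.0,2.5,3.0" -> [1.0, 2.5, 3.0]; tolerate empty/NaN cells
+             return [float(v) for v in str(value).split(',') if v not in ('', 'nan')]
+
+         return {
+             'raw_classification': row['raw_classification'],
+             'main_classification': row['main_classification'],
+             'super_classification': row['super_classification'],
+             'type': row['type'],
+             'xmin': int(row['xmin']),
+             'ymin': int(row['ymin']),
+             'xmax': int(row['xmax']),
+             'ymax': int(row['ymax']),
+             'coords_x': parse_coords(row['coords_x']),
+             'coords_y': parse_coords(row['coords_y']),
+         }
+
+     # Hypothetical usage once the script is complete (run from the directory
+     # containing NuCLS_dataset.py):
+     #     from datasets import load_dataset
+     #     ds = load_dataset("NuCLS_dataset.py")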