#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 16:13:56 2024
@author: tominhanh
"""
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import random
from typing import Optional

import datasets
from datasets import (
    ClassLabel,
    DatasetInfo,
    DownloadManager,
    Features,
    GeneratorBasedBuilder,
    Image,
    Sequence,
    Value,
)
_CITATION = """\
https://arxiv.org/abs/2102.09099
"""
_DESCRIPTION = """\
The full NuCLS dataset contains over 220,000 single-rater and multi-rater labeled nuclei from breast cancer
images obtained from TCGA, making it one of the largest datasets for nucleus detection, classification, and
segmentation in hematoxylin-and-eosin-stained digital slides of breast cancer. This version is the revised
single-rater dataset, featuring over 125,000 annotated nuclei. The nuclei were annotated through a
collaborative effort involving pathologists, pathology residents, and medical students, using the Digital
Slide Archive.
"""
_HOMEPAGE = "https://sites.google.com/view/nucls/home?authuser=0"
_LICENSE = "CC0 1.0 license"
_URL = "https://www.dropbox.com/scl/fi/srq574rdgvp7f5gwr60xw/NuCLS_dataset.zip?rlkey=qjc9q8shgvnqpfy4bktbqybd1&dl=1"
class NuCLSDataset(GeneratorBasedBuilder):
"""The NuCLS dataset."""
VERSION = datasets.Version("1.1.0")
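    # GeneratorBasedBuilder expects three overrides: _info() describing the
    # schema, _split_generators() wiring the downloaded data to splits, and
    # _generate_examples() yielding (key, example) pairs.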
def _info(self):
"""Returns the dataset info."""
        # Define the label vocabularies for the three classification
        # granularities and the annotation type. The features below store these
        # fields as plain strings; the ClassLabel definitions document the
        # expected values.
        raw_classification = ClassLabel(names=[
            'apoptotic_body', 'ductal_epithelium', 'eosinophil', 'fibroblast', 'lymphocyte',
            'macrophage', 'mitotic_figure', 'myoepithelium', 'neutrophil',
            'plasma_cell', 'tumor', 'unlabeled', 'vascular_endothelium'
        ])
        main_classification = ClassLabel(names=[
            'AMBIGUOUS', 'lymphocyte', 'macrophage', 'nonTILnonMQ_stromal',
            'plasma_cell', 'tumor_mitotic', 'tumor_nonMitotic',
        ])
        super_classification = ClassLabel(names=[
            'AMBIGUOUS', 'nonTIL_stromal', 'sTIL', 'tumor_any',
        ])
        annotation_type = ClassLabel(names=['rectangle', 'polyline'])
# Define features
features = Features({
'rgb_image': Image(decode=True),
'mask_image': Image(decode=True),
'visualization_image': Image(decode=True),
'annotation_coordinates': Features({
'raw_classification': Sequence(Value("string")),
'main_classification': Sequence(Value("string")),
'super_classification': Sequence(Value("string")),
'type': Sequence(Value("string")),
'xmin': Sequence(Value('int64')),
'ymin': Sequence(Value('int64')),
'xmax': Sequence(Value('int64')),
'ymax': Sequence(Value('int64')),
                'coords_x': Sequence(Sequence(Value('int64'))),  # one list of polygon x-vertices per nucleus
                'coords_y': Sequence(Sequence(Value('int64'))),  # one list of polygon y-vertices per nucleus
})
})
return DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager: DownloadManager):
# Download source data
data_dir = dl_manager.download_and_extract(_URL)
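        # download_and_extract caches the archive locally and returns the path
        # to the extracted directory, so repeated runs avoid re-downloading.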
# Directory paths
base_dir = os.path.join(data_dir, "NuCLS_dataset")
rgb_dir = os.path.join(base_dir, "rgb")
visualization_dir = os.path.join(base_dir, "visualization")
mask_dir = os.path.join(base_dir, "mask")
csv_dir = os.path.join(base_dir, "csv")
        # Generate a sorted list of unique filenames (without extensions);
        # sorting makes the seeded shuffle below reproducible across filesystems.
        unique_filenames = sorted(os.path.splitext(f)[0] for f in os.listdir(rgb_dir))
        # Split filenames into training and testing sets (80/20). A fixed seed
        # (42 here, an arbitrary choice) keeps the split stable across runs.
        random.Random(42).shuffle(unique_filenames)
        split_idx = int(0.8 * len(unique_filenames))
        train_filenames = unique_filenames[:split_idx]
        test_filenames = unique_filenames[split_idx:]
# Map filenames to file paths for each split
train_filepaths = self._map_filenames_to_paths(train_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)
test_filepaths = self._map_filenames_to_paths(test_filenames, rgb_dir, visualization_dir, mask_dir, csv_dir)
# Create the split generators
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepaths": train_filepaths}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepaths": test_filepaths}
),
]
def _map_filenames_to_paths(self, filenames, rgb_dir, visualization_dir, mask_dir, csv_dir):
"""Maps filenames to file paths for each split."""
filepaths = {}
for filename in filenames:
            # The keys here must match the lookups in _generate_examples below.
            filepaths[filename] = {
                'rgb': os.path.join(rgb_dir, filename + '.png'),
                'visualization': os.path.join(visualization_dir, filename + '.png'),
                'mask': os.path.join(mask_dir, filename + '.png'),
                'csv': os.path.join(csv_dir, filename + '.csv'),
            }
return filepaths
def _generate_examples(self, filepaths):
"""Yield examples as (key, example) tuples."""
for key, paths in filepaths.items():
# Read the images using a method to handle the image files
rgb_image = self._read_image_file(paths['rgb'])
mask_image = self._read_image_file(paths['mask'])
visualization_image = self._read_image_file(paths['visualization'])
# Read the CSV and format the data as per the defined features
annotation_coordinates = self._read_csv_file(paths['csv'])
# Yield the example
yield key, {
'rgb_image': rgb_image,
'mask_image': mask_image,
'visualization_image': visualization_image,
'annotation_coordinates': annotation_coordinates,
}
    def _read_image_file(self, file_path: str) -> Optional[bytes]:
        """Reads an image file and returns its raw bytes, or None on failure."""
        try:
            with open(file_path, 'rb') as f:
                return f.read()
        except OSError as e:
            print(f"Error reading image file {file_path}: {e}")
            return None
def _read_csv_file(self, filepath):
"""Reads the annotation CSV file and formats the data."""
with open(filepath, 'r', encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile)
annotations = {
'raw_classification': [],
'main_classification': [],
'super_classification': [],
'type': [],
'xmin': [],
'ymin': [],
'xmax': [],
'ymax': [],
'coords_x': [],
'coords_y': []
}
            for row in reader:
                annotations['raw_classification'].append(row.get('raw_classification', ''))
                annotations['main_classification'].append(row.get('main_classification', ''))
                annotations['super_classification'].append(row.get('super_classification', ''))
                annotations['type'].append(row.get('type', ''))
                # Bounding-box columns may be empty or fractional; fall back to
                # 0 rather than crash on int('').
                for key in ('xmin', 'ymin', 'xmax', 'ymax'):
                    annotations[key].append(int(float(row.get(key) or 0)))
                # coords_x / coords_y hold comma-separated polygon vertices.
                # An empty cell (e.g. a rectangle annotation) yields an empty
                # list; malformed tokens fall back to 0 as before.
                for key in ('coords_x', 'coords_y'):
                    raw = (row.get(key) or '').strip()
                    coords = []
                    if raw:
                        for token in raw.split(','):
                            try:
                                coords.append(int(float(token)))
                            except ValueError:
                                coords.append(0)
                    annotations[key].append(coords)
return annotations
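
# ---------------------------------------------------------------------------
# Minimal usage sketch (a hedged example, not part of the loader). It assumes
# this script is saved locally as `nucls.py`; the filename and the
# `trust_remote_code=True` flag (required by recent `datasets` releases for
# script-based datasets) are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    # Builds the dataset from this script: downloads and extracts the archive
    # on first use, then materializes the train/test splits defined above.
    dataset = load_dataset("nucls.py", trust_remote_code=True)
    sample = dataset["train"][0]
    print(sample["annotation_coordinates"]["type"][:5])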