|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""TODO: Add a description here.""" |
|
|
|
|
|
import csv
import json
import os
import random

import datasets
import numpy as np
import pandas as pd
from PIL import Image
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@InProceedings{huggingface:dataset, |
|
title = {A great new dataset}, |
|
author={huggingface, Inc. |
|
}, |
|
year={2020} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This new dataset is designed to solve this great NLP task and is crafted with a lot of care. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "" |
|
|
|
|
|
_LICENSE = "" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class NewDataset(datasets.GeneratorBasedBuilder):
    """Populus stomatal images with YOLO-derived bounding-box annotations.

    Each example pairs one microscope image with its species metadata (from
    the CSV manifest) and the stomata bounding boxes parsed from a YOLO
    ``.txt`` label file that sits next to the image.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata and feature schema.

        NOTE: ``pics_array`` is declared channel-first ``(3, 768, 1024)``;
        ``_generate_examples`` transposes the PIL ``(H, W, C)`` array to match.
        """
        features = datasets.Features({
            "image_id": datasets.Value("string"),
            "species": datasets.Value("string"),
            "scientific_name": datasets.Value("string"),
            "pics_array": datasets.Array3D(dtype="uint8", shape=(3, 768, 1024)),
            "image_resolution": {
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
            },
            # Boxes are absolute pixel coordinates (see _parse_yolo_labels).
            "annotations": datasets.Sequence({
                "category_id": datasets.Value("int32"),
                "bounding_box": {
                    "x_min": datasets.Value("float32"),
                    "y_min": datasets.Value("float32"),
                    "x_max": datasets.Value("float32"),
                    "y_max": datasets.Value("float32"),
                },
            }),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the CSV manifest + image zip and build 70/15/15 splits.

        Args:
            dl_manager: the `datasets` download manager.

        Returns:
            A list of train/validation/test ``SplitGenerator``s.
        """
        data_files = dl_manager.download_and_extract({
            "csv": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.csv",
            "zip": "https://huggingface.co/datasets/XintongHe/Populus_Stomatal_Images_Datasets/resolve/main/Labeled Stomatal Images.zip"
        })

        species_info = pd.read_csv(data_files["csv"])

        # The zip extracts into a single top-level directory of this name.
        extracted_images_path = os.path.join(data_files["zip"], "Labeled Stomatal Images")

        # The CSV 'FileName' column has no extension; images are .jpg files.
        all_image_filenames = species_info['FileName'].apply(lambda x: x + '.jpg').tolist()

        # Fixed seed so the split assignment is reproducible across runs.
        random.seed(42)
        random.shuffle(all_image_filenames)

        num_files = len(all_image_filenames)
        train_split_end = int(num_files * 0.7)
        val_split_end = train_split_end + int(num_files * 0.15)

        train_files = all_image_filenames[:train_split_end]
        val_files = all_image_filenames[train_split_end:val_split_end]
        test_files = all_image_filenames[val_split_end:]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": train_files,
                    "species_info": species_info,
                    "data_dir": extracted_images_path,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": val_files,
                    "species_info": species_info,
                    "data_dir": extracted_images_path,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": test_files,
                    "species_info": species_info,
                    "data_dir": extracted_images_path,
                    "split": "test",
                },
            ),
        ]

    def _parse_yolo_labels(self, label_path, width, height):
        """Parse a YOLO label file into absolute-pixel bounding boxes.

        Each non-blank line is ``class_id x_center y_center width height``
        with coordinates relative to the image size; they are converted to
        absolute ``x_min/y_min/x_max/y_max`` pixel values.

        Args:
            label_path: path to the ``.txt`` YOLO label file.
            width: image width in pixels.
            height: image height in pixels.

        Returns:
            A list of ``{"category_id", "bounding_box"}`` dicts.
        """
        annotations = []
        with open(label_path, 'r') as file:
            yolo_data = file.readlines()

        for line in yolo_data:
            # Skip blank lines (a trailing newline would otherwise make
            # map(float, ...) raise ValueError on too few values).
            if not line.strip():
                continue
            class_id, x_center_rel, y_center_rel, width_rel, height_rel = map(float, line.split())
            x_min = (x_center_rel - width_rel / 2) * width
            y_min = (y_center_rel - height_rel / 2) * height
            x_max = (x_center_rel + width_rel / 2) * width
            y_max = (y_center_rel + height_rel / 2) * height
            annotations.append({
                "category_id": int(class_id),
                "bounding_box": {
                    "x_min": x_min,
                    "y_min": y_min,
                    "x_max": x_max,
                    "y_max": y_max
                }
            })
        return annotations

    def _generate_examples(self, filepaths, species_info, data_dir, split):
        """Yields examples as (key, example) tuples."""
        for file_name in filepaths:
            image_id = os.path.splitext(file_name)[0]
            image_path = os.path.join(data_dir, f"{image_id}.jpg")
            label_path = os.path.join(data_dir, f"{image_id}.txt")

            with Image.open(image_path) as img:
                width, height = img.size
                # The declared Array3D feature is channel-first (3, 768, 1024),
                # but PIL yields (H, W, C) — transpose to match the schema.
                # Assumes RGB images; a grayscale image would be 2-D and fail
                # here — TODO confirm against the source data.
                pics_array = np.transpose(np.array(img), (2, 0, 1))

            # Look up species metadata; None when the manifest has no row.
            species_row = species_info.loc[species_info['FileName'] == file_name]
            species = species_row['Species'].values[0] if not species_row.empty else None
            scientific_name = species_row['ScientificName'].values[0] if not species_row.empty else None

            # Images without a label file get an empty annotation list
            # instead of crashing with FileNotFoundError.
            if os.path.exists(label_path):
                annotations = self._parse_yolo_labels(label_path, width, height)
            else:
                annotations = []

            yield image_id, {
                "image_id": image_id,
                "species": species,
                "scientific_name": scientific_name,
                "pics_array": pics_array,
                "image_resolution": {"width": width, "height": height},
                "annotations": annotations
            }
|
|
|
|