"""TODO: Add a description here.""" |
|
|
|
|
|
import logging
import os
from typing import List

import datasets
from PIL import Image

_CITATION = """\
@article{chen2023dataset,
  title={A dataset of the quality of soybean harvested by mechanization for deep-learning-based monitoring and analysis},
  author={Chen, M and Jin, C and Ni, Y and Yang, T and Xu, J},
  journal={Data in Brief},
  volume={52},
  pages={109833},
  year={2023},
  publisher={Elsevier},
  doi={10.1016/j.dib.2023.109833}
}
"""

_DESCRIPTION = """\
This dataset contains images captured during the mechanized harvesting of soybeans, intended to support the development of machine vision and deep learning models for harvest quality analysis. Each example pairs an original photograph of the harvested soybeans with its corresponding segmentation image, and every image is assigned to the training, validation, or test split.
"""

_HOMEPAGE = "https://huggingface.co/datasets/lisawen/soybean_dataset" |
|
|
|
|
|
_LICENSE = "Under a Creative Commons license" |
|
|
|
|
|
|
|
|
|
|
|
_URLs = {
    "train": "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/train.zip?download=true",
    "test": "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/test.zip?download=true",
    "valid": "https://huggingface.co/datasets/lisawen/soybean_dataset/resolve/main/valid.zip?download=true",
}


class SoybeanDataset(datasets.GeneratorBasedBuilder):
    """Original and segmentation images of mechanically harvested soybeans."""

    _URLs = _URLs
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "original_image": datasets.Image(),
                    "segmentation_image": datasets.Image(),
                }
            ),
            supervised_keys=("original_image", "segmentation_image"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Download and extract the per-split archives listed in _URLs.
        urls_to_download = self._URLs
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}
            ),
        ]

    def _generate_examples(self, filepath):
        logging.info("Generating examples from = %s", filepath)

        if not os.path.exists(filepath):
            logging.warning("Split directory does not exist: %s", filepath)
            return

        # Each original image `<id>_original.jpg` is paired with a segmentation
        # image `<id>_segmentation.png` in the same extracted directory.
        for filename in sorted(os.listdir(filepath)):
            if not filename.endswith("_original.jpg"):
                continue

            unique_id = filename.split("_")[0]
            segmentation_image_name = filename.replace("_original.jpg", "_segmentation.png")

            original_image_path = os.path.join(filepath, filename)
            segmentation_image_path = os.path.join(filepath, segmentation_image_name)

            original_image = Image.open(original_image_path)
            segmentation_image = Image.open(segmentation_image_path)

            yield unique_id, {
                "original_image": original_image,
                "segmentation_image": segmentation_image,
            }
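

# A minimal usage sketch (an assumption, not part of the loading script proper):
# once this script and the train/test/valid archives are hosted in the
# https://huggingface.co/datasets/lisawen/soybean_dataset repository, the splits
# can be loaded through the standard `datasets` API. Recent `datasets` releases
# may additionally require `trust_remote_code=True` for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    soybean = load_dataset("lisawen/soybean_dataset", split="train")
    sample = soybean[0]
    # Both fields are decoded to PIL images by the `datasets.Image()` feature.
    print(sample["original_image"].size, sample["segmentation_image"].size)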