"""TODO: Add a description here.""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
from typing import List |
|
import datasets |
|
import logging |
|
import csv |
|
import numpy as np |
|
from PIL import Image |
|
import os |
|
import io |
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
@article{chen2023dataset, |
|
title={A dataset of the quality of soybean harvested by mechanization for deep-learning-based monitoring and analysis}, |
|
author={Chen, M and Jin, C and Ni, Y and Yang, T and Xu, J}, |
|
journal={Data in Brief}, |
|
volume={52}, |
|
pages={109833}, |
|
year={2023}, |
|
publisher={Elsevier}, |
|
doi={10.1016/j.dib.2023.109833} |
|
} |
|
|
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This dataset contains images captured during the mechanized harvesting of soybeans, aimed at facilitating the development of machine vision and deep learning models for quality analysis. It contains information of original soybean pictures in different forms, labels of whether the soybean belongs to training, validation, or testing datasets, segmentation class of soybean pictures in one dataset. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/lisawen/soybean_dataset" |
|
|
|
|
|
_LICENSE = "Under a Creative Commons license" |
|
|
|
|
|
|
|
|
|
_URL = "/content/drive/MyDrive/sta_663/soybean/dataset.csv" |
|
|
|
|
|
|
|
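# The loader below assumes the index CSV has a header row with the four column
# names read in _generate_examples. The sample row is illustrative only; the
# real ids and paths come from the CSV itself:
#
#   unique_id,sets,original_image,segmentation_image
#   00001,train,/content/drive/.../originals/00001.jpg,/content/drive/.../masks/00001.png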
class SoybeanDataset(datasets.GeneratorBasedBuilder):
    """Images of mechanically harvested soybeans paired with segmentation masks."""

    _URLS = _URL
    VERSION = datasets.Version("1.1.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "unique_id": datasets.Value("string"),
                    "sets": datasets.Value("string"),
                    # Image features, so examples carry decoded images rather
                    # than the path strings stored in the CSV.
                    "original_image": datasets.Image(),
                    "segmentation_image": datasets.Image(),
                }
            ),
            supervised_keys=("original_image", "segmentation_image"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        dataset_path = self._URLS

        if not os.path.exists(dataset_path):
            raise FileNotFoundError(f"{dataset_path} does not exist. Have you mounted Google Drive?")

        # All rows are emitted under a single concrete split name; bare
        # `datasets.Split` is a class, not a valid split name.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": dataset_path},
            ),
        ]
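    # If the exact values of the `sets` column were known (e.g. "train",
    # "valid", "test", which is an assumption, not confirmed here), this method
    # could instead return one SplitGenerator per value and pass the wanted
    # value to _generate_examples through gen_kwargs, so that rows are
    # filtered into proper train/validation/test splits.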
    def _generate_examples(self, filepath):
        """Yields one example per CSV row, decoding both images from disk."""
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"{filepath} does not exist. Have you mounted Google Drive?")

        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                original_image_path = row["original_image"]
                segmentation_image_path = row["segmentation_image"]
                sets = row["sets"]

                # Read each image fully while its file handle is still open,
                # converting it to an array that the Image feature can encode.
                with open(original_image_path, "rb") as image_file:
                    original_image_array = np.array(Image.open(image_file))

                with open(segmentation_image_path, "rb") as image_file:
                    segmentation_image_array = np.array(Image.open(image_file))

                # The yielded dict must provide every declared feature,
                # including unique_id.
                yield row["unique_id"], {
                    "unique_id": row["unique_id"],
                    "sets": sets,
                    "original_image": original_image_array,
                    "segmentation_image": segmentation_image_array,
                }
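if __name__ == "__main__":
    # Minimal smoke-test sketch, not part of the loader itself: build the
    # dataset from this script. It assumes the index CSV and the image paths
    # it references exist on this machine (i.e. Drive is mounted in Colab);
    # recent versions of `datasets` require trust_remote_code for scripts.
    from datasets import load_dataset

    ds = load_dataset(__file__, trust_remote_code=True)
    print(ds)
    print(ds["train"][0]["sets"])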