# HuggingFace `datasets` loading script for the "renovation" image-classification dataset.
import os
import glob
import random
import datasets
from datasets.tasks import ImageClassification
# Homepage shown on the Hub dataset card.
_HOMEPAGE = "https://github.com/your-github/renovation"
# BibTeX-style citation surfaced through DatasetInfo.citation.
_CITATION = """\
@ONLINE {renovationdata,
author="Your Name",
title="Renovation dataset",
month="January",
year="2023",
url="https://github.com/your-github/renovation"
}
"""
# Short description surfaced through DatasetInfo.description.
_DESCRIPTION = """\
Renovations is a dataset of images of houses taken in the field using smartphone
cameras. It consists of 3 classes: cheap, average, and expensive renovations.
Data was collected by the your research lab.
"""
# One zip archive of images per class, hosted on the Hub.
_URLS = {
"cheap": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/cheap.zip",
"average": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/average.zip",
"expensive": "https://huggingface.co/datasets/rshrott/renovation/resolve/main/expensive.zip",
}
# Class-label names; order defines the ClassLabel integer encoding.
_NAMES = ["cheap", "average", "expensive"]
class Renovations(datasets.GeneratorBasedBuilder):
    """Renovations house images dataset.

    House photos labelled with one of three renovation-quality classes
    (cheap / average / expensive). A single download is partitioned
    70% / 15% / 15% into train / validation / test.
    """

    # Fixed shuffle seed. `_generate_examples` runs once per split, so the
    # shuffle must produce the same permutation every call; the original
    # unseeded `random.shuffle` reshuffled differently per split, making the
    # train/val/test slices non-reproducible and overlapping (data leakage).
    _SHUFFLE_SEED = 42

    def _info(self):
        """Return dataset metadata: features, supervised keys, citation, task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )

    def _split_generators(self, dl_manager):
        """Download and extract the class archives, then declare the three splits.

        Every split receives the same `data_files` mapping (label -> extracted
        directory); the actual 70/15/15 partitioning happens in
        `_generate_examples` based on the `split` kwarg.
        """
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "val",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_files": data_files,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, data_files, split):
        """Yield `(index, example)` pairs for the requested split.

        Args:
            data_files: mapping of class label -> directory of extracted images.
            split: one of "train", "val", "test"; any other value falls
                through to the test slice.
        """
        all_files_and_labels = []
        for label, path in data_files.items():
            # '**' with recursive=True also finds images the zip extracted
            # into nested subdirectories (the original pattern passed
            # recursive=True but had no '**', so it never recursed).
            files = glob.glob(os.path.join(path, "**", "*.jpeg"), recursive=True)
            all_files_and_labels.extend((file, label) for file in files)
        # Sort first so the ordering is filesystem-independent, then shuffle
        # with a fixed seed so all three splits see the identical permutation
        # and their slices stay disjoint across calls.
        all_files_and_labels.sort()
        random.Random(self._SHUFFLE_SEED).shuffle(all_files_and_labels)
        num_files = len(all_files_and_labels)
        if split == "train":
            all_files_and_labels = all_files_and_labels[:int(num_files * 0.7)]
        elif split == "val":
            all_files_and_labels = all_files_and_labels[int(num_files * 0.7):int(num_files * 0.85)]
        else:
            all_files_and_labels = all_files_and_labels[int(num_files * 0.85):]
        for idx, (file, label) in enumerate(all_files_and_labels):
            yield idx, {
                "image_file_path": file,
                "image": file,  # datasets.Image() decodes from the path lazily
                "labels": label,
            }