"""CC6204-Hackaton-Cub-Dataset: Multimodal"""
import glob
import os
import re
import datasets
from requests import get
#datasets.logging.set_verbosity_debug()
#logger = datasets.logging.get_logger(__name__)
#datasets.logging.set_verbosity_info()
#datasets.logging.set_verbosity_debug()
_DESCRIPTION = "Multimodal dataset for the hackathon activity of the course CC6204: Deep Learning"
_CITATION = "XYZ"
_HOMEPAGE = "https://github.com/ivansipiran/CC6204-Deep-Learning/blob/main/Hackaton/hackaton.md"
_REPO = "https://huggingface.co/datasets/alkzar90/CC6204-Hackaton-Cub-Dataset/resolve/main/data"
_URLS = {
    "train_test_split": f"{_REPO}/train_test_split.txt",
    "classes": f"{_REPO}/classes.txt",
    "image_class_labels": f"{_REPO}/image_class_labels.txt",
    "images": f"{_REPO}/images.txt",
    "image_urls": f"{_REPO}/images.zip",
    "text_urls": f"{_REPO}/text.zip",
    "mini_images_urls": f"{_REPO}/dummy/mini_images.zip",
}
# Create the class-id-to-label dictionary from the classes file. Each row is
# "<class_id> <label>", e.g. "1 001.Black_footed_Albatross"; the regex drops
# the numeric prefix, and underscores are replaced with spaces.
classes = get(_URLS["classes"]).iter_lines()
_ID2LABEL = {}
for row in classes:
    row = row.decode("utf-8")
    if row != "":
        idx, label = row.split(" ")
        _ID2LABEL[int(idx)] = re.search(r"[^\d._+].+", label).group(0).replace("_", " ")

_NAMES = list(_ID2LABEL.values())
# Create the image-id-to-class-id dictionary from the image_class_labels file
img_idx_2_class_idx = get(_URLS["image_class_labels"]).iter_lines()
_IMGID2CLASSID = {}
for row in img_idx_2_class_idx:
    row = row.decode("utf-8")
    if row != "":
        idx, class_id = row.split(" ")
        _IMGID2CLASSID[idx] = int(class_id)
# Build, from images.txt, a mapping from image file name -> image id
imgpath_to_ids = get(_URLS["images"]).iter_lines()
_IMGNAME2ID = {}
for row in imgpath_to_ids:
    row = row.decode("utf-8")
    if row != "":
        idx, img_name = row.split(" ")
        _IMGNAME2ID[os.path.basename(img_name)] = idx
# Create the set of train image ids (rows of train_test_split.txt are
# "<image_id> <is_train>", where 1 means train and 0 means test)
train_test_split = get(_URLS["train_test_split"]).iter_lines()
_TRAIN_IDX_SET = set()
for row in train_test_split:
    row = row.decode("utf-8")
    if row != "":
        idx, train_bool = row.split(" ")
        if train_bool == "1":
            _TRAIN_IDX_SET.add(idx)
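
# For illustration only, a sketch of what the mappings above end up holding,
# assuming the hosted txt files follow the standard CUB-200-2011 metadata
# format (the concrete values depend on the actual files):
#   _ID2LABEL[1]                                        -> "Black footed Albatross"
#   _IMGID2CLASSID["1"]                                 -> 1
#   _IMGNAME2ID["Black_Footed_Albatross_0046_18.jpg"]   -> "1"
#   "1" in _TRAIN_IDX_SET                               -> True if image 1 is in the train split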
class CubDataset(datasets.GeneratorBasedBuilder):
    """CUB dataset for the CC6204: Deep Learning course hackathon"""

    def _info(self):
        features = datasets.Features({
            "image": datasets.Image(),
            "description": datasets.Value("string"),
            "label": datasets.features.ClassLabel(names=_NAMES),
            "file_name": datasets.Value("string"),
        })
        keys = ("image", "label")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=keys,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        train_files = []
        train_idx = []
        test_files = []
        test_idx = []

        # Download and extract the image and text archives; joining with the
        # "images"/"text" subdirectory skips the __MACOSX folder in the zips
        img_data_files = os.path.join(dl_manager.download_and_extract(_URLS["image_urls"]), "images")
        text_data_files = os.path.join(dl_manager.download_and_extract(_URLS["text_urls"]), "text")

        img_path_files = sorted(glob.glob(os.path.join(img_data_files, "*", "*.jpg")))
        text_path_files = sorted(glob.glob(os.path.join(text_data_files, "*", "*.txt")))

        for img, text in zip(img_path_files, text_path_files):
            img_idx = _IMGNAME2ID[os.path.basename(img)]
            # Sanity check: only keep pairs whose image and text base names match
            if os.path.basename(img).replace(".jpg", "") == os.path.basename(text).replace(".txt", ""):
                if img_idx in _TRAIN_IDX_SET:
                    train_files.append((img, text))
                    train_idx.append(img_idx)
                else:
                    test_files.append((img, text))
                    test_idx.append(img_idx)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": train_files,
                    "image_idx": train_idx,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": test_files,
                    "image_idx": test_idx,
                },
            ),
        ]
    def _generate_examples(self, files, image_idx):
        for i, (img_path, text_path) in enumerate(files):
            file_name = os.path.basename(img_path)
            if file_name.endswith(".jpg"):
                # Read the caption with a context manager so the file handle is closed
                with open(text_path, "r", encoding="utf-8") as f:
                    description = f.read()
                yield i, {
                    "image": img_path,
                    "description": description,
                    "label": _ID2LABEL[_IMGID2CLASSID[image_idx[i]]],
                    "file_name": file_name,
                }
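
# A minimal usage sketch, not part of the loading script proper: assuming this
# file is the loading script of the Hub repo "alkzar90/CC6204-Hackaton-Cub-Dataset",
# the dataset can be loaded as below. The __main__ guard keeps this from running
# when the datasets library imports the script; recent `datasets` releases may
# additionally require passing trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    ds = datasets.load_dataset("alkzar90/CC6204-Hackaton-Cub-Dataset")
    print(ds)  # DatasetDict with "train" and "test" splits
    sample = ds["train"][0]
    print(sample["file_name"], sample["label"], sample["description"][:80])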