CelebA_bbox_and_facepoints / CelebA_bbox_and_facepoints.py
hfaus's picture
Upload CelebA_bbox_and_facepoints.py
25e1929
raw
history blame contribute delete
No virus
6.12 kB
# coding=utf-8
"""CelebA FACE dataset."""
import os
import datasets
# Official project page of the CelebA dataset (MMLab, CUHK).
_HOMEPAGE = "https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html"
_LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"
# BibTeX citation surfaced in the DatasetInfo.
_CITATION = """\
@inproceedings{liu2015faceattributes,
title = {Deep Learning Face Attributes in the Wild},
author = {Liu, Ziwei and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou},
booktitle = {Proceedings of International Conference on Computer Vision (ICCV)},
month = {December},
year = {2015}
}
"""
_DESCRIPTION = """\
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset with more than 200K celebrity images,
each with 40 attribute annotations. The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including 10,177 number of identities, 202,599 number of face images,
and 5 landmark locations, 40 binary attributes annotations per image.
"""
# Base URL of the Hugging Face dataset repo hosting the image archives and annotations.
_REPO = "https://huggingface.co/datasets/hfaus/CelebA_bbox_and_facepoints/resolve/main/data"
# Download targets: one zip of images per split, plus three annotation text
# files (split assignment, bounding boxes, landmarks) shared by all splits.
_URLS = {
"train": f"{_REPO}/celebA_train.zip",
"validation": f"{_REPO}/celebA_val.zip",
"test": f"{_REPO}/celebA_test.zip",
"eval_partition_file": f"{_REPO}/list_eval_partition.txt",
"bbox_file": f"{_REPO}/list_bbox_celeba.txt",
"landmarks_file": f"{_REPO}/list_landmarks_celeba.txt"
}
class CelebA(datasets.GeneratorBasedBuilder):
    """CelebA dataset builder exposing images with bounding boxes and 5-point facial landmarks."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata and feature schema.

        Each example is an image plus a 4-int bbox ``[x, y, width, height]``
        and five 2-int landmark coordinates.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "bbox": datasets.Sequence(datasets.Value("int32"), length=4),
                    "facial_landmarks": {
                        "lefteye": datasets.Sequence(datasets.Value("int32"), length=2),
                        "righteye": datasets.Sequence(datasets.Value("int32"), length=2),
                        "nose": datasets.Sequence(datasets.Value("int32"), length=2),
                        "leftmouth": datasets.Sequence(datasets.Value("int32"), length=2),
                        "rightmouth": datasets.Sequence(datasets.Value("int32"), length=2)
                    }
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Download the archives/annotations and define the three splits.

        ``partition_type`` follows the CelebA convention in
        ``list_eval_partition.txt``: 0 = train, 1 = validation, 2 = test.
        """
        data_dir = dl_manager.download_and_extract(_URLS)
        # The three annotation files are shared by every split.
        shared_files = {
            "eval_partition_file": data_dir["eval_partition_file"],
            "bbox_file": data_dir["bbox_file"],
            "landmarks_file": data_dir["landmarks_file"],
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "data_dir": data_dir["train"],
                    "partition_type": 0,
                    **shared_files,
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "data_dir": data_dir["test"],
                    "partition_type": 2,
                    **shared_files,
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "val",
                    "data_dir": data_dir["validation"],
                    "partition_type": 1,
                    **shared_files,
                }
            )
        ]

    def _generate_examples(self, split, data_dir, partition_type, eval_partition_file, bbox_file, landmarks_file):
        """Yield ``(idx, example)`` pairs for the images belonging to this split.

        All three annotation files list the same images in the same order, so
        the bbox and landmarks readers must advance in lockstep with the
        partition file on EVERY row — not only on matching rows.  (The
        previous implementation only read bbox/landmark lines for matching
        rows, which paired validation/test images with the training images'
        annotations.)
        """
        # Context managers guarantee the files are closed even if the
        # generator is abandoned mid-iteration or a parse error is raised.
        with open(bbox_file, "r", encoding="utf-8") as bbox_f, \
             open(landmarks_file, "r", encoding="utf-8") as lm_f, \
             open(eval_partition_file, "r", encoding="utf-8") as part_f:
            # Skip the two header lines (image count + column names) of the
            # bbox and landmarks files; the partition file has no header.
            for _ in range(2):
                bbox_f.readline()
                lm_f.readline()
            idx = 0
            for eval_line in part_f:
                # Original behavior: stop at the first line without an image name.
                if ".jpg" not in eval_line:
                    break
                # Advance annotation readers unconditionally to stay in sync.
                bbox_fields = bbox_f.readline().split()
                lm_fields = lm_f.readline().split()
                eval_fields = eval_line.split()
                if int(eval_fields[1]) != partition_type:
                    continue
                image_file_path = os.path.join(data_dir, eval_fields[0])
                # Columns: image_id x y width height.
                bbox_total = [int(v) for v in bbox_fields[1:5]]
                # Columns: image_id then five (x, y) coordinate pairs.
                facial_landmarks = {
                    'lefteye': [int(lm_fields[1]), int(lm_fields[2])],
                    'righteye': [int(lm_fields[3]), int(lm_fields[4])],
                    'nose': [int(lm_fields[5]), int(lm_fields[6])],
                    'leftmouth': [int(lm_fields[7]), int(lm_fields[8])],
                    'rightmouth': [int(lm_fields[9]), int(lm_fields[10])],
                }
                yield idx, {"image": image_file_path, "bbox": bbox_total, "facial_landmarks": facial_landmarks}
                idx += 1