"""CelebA FACE dataset.""" |
|
|
|
import os |
|
import datasets |
|
|
|
_HOMEPAGE = "https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html" |
|
|
|
_LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)" |
|
|
|

_CITATION = """\
@inproceedings{liu2015faceattributes,
    title = {Deep Learning Face Attributes in the Wild},
    author = {Liu, Ziwei and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou},
    booktitle = {Proceedings of International Conference on Computer Vision (ICCV)},
    month = {December},
    year = {2015}
}
"""

_DESCRIPTION = """\
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset with more than 200K celebrity images,
each with 40 attribute annotations. The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations: 10,177 identities, 202,599 face images,
and 5 landmark locations and 40 binary attribute annotations per image.
"""

_REPO = "https://huggingface.co/datasets/hfaus/CelebA_bbox_and_facepoints/resolve/main/data"

_URLS = {
    "train": f"{_REPO}/celebA_train.zip",
    "validation": f"{_REPO}/celebA_val.zip",
    "test": f"{_REPO}/celebA_test.zip",
    "eval_partition_file": f"{_REPO}/list_eval_partition.txt",
    "bbox_file": f"{_REPO}/list_bbox_celeba.txt",
    "landmarks_file": f"{_REPO}/list_landmarks_celeba.txt",
}
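
# Layout of the annotation files (assumed to match the published CelebA
# format): list_bbox_celeba.txt and list_landmarks_celeba.txt both begin with
# two header lines (the image count and the column names) followed by one
# whitespace-separated row per image; list_eval_partition.txt has no header.
#
#     list_eval_partition.txt:   000001.jpg 0
#     list_bbox_celeba.txt:      000001.jpg  95  71 226 313    (x, y, width, height)
#     list_landmarks_celeba.txt: 000001.jpg 165 184 244 176 ... (five x-y pairs)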


class CelebA(datasets.GeneratorBasedBuilder):
    """CelebA dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "bbox": datasets.Sequence(datasets.Value("int32")),
                    # Each image carries exactly one set of landmarks, so this
                    # is a plain dict of (x, y) pairs rather than a Sequence
                    # of dicts.
                    "facial_landmarks": {
                        "lefteye": datasets.Sequence(datasets.Value("int32")),
                        "righteye": datasets.Sequence(datasets.Value("int32")),
                        "nose": datasets.Sequence(datasets.Value("int32")),
                        "leftmouth": datasets.Sequence(datasets.Value("int32")),
                        "rightmouth": datasets.Sequence(datasets.Value("int32")),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
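
    # For illustration only (hypothetical values, not real annotations), a
    # generated example is shaped like:
    #     {
    #         "image": ".../000001.jpg",
    #         "bbox": [95, 71, 226, 313],
    #         "facial_landmarks": {"lefteye": [165, 184], "righteye": [244, 176], ...},
    #     }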

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        # list_eval_partition.txt flags each image with 0 (train),
        # 1 (validation) or 2 (test).
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "data_dir": data_dir["train"],
                    "partition_type": 0,
                    "eval_partition_file": data_dir["eval_partition_file"],
                    "bbox_file": data_dir["bbox_file"],
                    "landmarks_file": data_dir["landmarks_file"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "data_dir": data_dir["test"],
                    "partition_type": 2,
                    "eval_partition_file": data_dir["eval_partition_file"],
                    "bbox_file": data_dir["bbox_file"],
                    "landmarks_file": data_dir["landmarks_file"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "val",
                    "data_dir": data_dir["validation"],
                    "partition_type": 1,
                    "eval_partition_file": data_dir["eval_partition_file"],
                    "bbox_file": data_dir["bbox_file"],
                    "landmarks_file": data_dir["landmarks_file"],
                },
            ),
        ]

    def _generate_examples(self, split, data_dir, partition_type, eval_partition_file, bbox_file, landmarks_file):
        # The bbox and landmarks files carry two header lines (the image count
        # and the column names), so row i of the eval partition file lines up
        # with row i + 2 of both annotation files.
        with open(eval_partition_file, "r", encoding="utf-8") as partition_fh, open(
            bbox_file, "r", encoding="utf-8"
        ) as bbox_fh, open(landmarks_file, "r", encoding="utf-8") as landmarks_fh:
            partition_lines = partition_fh.readlines()
            bbox_lines = bbox_fh.readlines()
            landmarks_lines = landmarks_fh.readlines()

        idx = 0
        for i, eval_line in enumerate(partition_lines):
            eval_line = eval_line.rstrip()
            if ".jpg" not in eval_line:
                continue
            filename, line_partition = eval_line.split()
            # Keep only the images belonging to the requested split.
            if int(line_partition) != partition_type:
                continue

            image_file_path = os.path.join(data_dir, filename)

            bbox = bbox_lines[i + 2].split()
            bbox_total = [int(bbox[1]), int(bbox[2]), int(bbox[3]), int(bbox[4])]

            landmarks = landmarks_lines[i + 2].split()
            facial_landmarks = {
                "lefteye": [int(landmarks[1]), int(landmarks[2])],
                "righteye": [int(landmarks[3]), int(landmarks[4])],
                "nose": [int(landmarks[5]), int(landmarks[6])],
                "leftmouth": [int(landmarks[7]), int(landmarks[8])],
                "rightmouth": [int(landmarks[9]), int(landmarks[10])],
            }

            yield idx, {"image": image_file_path, "facial_landmarks": facial_landmarks, "bbox": bbox_total}
            idx += 1
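
# Usage sketch (an assumption-laden example, not part of the loader itself):
# with this file saved as celeba.py next to your code, something like
#
#     import datasets
#     ds = datasets.load_dataset("celeba.py", split="train", trust_remote_code=True)
#     print(ds[0]["bbox"], ds[0]["facial_landmarks"]["lefteye"])
#
# should yield one example per training image; trust_remote_code=True is only
# needed on datasets versions that require it for script-based loaders.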