File size: 6,120 Bytes
66ede7e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dcc88d1
 
 
 
66ede7e
 
 
 
 
 
 
 
 
 
 
 
 
f7aac21
4877ced
 
 
 
 
 
 
66ede7e
 
 
 
 
dcc88d1
66ede7e
 
 
 
 
 
 
 
 
dcc88d1
 
 
 
 
 
66ede7e
 
 
 
 
dcc88d1
25e1929
dcc88d1
 
 
 
66ede7e
 
 
 
 
dcc88d1
25e1929
dcc88d1
 
 
 
 
66ede7e
 
dcc88d1
66ede7e
 
dcc88d1
 
219da71
 
 
 
 
 
4610165
 
 
 
 
 
77a9d8c
4610165
 
 
 
 
 
a16b2ce
4610165
 
219da71
4610165
 
 
 
219da71
4610165
 
f7aac21
 
 
 
 
4610165
 
f7aac21
4610165
 
 
4877ced
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
# coding=utf-8
"""CelebA FACE dataset."""

import os
import datasets

_HOMEPAGE = "https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html"

_LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"

_CITATION = """\
@inproceedings{liu2015faceattributes,
  title = {Deep Learning Face Attributes in the Wild},
  author = {Liu, Ziwei and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou},
  booktitle = {Proceedings of International Conference on Computer Vision (ICCV)},
  month = {December},
  year = {2015} 
}
"""

_DESCRIPTION = """\
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset with more than 200K celebrity images,
each with 40 attribute annotations. The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including 10,177 number of identities, 202,599 number of face images,
and 5 landmark locations, 40 binary attributes annotations per image.
"""

_REPO = "https://huggingface.co/datasets/hfaus/CelebA_bbox_and_facepoints/resolve/main/data"
_URLS = {
    "train": f"{_REPO}/celebA_train.zip",
    "validation": f"{_REPO}/celebA_val.zip",
    "test": f"{_REPO}/celebA_test.zip",
    "eval_partition_file": f"{_REPO}/list_eval_partition.txt",
    "bbox_file": f"{_REPO}/list_bbox_celeba.txt",
    "landmarks_file": f"{_REPO}/list_landmarks_celeba.txt"
}

class CelebA(datasets.GeneratorBasedBuilder):
    """CelebA dataset builder yielding image path, face bbox, and 5 facial landmarks."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: one image with a 4-int bbox and five (x, y) landmark points."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "bbox": datasets.Sequence(datasets.Value("int32"), length=4),
                    "facial_landmarks": {
                       "lefteye": datasets.Sequence(datasets.Value("int32"), length=2),
                       "righteye": datasets.Sequence(datasets.Value("int32"), length=2),
                       "nose": datasets.Sequence(datasets.Value("int32"), length=2),
                       "leftmouth": datasets.Sequence(datasets.Value("int32"), length=2),
                       "rightmouth": datasets.Sequence(datasets.Value("int32"), length=2)
                    }
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION
        )

    def _split_generators(self, dl_manager):
        """Download the per-split image archives plus the shared annotation files.

        The eval-partition file tags each image with 0 (train), 1 (val) or 2 (test);
        the same three annotation files are passed to every split generator.
        """
        data_dir = dl_manager.download_and_extract(_URLS)

        def _gen_kwargs(split, data_key, partition_type):
            # The three shared annotation files are identical across splits.
            return {
                "split": split,
                "data_dir": data_dir[data_key],
                "partition_type": partition_type,
                "eval_partition_file": data_dir["eval_partition_file"],
                "bbox_file": data_dir["bbox_file"],
                "landmarks_file": data_dir["landmarks_file"],
            }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=_gen_kwargs("train", "train", 0),
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs=_gen_kwargs("test", "test", 2),
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=_gen_kwargs("val", "validation", 1),
            ),
        ]

    @staticmethod
    def _load_annotations(annotation_file, num_values):
        """Parse a CelebA annotation file into ``{image_filename: [int, ...]}``.

        Both list_bbox_celeba.txt and list_landmarks_celeba.txt share the same
        layout: a record-count line, a column-header line, then one
        whitespace-separated row per image starting with the image filename.

        Args:
            annotation_file: path to the annotation text file.
            num_values: number of integer columns to keep after the filename
                (4 for bboxes, 10 for landmarks).

        Returns:
            dict mapping image filename to a list of ``num_values`` ints.
        """
        annotations = {}
        with open(annotation_file, "r", encoding="utf-8") as f:
            f.readline()  # skip record-count header
            f.readline()  # skip column-name header
            for line in f:
                parts = line.split()
                if not parts:
                    continue
                annotations[parts[0]] = [int(v) for v in parts[1:num_values + 1]]
        return annotations

    def _generate_examples(self, split, data_dir, partition_type, eval_partition_file, bbox_file, landmarks_file):
        """Yield (idx, example) pairs for the images belonging to ``partition_type``.

        Annotations are looked up by image filename rather than read positionally.
        (Reading the bbox/landmark files line-by-line only on partition matches —
        as a sequential cursor — desynchronizes them from the eval-partition file
        for the validation and test splits, attaching the wrong annotations.)
        """
        bboxes = self._load_annotations(bbox_file, 4)
        landmarks = self._load_annotations(landmarks_file, 10)

        idx = 0
        with open(eval_partition_file, "r", encoding="utf-8") as f:
            for eval_line in f:
                parts = eval_line.split()
                # Each data row is "<image>.jpg <partition>"; skip anything else.
                if len(parts) < 2 or ".jpg" not in parts[0]:
                    continue
                filename = parts[0]
                if int(parts[1]) != partition_type:
                    continue

                lm = landmarks[filename]
                yield idx, {
                    "image": os.path.join(data_dir, filename),
                    "bbox": bboxes[filename],
                    "facial_landmarks": {
                        "lefteye": lm[0:2],
                        "righteye": lm[2:4],
                        "nose": lm[4:6],
                        "leftmouth": lm[6:8],
                        "rightmouth": lm[8:10],
                    },
                }
                idx += 1