import datasets

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {silicone-masks-biometric-attacks},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of videos of individuals and of attacks with printed 2D masks
and silicone masks. Videos are filmed in different lighting conditions (*in a dark
room, in daylight, in a light room and in nightlight*). The dataset includes videos
of people with different attributes (*glasses, mask, hat, hood, wigs and mustaches
for men*).
"""

_NAME = "silicone-masks-biometric-attacks"

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"

_LABELS = ["real", "silicone", "mask"]


class SiliconeMasksBiometricAttacks(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "video_name": datasets.Value("string"),
                    "video_path": datasets.Value("string"),
                    "label": datasets.ClassLabel(
                        num_classes=len(_LABELS),
                        names=_LABELS,
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
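        # Download the single videos.tar.gz archive and iterate over its members
        # lazily, so the files are streamed rather than fully extracted to disk.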
        videos = dl_manager.download(f"{_DATA}videos.tar.gz")
        videos = dl_manager.iter_archive(videos)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "videos": videos,
                },
            ),
        ]

    def _generate_examples(self, videos):
        for idx, (video_path, video) in enumerate(videos):
            # Infer the class label from the file path inside the archive;
            # if several label names occur in the path, the last match wins.
            label = None
            for lbl in _LABELS:
                if lbl in video_path:
                    label = lbl

            yield idx, {
                "id": idx,
                "video_name": video_path.split("/")[-1],
                "video_path": video_path,
                "label": label,
            }
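

# Example usage (a minimal sketch, assuming the `datasets` library resolves this
# script from the Hub repository; the split and field names match those defined
# above, and newer `datasets` versions may also require `trust_remote_code=True`):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("TrainingDataPro/silicone-masks-biometric-attacks", split="train")
#     sample = ds[0]
#     print(sample["video_name"], sample["label"])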