vkashko committed
Commit 8b54612 · 1 Parent(s): 4167c17

feat: script

Files changed (1)
  1. generated-usa-passeports-dataset.py +149 -0
generated-usa-passeports-dataset.py ADDED
@@ -0,0 +1,149 @@
+ from pathlib import Path
+
+ import datasets
+ import numpy as np
+ import pandas as pd
+ import PIL.Image
+ import PIL.ImageOps
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+     title = {generated-usa-passeports-dataset},
+     author = {TrainingDataPro},
+     year = {2023}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The dataset consists of selfies of people and videos of them wearing a printed
+ 2D mask with their face. The dataset solves tasks in the field of anti-spoofing
+ and it is useful for business and safety systems.
+ The dataset includes: **attacks** - videos of people wearing printed portraits
+ of themselves with cut-out eyes.
+ """
+ _NAME = 'generated-usa-passeports-dataset'
+
+ _HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
+
+ _LICENSE = "cc-by-nc-nd-4.0"
+
+ _DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
+
+
+ def exif_transpose(img):
+     if not img:
+         return img
+
+     exif_orientation_tag = 274
+
+     # Check for EXIF data (only present on some files)
+     if hasattr(img, "_getexif") and isinstance(
+             img._getexif(), dict) and exif_orientation_tag in img._getexif():
+         exif_data = img._getexif()
+         orientation = exif_data[exif_orientation_tag]
+
+         # Handle EXIF Orientation
+         if orientation == 1:
+             # Normal image - nothing to do!
+             pass
+         elif orientation == 2:
+             # Mirrored left to right
+             img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
+         elif orientation == 3:
+             # Rotated 180 degrees
+             img = img.rotate(180)
+         elif orientation == 4:
+             # Mirrored top to bottom
+             img = img.rotate(180).transpose(PIL.Image.FLIP_LEFT_RIGHT)
+         elif orientation == 5:
+             # Mirrored along top-left diagonal
+             img = img.rotate(-90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
+         elif orientation == 6:
+             # Rotated 90 degrees
+             img = img.rotate(-90, expand=True)
+         elif orientation == 7:
+             # Mirrored along top-right diagonal
+             img = img.rotate(90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
+         elif orientation == 8:
+             # Rotated 270 degrees
+             img = img.rotate(90, expand=True)
+
+     return img
+
+
+ def load_image_file(file, mode='RGB'):
+     # Load the image with PIL
+     img = PIL.Image.open(file)
+
+     if hasattr(PIL.ImageOps, 'exif_transpose'):
+         # Very recent versions of PIL can do the EXIF transpose internally
+         img = PIL.ImageOps.exif_transpose(img)
+     else:
+         # Otherwise, do the EXIF transpose ourselves
+         img = exif_transpose(img)
+
+     img = img.convert(mode)
+
+     return np.array(img)
+
+
+ class GeneratedUsaPasseportsDataset(datasets.GeneratorBasedBuilder):
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 'original': datasets.Image(),
+                 'us_pass_augmentated_1': datasets.Image(),
+                 'us_pass_augmentated_2': datasets.Image(),
+                 'us_pass_augmentated_3': datasets.Image()
+             }),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE)
+
+     def _split_generators(self, dl_manager):
+         original = dl_manager.download_and_extract(f"{_DATA}photo.zip")
+         augmentation = dl_manager.download(f"{_DATA}attack.tar.gz")
+         annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
+         original = dl_manager.iter_files(original)
+         augmentation = dl_manager.iter_archive(augmentation)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN,
+                                     gen_kwargs={
+                                         "original": original,
+                                         "augmentation": augmentation,
+                                         "annotations": annotations
+                                     }),
+         ]
+
+     def _generate_examples(self, original, augmentation, annotations):
+         annotations_df = pd.read_csv(annotations, sep=';')
+
+         for idx, (image_path, (attack_path, attack)) in enumerate(
+                 zip(sorted(original), sorted(augmentation, key=lambda x: x[0]))):
+             image_name = Path(image_path).name
+             yield idx, {
+                 "photo": load_image_file(image_path),
+                 "attack": attack_path,
+                 # annotations_df.loc[annotations_df['photo'].str.lower() ==
+                 #                    image_name.lower()]['attack'].values[0],
+                 'phone': annotations_df.loc[annotations_df['photo'].str.lower() ==
+                                             image_name.lower()]['phone'].values[0],
+                 'gender': annotations_df.loc[annotations_df['photo'].str.lower() ==
+                                              image_name.lower()]['gender'].values[0],
+                 'age': annotations_df.loc[annotations_df['photo'].str.lower() ==
+                                           image_name.lower()]['age'].values[0],
+                 'country': annotations_df.loc[annotations_df['photo'].str.lower() ==
+                                               image_name.lower()]['country'].values[0],
+             }
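
A minimal usage sketch for the builder above, assuming the script is published as TrainingDataPro/generated-usa-passeports-dataset on the Hugging Face Hub and that the data archives referenced in _split_generators are reachable:

from datasets import load_dataset

# Load the train split produced by GeneratedUsaPasseportsDataset.
# On recent releases of the datasets library, trust_remote_code=True is
# required to execute a Hub-hosted loading script such as this one;
# older releases accept the call without it.
dataset = load_dataset("TrainingDataPro/generated-usa-passeports-dataset",
                       split="train",
                       trust_remote_code=True)

print(dataset)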