Vadzim Kashko committed
Commit 2271465
1 Parent(s): 5239c2b

refactor: script and readme

Files changed (3):
  1. README.md +52 -0
  2. selfies_and_id.pdf +0 -0
  3. selfies_and_id.py +118 -0
README.md CHANGED
@@ -4,6 +4,58 @@ task_categories:
 - image-to-image
 tags:
 - code
+dataset_info:
+  features:
+  - name: id_1
+    dtype: image
+  - name: id_2
+    dtype: image
+  - name: selfie_1
+    dtype: image
+  - name: selfie_2
+    dtype: image
+  - name: selfie_3
+    dtype: image
+  - name: selfie_4
+    dtype: image
+  - name: selfie_5
+    dtype: image
+  - name: selfie_6
+    dtype: image
+  - name: selfie_7
+    dtype: image
+  - name: selfie_8
+    dtype: image
+  - name: selfie_9
+    dtype: image
+  - name: selfie_10
+    dtype: image
+  - name: selfie_11
+    dtype: image
+  - name: selfie_12
+    dtype: image
+  - name: selfie_13
+    dtype: image
+  - name: user_id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: user_race
+    dtype: string
+  - name: name
+    dtype: string
+  - name: age
+    dtype: int8
+  - name: country
+    dtype: string
+  - name: gender
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 376371811
+    num_examples: 10
+  download_size: 374658409
+  dataset_size: 376371811
 ---
 # Selfies, ID Images dataset
 **4083** sets, which includes *2 photos of a person from his documents and 13 selfies*. **571** sets of Hispanics and **3512** sets of Caucasians.
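
The `dataset_info` block added above is the schema consumers will see on the Hub. As a quick sanity check of that schema, here is a minimal loading sketch; it assumes the dataset is publicly accessible under the `TrainingDataPro/selfies_and_id` repo id used by the script below, and newer `datasets` releases also require `trust_remote_code=True` for script-backed datasets:

import datasets

# Hypothetical usage; repo id taken from _HOMEPAGE in selfies_and_id.py.
ds = datasets.load_dataset("TrainingDataPro/selfies_and_id",
                           split="train",
                           trust_remote_code=True)

example = ds[0]
print(example["user_id"], example["age"], example["country"])

# Columns declared `dtype: image` in the card decode to PIL images on access.
example["selfie_1"].save("selfie_1.png")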
selfies_and_id.pdf ADDED
Binary file (292 kB).
 
selfies_and_id.py ADDED
@@ -0,0 +1,118 @@
+import io
+
+import datasets
+import pandas as pd
+
+_CITATION = """\
+@InProceedings{huggingface:dataset,
+title = {selfies_and_id},
+author = {TrainingDataPro},
+year = {2023}
+}
+"""
+
+_DESCRIPTION = """\
+4083 sets, which includes 2 photos of a person from his documents and
+13 selfies. 571 sets of Hispanics and 3512 sets of Caucasians.
+Photo documents contains only a photo of a person.
+All personal information from the document is hidden.
+"""
+_NAME = 'selfies_and_id'
+
+_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
+
+_LICENSE = ""
+
+_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
+
+
+class SelfiesAndId(datasets.GeneratorBasedBuilder):
+    """Small sample of image-text pairs"""
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features({
+                'id_1': datasets.Image(),
+                'id_2': datasets.Image(),
+                'selfie_1': datasets.Image(),
+                'selfie_2': datasets.Image(),
+                'selfie_3': datasets.Image(),
+                'selfie_4': datasets.Image(),
+                'selfie_5': datasets.Image(),
+                'selfie_6': datasets.Image(),
+                'selfie_7': datasets.Image(),
+                'selfie_8': datasets.Image(),
+                'selfie_9': datasets.Image(),
+                'selfie_10': datasets.Image(),
+                'selfie_11': datasets.Image(),
+                'selfie_12': datasets.Image(),
+                'selfie_13': datasets.Image(),
+                'user_id': datasets.Value('string'),
+                'set_id': datasets.Value('string'),
+                'user_race': datasets.Value('string'),
+                'name': datasets.Value('string'),
+                'age': datasets.Value('int8'),
+                'country': datasets.Value('string'),
+                'gender': datasets.Value('string')
+            }),
+            supervised_keys=None,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        images = dl_manager.download(f"{_DATA}images.tar.gz")
+        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
+        images = dl_manager.iter_archive(images)
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN,
+                                    gen_kwargs={
+                                        "images": images,
+                                        'annotations': annotations
+                                    }),
+        ]
+
+    def _generate_examples(self, images, annotations):
+        annotations_df = pd.read_csv(annotations, sep=';')
+        images_data = pd.DataFrame(columns=['URL', 'Bytes'])
+        for idx, (image_path, image) in enumerate(images):
+            images_data.loc[idx] = {'URL': image_path, 'Bytes': image.read()}
+
+        annotations_df = pd.merge(annotations_df,
+                                  images_data,
+                                  how='left',
+                                  on=['URL'])
+        for idx, worker_id in enumerate(pd.unique(annotations_df['UserId'])):
+            annotation = annotations_df.loc[annotations_df['UserId'] ==
+                                            worker_id]
+            annotation = annotation.sort_values(['FName'])
+            data = {
+                row[5].lower(): {
+                    'path': row[6],
+                    'bytes': row[10]
+                } for row in annotation.itertuples()
+            }
+
+            age = annotation.loc[annotation['FName'] ==
+                                 'ID_1']['Age'].values[0]
+            country = annotation.loc[annotation['FName'] ==
+                                     'ID_1']['Country'].values[0]
+            gender = annotation.loc[annotation['FName'] ==
+                                    'ID_1']['Gender'].values[0]
+            set_id = annotation.loc[annotation['FName'] ==
+                                    'ID_1']['SetId'].values[0]
+            user_race = annotation.loc[annotation['FName'] ==
+                                       'ID_1']['UserRace'].values[0]
+            name = annotation.loc[annotation['FName'] ==
+                                  'ID_1']['Name'].values[0]
+
+            data['user_id'] = worker_id
+            data['age'] = age
+            data['country'] = country
+            data['gender'] = gender
+            data['set_id'] = set_id
+            data['user_race'] = user_race
+            data['name'] = name
+
+            yield idx, data
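
The pivot inside `_generate_examples` is the one non-obvious step: the CSV lists one row per file, and rows sharing a `UserId` are folded into a single example keyed by the lowercased `FName` value (`'ID_1'`, `'Selfie_1'`, ...). A standalone sketch of that reshaping follows; the column names mirror the script, but the rows are made up for illustration, and real examples carry image bytes merged in by `URL` rather than bare paths:

import pandas as pd

# One row per file, as in the annotation CSV (note the ';' separator there).
df = pd.DataFrame({
    'UserId': ['u1', 'u1', 'u1'],
    'FName': ['ID_1', 'ID_2', 'Selfie_1'],
    'URL': ['images/u1/id_1.jpg', 'images/u1/id_2.jpg',
            'images/u1/selfie_1.jpg'],
})

# Fold each user's rows into one dict keyed by the lowercased FName,
# the same shape _generate_examples yields.
for user_id, group in df.groupby('UserId'):
    example = {row.FName.lower(): row.URL for row in group.itertuples()}
    example['user_id'] = user_id
    print(example)
# -> {'id_1': 'images/u1/id_1.jpg', 'id_2': 'images/u1/id_2.jpg',
#     'selfie_1': 'images/u1/selfie_1.jpg', 'user_id': 'u1'}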