# Loading script for the TrainingDataPro hand-gesture-recognition-dataset.
# It downloads the video archive and CSV annotations and yields one example
# per person, grouping that person's five gesture videos into a single record.
import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {hand-gesture-recognition-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of videos showcasing individuals demonstrating 5 different
hand gestures (*"one", "four", "small", "fist", and "me"*). Each video captures
a person prominently displaying a single hand gesture, allowing for accurate
identification and differentiation of the gestures.
The dataset offers a diverse range of individuals performing the gestures,
enabling the exploration of variations in hand shapes, sizes, and movements
across different individuals.
The videos in the dataset are recorded in reasonable lighting conditions and
with adequate resolution, to ensure that the hand gestures can be easily
observed and studied.
"""

_NAME = 'hand-gesture-recognition-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = "cc-by-nc-nd-4.0"

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class HandGestureRecognitionDataset(datasets.GeneratorBasedBuilder):
    """Builder that groups each person's five gesture videos into one example."""

    def _info(self):
        return datasets.DatasetInfo(description=_DESCRIPTION,
                                    features=datasets.Features({
                                        'set_id': datasets.Value('int32'),
                                        'fist': datasets.Value('string'),
                                        'four': datasets.Value('string'),
                                        'me': datasets.Value('string'),
                                        'one': datasets.Value('string'),
                                        'small': datasets.Value('string')
                                    }),
                                    supervised_keys=None,
                                    homepage=_HOMEPAGE,
                                    citation=_CITATION,
                                    license=_LICENSE)

    def _split_generators(self, dl_manager):
        # Download the video archive and the CSV annotations, then iterate over
        # the extracted file paths.
        files = dl_manager.download_and_extract(f"{_DATA}files.zip")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        files = dl_manager.iter_files(files)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "files": files,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, files, annotations):
        # Annotations CSV shipped with the data (parsed but not merged into
        # the examples below).
        annotations_df = pd.read_csv(annotations, sep=';')

        # Group the sorted file paths into sets of five videos, one per
        # gesture, all recorded by the same person.
        files = sorted(files)
        files = [files[i:i + 5] for i in range(0, len(files), 5)]
        for idx, files_set in enumerate(files):
            # The set id is encoded as the third component of the extracted
            # file path.
            set_id = int(files_set[0].split('/')[2])
            data = {'set_id': set_id}

            # Assign each video to its gesture column based on the file name.
            for file in files_set:
                file_name = file.split('/')[3]
                if 'fist' in file_name.lower():
                    data['fist'] = file
                elif 'four' in file_name.lower():
                    data['four'] = file
                elif 'me' in file_name.lower():
                    data['me'] = file
                elif 'one' in file_name.lower():
                    data['one'] = file
                elif 'small' in file_name.lower():
                    data['small'] = file
            yield idx, data
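

# ---------------------------------------------------------------------------
# Usage sketch: a minimal example, assuming this file serves as the loading
# script of the TrainingDataPro/hand-gesture-recognition-dataset repository
# referenced in _HOMEPAGE. Depending on the installed `datasets` version,
# loading a script-based dataset may additionally require
# `trust_remote_code=True`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "TrainingDataPro/hand-gesture-recognition-dataset", split="train")
    # Each record holds a set id and local paths to that person's five videos.
    print(dataset[0]["set_id"], dataset[0]["fist"])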