Modalities: Image, Text
Size: < 1K
Libraries: Datasets
License: cc-by-4.0
davanstrien (HF staff) committed
Commit 627ad4b
1 parent: 21d0c0d

first draft

Files changed (2):
  1. dataset_infos.json +1 -0
  2. yalt_ai_tabular_dataset.py +123 -0
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "TODO", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image": {"decode": true, "id": null, "_type": "Image"}, "objects": {"feature": {"label": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "bbox": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60704, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 7537, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 47159, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 115400, "size_in_bytes": 376305464}}
yalt_ai_tabular_dataset.py ADDED
@@ -0,0 +1,123 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Script for reading the 'YALTAi: Tabular Dataset'."""
+
+
+ import os
+ from glob import glob
+
+ import datasets
+ from PIL import Image
+
+ _CITATION = """\
+ @dataset{clerice_thibault_2022_6827706,
+   author = {Clérice, Thibault},
+   title = {YALTAi: Tabular Dataset},
+   month = jul,
+   year = 2022,
+   publisher = {Zenodo},
+   version = {1.0.0},
+   doi = {10.5281/zenodo.6827706},
+   url = {https://doi.org/10.5281/zenodo.6827706}
+ }
+ """
+
+ _DESCRIPTION = """TODO"""
+
+ _HOMEPAGE = "https://doi.org/10.5281/zenodo.6827706"
+
+ _LICENSE = "Creative Commons Attribution 4.0 International"
+
+ _URL = "https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1"
+
+ _CATEGORIES = ["Header", "Col", "Marginal", "text"]
+
+
+ class YaltAiTabularDataset(datasets.GeneratorBasedBuilder):
+     """Object Detection for historic manuscripts"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "objects": datasets.Sequence(
+                         {
+                             "label": datasets.ClassLabel(names=_CATEGORIES),
+                             "bbox": datasets.Sequence(
+                                 datasets.Value("int32"), length=4
+                             ),
+                         }
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             description=_DESCRIPTION,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": os.path.join(data_dir, "yaltai-table/", "train")
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"data_dir": os.path.join(data_dir, "yaltai-table/", "val")},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "data_dir": os.path.join(data_dir, "yaltai-table/", "test")
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, data_dir):
+         image_dir = os.path.join(data_dir, "images")
+         label_dir = os.path.join(data_dir, "labels")
+         image_paths = sorted(glob(f"{image_dir}/*.jpg"))
+         label_paths = sorted(glob(f"{label_dir}/*.txt"))
+
+         for idx, (image_path, label_path) in enumerate(zip(image_paths, label_paths)):
+             im = Image.open(image_path)
+             width, height = im.size
+
+             with open(label_path, "r") as f:
+                 lines = f.readlines()
+
+             objects = []
+             for line in lines:
+                 line = line.strip().split()
+                 bbox_class = int(line[0])
+                 bbox_xcenter = int(float(line[1]) * width)
+                 bbox_ycenter = int(float(line[2]) * height)
+                 bbox_width = int(float(line[3]) * width)
+                 bbox_height = int(float(line[4]) * height)
+                 objects.append(
+                     {
+                         "label": bbox_class,
+                         "bbox": [bbox_xcenter, bbox_ycenter, bbox_width, bbox_height],
+                     }
+                 )
+
+             yield idx, {"image": image_path, "objects": objects}
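
Note: the annotation files read by _generate_examples are YOLO-style text files, one object per line as "class x_center y_center width height" with values normalised to [0, 1]; the script scales them to pixels but keeps the centre-based convention, so each stored bbox is [x_center, y_center, width, height] rather than corner coordinates. A small sketch (not part of this commit) of converting such a box to [x_min, y_min, x_max, y_max] for consumers that expect corners:

    def center_bbox_to_corners(bbox):
        """Convert [x_center, y_center, width, height] in pixels to [x_min, y_min, x_max, y_max]."""
        x_c, y_c, w, h = bbox
        return [x_c - w // 2, y_c - h // 2, x_c + w // 2, y_c + h // 2]

    # Example: the YOLO line "1 0.5 0.25 0.2 0.1" on a 1000x800 page is stored above as
    # label=1 ("Col") with bbox=[500, 200, 200, 80]; as corners that is [400, 160, 600, 240].
    print(center_bbox_to_corners([500, 200, 200, 80]))  # -> [400, 160, 600, 240]
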