Datasets
parquet-converter committed • Commit 9c2aa1a • Parent(s): 65d7baf
Update parquet files
Browse files
- .gitattributes +0 -41
- COCO/yalta_ai_tabular_dataset-test.parquet +3 -0
- COCO/yalta_ai_tabular_dataset-train.parquet +3 -0
- COCO/yalta_ai_tabular_dataset-validation.parquet +3 -0
- README.md +0 -327
- YOLO/yalta_ai_tabular_dataset-test.parquet +3 -0
- YOLO/yalta_ai_tabular_dataset-train.parquet +3 -0
- YOLO/yalta_ai_tabular_dataset-validation.parquet +3 -0
- dataset_infos.json +0 -1
- yalta_ai_tabular_dataset.py +0 -242
.gitattributes
DELETED
@@ -1,41 +0,0 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
COCO/yalta_ai_tabular_dataset-test.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0643334a8c58e7fd4d48a405243f25639027c6b448c2e7fda3a9f2bb0ecb7665
size 59596060
COCO/yalta_ai_tabular_dataset-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b35b473d5e9cb0e3731373626930496d4f979b05a7841c782d6e03d4b7f3cc19
size 281121320
COCO/yalta_ai_tabular_dataset-validation.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e53e21bb53ab575331949cfcbf953d00c05715731ccfb129d6975880740e4945
size 37182479
README.md
DELETED
@@ -1,327 +0,0 @@
---
annotations_creators:
- expert-generated
language: []
language_creators:
- expert-generated
license:
- cc-by-4.0
multilinguality: []
pretty_name: YALTAi Tabular Dataset
size_categories:
- n<1K
source_datasets: []
tags:
- manuscripts
- LAM
task_categories:
- object-detection
task_ids: []
---

# YALTAi Tabular Dataset

## Table of Contents
- [YALTAi Tabular Dataset](#yaltai-tabular-dataset)
  - [Table of Contents](#table-of-contents)
  - [Dataset Description](#dataset-description)
    - [Dataset Summary](#dataset-summary)
    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Dataset Structure](#dataset-structure)
    - [Data Instances](#data-instances)
    - [Data Fields](#data-fields)
    - [Data Splits](#data-splits)
  - [Dataset Creation](#dataset-creation)
    - [Curation Rationale](#curation-rationale)
    - [Source Data](#source-data)
      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
      - [Who are the source language producers?](#who-are-the-source-language-producers)
    - [Annotations](#annotations)
      - [Annotation process](#annotation-process)
      - [Who are the annotators?](#who-are-the-annotators)
    - [Personal and Sensitive Information](#personal-and-sensitive-information)
  - [Considerations for Using the Data](#considerations-for-using-the-data)
    - [Social Impact of Dataset](#social-impact-of-dataset)
    - [Discussion of Biases](#discussion-of-biases)
    - [Other Known Limitations](#other-known-limitations)
  - [Additional Information](#additional-information)
    - [Dataset Curators](#dataset-curators)
    - [Licensing Information](#licensing-information)
    - [Citation Information](#citation-information)
    - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [https://doi.org/10.5281/zenodo.6827706](https://doi.org/10.5281/zenodo.6827706)
- **Paper:** [https://arxiv.org/abs/2207.11230](https://arxiv.org/abs/2207.11230)

### Dataset Summary

This dataset contains a subset of the data used in the paper [You Actually Look Twice At it (YALTAi): using an object detection approach instead of region segmentation within the Kraken engine](https://arxiv.org/abs/2207.11230). The paper proposes treating page-layout recognition on historical documents as an object-detection task, in contrast with the usual pixel-segmentation approach. This dataset covers pages with tabular content, annotated with the object classes "Header", "Col", "Marginal" and "text".

### Supported Tasks and Leaderboards

- `object-detection`: This dataset can be used to train a model for object detection on historic document images.

## Dataset Structure

This dataset has two configurations. Both cover the same data and annotations, but provide the annotations in different forms to make it easier to integrate the data with existing processing pipelines.

- The first configuration, `YOLO`, uses the data's original format.
- The second configuration, `COCO`, converts the YOLO format into a format closer to the COCO annotation format. This makes it easier to work with the `feature_extractor`s of the object-detection models in `Transformers`, which expect data in a COCO-style format. Loading either configuration is sketched below.
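
As a quick sketch of loading both configurations with the `datasets` library (the repository id below is a placeholder; substitute this dataset's actual id):

```python
from datasets import load_dataset

# "user/yalta_ai_tabular_dataset" is a placeholder repo id, not the real one.
yolo = load_dataset("user/yalta_ai_tabular_dataset", "YOLO", split="train")
coco = load_dataset("user/yalta_ai_tabular_dataset", "COCO", split="train")

print(yolo[0]["objects"]["label"])    # YOLO: parallel lists of labels and bboxes
print(coco[0]["objects"][0]["bbox"])  # COCO: a list with one dict per annotated object
```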
### Data Instances

An example instance from the COCO config:

```python
{'height': 2944,
 'image': <PIL.PngImagePlugin.PngImageFile image mode=L size=2064x2944 at 0x7FA413CDA210>,
 'image_id': 0,
 'objects': [{'area': 435956, 'bbox': [0.0, 244.0, 1493.0, 292.0], 'category_id': 0, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 88234, 'bbox': [305.0, 127.0, 562.0, 157.0], 'category_id': 2, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 5244, 'bbox': [1416.0, 196.0, 92.0, 57.0], 'category_id': 2, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 5720, 'bbox': [1681.0, 182.0, 88.0, 65.0], 'category_id': 2, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 374085, 'bbox': [0.0, 540.0, 163.0, 2295.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 577599, 'bbox': [104.0, 537.0, 253.0, 2283.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 598670, 'bbox': [304.0, 533.0, 262.0, 2285.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 56, 'bbox': [284.0, 539.0, 8.0, 7.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 1868412, 'bbox': [498.0, 513.0, 812.0, 2301.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 307800, 'bbox': [1250.0, 512.0, 135.0, 2280.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 494109, 'bbox': [1330.0, 503.0, 217.0, 2277.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 52, 'bbox': [1734.0, 1013.0, 4.0, 13.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []},
             {'area': 90666, 'bbox': [0.0, 1151.0, 54.0, 1679.0], 'category_id': 1, 'id': 0, 'image_id': '0', 'iscrowd': False, 'segmentation': []}],
 'width': 2064}
```
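
Because the `COCO` config mirrors COCO's annotation layout, an instance like the one above can be passed more or less directly to an object-detection preprocessor from `Transformers`. A minimal sketch with DETR (assuming a `transformers` version that ships `DetrFeatureExtractor`; newer versions rename this class, so check your installation):

```python
from transformers import DetrFeatureExtractor

feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50")

example = coco[0]  # an instance from the COCO config, as loaded earlier
encoding = feature_extractor(
    images=example["image"].convert("RGB"),
    annotations={"image_id": example["image_id"], "annotations": example["objects"]},
    return_tensors="pt",
)
print(encoding["pixel_values"].shape)
```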

An example instance from the YOLO config:

```python
{'image': <PIL.PngImagePlugin.PngImageFile image mode=L size=2064x2944 at 0x7FAA140F2450>,
 'objects': {'bbox': [[747, 390, 1493, 292],
                      [586, 206, 562, 157],
                      [1463, 225, 92, 57],
                      [1725, 215, 88, 65],
                      [80, 1688, 163, 2295],
                      [231, 1678, 253, 2283],
                      [435, 1675, 262, 2285],
                      [288, 543, 8, 7],
                      [905, 1663, 812, 2301],
                      [1318, 1653, 135, 2280],
                      [1439, 1642, 217, 2277],
                      [1737, 1019, 4, 13],
                      [26, 1991, 54, 1679]],
             'label': [0, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]}}
```
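
For a quick visual sanity check of a YOLO instance, the centre-format boxes can be drawn with Pillow. A small sketch (`yolo` as loaded earlier; boxes are `[x_center, y_center, width, height]` in absolute pixels):

```python
from PIL import ImageDraw

example = yolo[0]
image = example["image"].convert("RGB")
draw = ImageDraw.Draw(image)
for xc, yc, bw, bh in example["objects"]["bbox"]:
    # Convert each centre-format box to the corner coordinates Pillow expects.
    draw.rectangle(
        [xc - bw / 2, yc - bh / 2, xc + bw / 2, yc + bh / 2],
        outline="red",
        width=3,
    )
image.save("yolo_example.png")
```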

### Data Fields

The fields for the YOLO config:

- `image`: the image
- `objects`: the annotations for the image, consisting of:
  - `bbox`: a list of bounding boxes (absolute-pixel `[x_center, y_center, width, height]`; see the conversion sketch after these lists)
  - `label`: a list of labels, one per bounding box

The fields for the COCO config:

- `height`: height of the image
- `width`: width of the image
- `image`: the image
- `image_id`: id for the image
- `objects`: annotations in COCO format, consisting of a list of dictionaries with the following keys:
  - `id`: id for the annotation
  - `area`: area of the bounding box
  - `bbox`: a COCO-style `[x_min, y_min, width, height]` bounding box for the object
  - `category_id`: a label for the object
  - `image_id`: id for the image
  - `iscrowd`: the COCO `iscrowd` flag
  - `segmentation`: COCO segmentation annotations (empty in this case but kept for compatibility with other processing scripts)

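Note that the two configs encode the same boxes differently: `YOLO` keeps absolute-pixel `[x_center, y_center, width, height]`, while `COCO` uses `[x_min, y_min, width, height]` (see the loading script at the end of this diff). A minimal conversion sketch, with a hypothetical helper name:

```python
def yolo_to_coco_bbox(bbox):
    """Convert an absolute-pixel [x_center, y_center, width, height] box
    to a COCO-style [x_min, y_min, width, height] box."""
    x_center, y_center, width, height = bbox
    return [x_center - width / 2, y_center - height / 2, width, height]

# First box of the YOLO example above; the loading script truncates to int,
# which is why the COCO example shows 0.0 rather than 0.5 here.
print(yolo_to_coco_bbox([747, 390, 1493, 292]))  # -> [0.5, 244.0, 1493, 292]
```
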
### Data Splits

The dataset contains train, validation and test splits with the following number of examples per split:

|          | train | validation | test |
|----------|-------|------------|------|
| examples | 196   | 22         | 135  |

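The split sizes can be checked directly on the loaded `DatasetDict` (same placeholder repo id as above):

```python
from datasets import load_dataset

ds = load_dataset("user/yalta_ai_tabular_dataset", "YOLO")  # placeholder repo id
print({split: ds[split].num_rows for split in ds})
# expected: {'train': 196, 'validation': 22, 'test': 135}
```
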
## Dataset Creation

> [this] dataset was produced using a single source, the Lectaurep Repertoires dataset [Rostaing et al., 2021], which served as a basis for only the training and development split. The testset is composed of original data, from various documents, from the 17th century up to the early 20th with a single soldier war report. The test set is voluntarily very different and out of domain with column borders that are not drawn nor printed in certain cases, layout in some kind of masonry layout. (p. 8)

### Curation Rationale

This dataset was created to produce a simplified version of the [Lectaurep Repertoires dataset](https://github.com/HTR-United/lectaurep-repertoires), which was found to contain:

> around 16 different ways to describe columns, from Col1 to Col7, the case-different col1-col7 and finally ColPair and ColOdd, which we all reduced to Col (p. 8)

### Source Data

#### Initial Data Collection and Normalization

The LECTAUREP (LECTure Automatique de REPertoires) project, which began in 2018, is a joint initiative of the Minutier central des notaires de Paris of the National Archives, the [ALMAnaCH (Automatic Language Modeling and Analysis & Computational Humanities)](https://www.inria.fr/en/almanach) team at Inria and the EPHE (Ecole Pratique des Hautes Etudes), in partnership with the Ministry of Culture.

> The lectaurep-bronod corpus brings together 100 pages from the repertoire of Maître Louis Bronod (1719-1765), notary in Paris from December 13, 1719 to July 23, 1765. The pages concerned were written during the years 1742 to 1745.

#### Who are the source language producers?

[More information needed]

### Annotations

The table below gives the number of annotations per class and split, together with their average and median area:

| Class    | Train | Dev | Test | Total | Average area | Median area |
|----------|-------|-----|------|-------|--------------|-------------|
| Col      | 724   | 105 | 829  | 1658  | 9.32         | 6.33        |
| Header   | 103   | 15  | 42   | 160   | 6.78         | 7.10        |
| Marginal | 60    | 8   | 0    | 68    | 0.70         | 0.71        |
| Text     | 13    | 5   | 0    | 18    | 0.01         | 0.00        |

#### Annotation process

[More information needed]

#### Who are the annotators?

[More information needed]

### Personal and Sensitive Information

This data does not contain information relating to living individuals.

## Considerations for Using the Data

### Social Impact of Dataset

A growing number of datasets cover page layout for historical documents. This dataset offers a different approach to annotating them, focusing on object detection rather than pixel-level annotations. Improving document layout recognition can have a positive impact on downstream tasks, in particular Optical Character Recognition.

### Discussion of Biases

Historical documents contain a wide variety of page layouts, so the ability of models trained on this dataset to transfer to documents with very different layouts is not guaranteed.

### Other Known Limitations

[More information needed]

## Additional Information

### Dataset Curators

[More information needed]

### Licensing Information

[Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0/legalcode)

### Citation Information

```bibtex
@dataset{clerice_thibault_2022_6827706,
  author    = {Clérice, Thibault},
  title     = {YALTAi: Tabular Dataset},
  month     = jul,
  year      = 2022,
  publisher = {Zenodo},
  version   = {1.0.0},
  doi       = {10.5281/zenodo.6827706},
  url       = {https://doi.org/10.5281/zenodo.6827706}
}
```

[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.6827706.svg)](https://doi.org/10.5281/zenodo.6827706)

### Contributions

Thanks to [@davanstrien](https://github.com/davanstrien) for adding this dataset.
YOLO/yalta_ai_tabular_dataset-test.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f4e7452949675136b0ca870da37baf651dd0ef060fd54c713c0871323c5bb753
size 59584052
YOLO/yalta_ai_tabular_dataset-train.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be47a99a2a345ef9d32a7981a6b15a35a36b14ea07f2feb208f311462dd55bca
size 281109368
YOLO/yalta_ai_tabular_dataset-validation.parquet
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:16335e2329430bde39ad3a19dd89841dbc49224bbe2ac9025c8e9177f0b7eed1
size 37177320
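
Since this commit replaces the loading script with plain parquet shards, the files can also be read directly, e.g. with pandas. A small sketch using the paths added in this commit:

```python
import pandas as pd

# One shard per split and per config, as laid out in this commit.
train = pd.read_parquet("YOLO/yalta_ai_tabular_dataset-train.parquet")
print(len(train), list(train.columns))
```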
dataset_infos.json
DELETED
@@ -1 +0,0 @@
{"default": {"description": "TODO", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image": {"decode": true, "id": null, "_type": "Image"}, "objects": {"feature": {"label": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "bbox": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60704, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 7537, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 47159, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 115400, "size_in_bytes": 376305464}, "YOLO": {"description": "Yalt AI Tabular Dataset", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image": {"decode": true, "id": null, "_type": "Image"}, "objects": {"feature": {"label": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "bbox": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "YOLO", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 60704, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 7537, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 47159, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 115400, "size_in_bytes": 376305464}, "COCO": {"description": "Yalt AI Tabular Dataset", "citation": " @dataset{clerice_thibault_2022_6827706,\n author = {Cl\u00e9rice, Thibault},\n title = {YALTAi: Tabular Dataset},\n month = jul,\n year = 2022,\n publisher = {Zenodo},\n version = {1.0.0},\n doi = {10.5281/zenodo.6827706},\n url = {https://doi.org/10.5281/zenodo.6827706}\n}\n", "homepage": "https://doi.org/10.5281/zenodo.6827706", "license": "Creative Commons Attribution 4.0 International", "features": {"image_id": {"dtype": "int64", "id": null, "_type": "Value"}, "image": {"decode": true, "id": null, "_type": "Image"}, "width": {"dtype": "int32", "id": null, "_type": "Value"}, "height": {"dtype": "int32", "id": null, "_type": "Value"}, "objects": [{"category_id": {"num_classes": 4, "names": ["Header", "Col", "Marginal", "text"], "id": null, "_type": "ClassLabel"}, "image_id": {"dtype": "string", "id": null, "_type": "Value"}, "id": {"dtype": "int64", "id": null, "_type": "Value"}, "area": {"dtype": "int64", "id": null, "_type": "Value"}, "bbox": {"feature": {"dtype": "float32", "id": null, "_type": "Value"}, "length": 4, "id": null, "_type": "Sequence"}, "segmentation": [[{"dtype": "float32", "id": null, "_type": "Value"}]], "iscrowd": {"dtype": "bool", "id": null, "_type": "Value"}}]}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "yalt_ai_tabular_dataset", "config_name": "COCO", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 87171, "num_examples": 196, "dataset_name": "yalt_ai_tabular_dataset"}, "validation": {"name": "validation", "num_bytes": 11225, "num_examples": 22, "dataset_name": "yalt_ai_tabular_dataset"}, "test": {"name": "test", "num_bytes": 71491, "num_examples": 135, "dataset_name": "yalt_ai_tabular_dataset"}}, "download_checksums": {"https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1": {"num_bytes": 376190064, "checksum": "5b312faf097939302fb98ab0a8b35c007962d88978ea9dc28d2f560b89dc0657"}}, "download_size": 376190064, "post_processing_size": null, "dataset_size": 169887, "size_in_bytes": 376359951}}
yalta_ai_tabular_dataset.py
DELETED
@@ -1,242 +0,0 @@
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for reading the 'You Actually Look Twice At it (YALTAi)' dataset."""


import os
from glob import glob

import datasets
from PIL import Image

_CITATION = """\
@dataset{clerice_thibault_2022_6827706,
  author    = {Clérice, Thibault},
  title     = {YALTAi: Tabular Dataset},
  month     = jul,
  year      = 2022,
  publisher = {Zenodo},
  version   = {1.0.0},
  doi       = {10.5281/zenodo.6827706},
  url       = {https://doi.org/10.5281/zenodo.6827706}
}
"""

_DESCRIPTION = """Yalt AI Tabular Dataset"""

_HOMEPAGE = "https://doi.org/10.5281/zenodo.6827706"

_LICENSE = "Creative Commons Attribution 4.0 International"

_URL = "https://zenodo.org/record/6827706/files/yaltai-table.zip?download=1"

_CATEGORIES = ["Header", "Col", "Marginal", "text"]


class YaltAiTabularDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for YaltAiTabularDataset."""

    def __init__(self, name, **kwargs):
        """BuilderConfig for YaltAiTabularDataset."""
        super(YaltAiTabularDatasetConfig, self).__init__(
            version=datasets.Version("1.0.0"), name=name, description=None, **kwargs
        )


class YaltAiTabularDataset(datasets.GeneratorBasedBuilder):
    """Object detection for historic manuscripts."""

    BUILDER_CONFIGS = [
        YaltAiTabularDatasetConfig("YOLO"),
        YaltAiTabularDatasetConfig("COCO"),
    ]

    def _info(self):
        if self.config.name == "COCO":
            features = datasets.Features(
                {
                    "image_id": datasets.Value("int64"),
                    "image": datasets.Image(),
                    "width": datasets.Value("int32"),
                    "height": datasets.Value("int32"),
                }
            )
            object_dict = {
                "category_id": datasets.ClassLabel(names=_CATEGORIES),
                "image_id": datasets.Value("string"),
                "id": datasets.Value("int64"),
                "area": datasets.Value("int64"),
                "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                "segmentation": [[datasets.Value("float32")]],
                "iscrowd": datasets.Value("bool"),
            }
            features["objects"] = [object_dict]
        if self.config.name == "YOLO":
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            "label": datasets.ClassLabel(names=_CATEGORIES),
                            "bbox": datasets.Sequence(
                                datasets.Value("int32"), length=4
                            ),
                        }
                    ),
                }
            )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": os.path.join(data_dir, "yaltai-table/", "train")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": os.path.join(data_dir, "yaltai-table/", "val")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": os.path.join(data_dir, "yaltai-table/", "test")},
            ),
        ]

    def _generate_examples(self, data_dir):
        def create_annotation_from_yolo_format(
            min_x,
            min_y,
            width,
            height,
            image_id,
            category_id,
            annotation_id,
            segmentation=False,
        ):
            # Build a COCO-style annotation dict from a top-left (x, y, w, h) box.
            bbox = (float(min_x), float(min_y), float(width), float(height))
            area = width * height
            max_x = min_x + width
            max_y = min_y + height
            if segmentation:
                # Rectangular polygon matching the bounding box, in COCO polygon order.
                seg = [[min_x, min_y, max_x, min_y, max_x, max_y, min_x, max_y]]
            else:
                seg = []
            return {
                "id": annotation_id,
                "image_id": image_id,
                "bbox": bbox,
                "area": area,
                "iscrowd": 0,
                "category_id": category_id,
                "segmentation": seg,
            }

        image_dir = os.path.join(data_dir, "images")
        label_dir = os.path.join(data_dir, "labels")
        image_paths = sorted(glob(f"{image_dir}/*.jpg"))
        label_paths = sorted(glob(f"{label_dir}/*.txt"))
        if self.config.name == "COCO":
            for idx, (image_path, label_path) in enumerate(
                zip(image_paths, label_paths)
            ):
                image_id = idx
                annotations = []
                image = Image.open(image_path)  # Possibly convert to RGB?
                w, h = image.size
                with open(label_path, "r") as f:
                    lines = f.readlines()
                for line in lines:
                    # Each YOLO label row is: class x_center y_center width height,
                    # with the last four values normalised to [0, 1].
                    line = line.strip().split()
                    category_id = int(line[0])
                    x_center = float(line[1])
                    y_center = float(line[2])
                    width = float(line[3])
                    height = float(line[4])

                    # Scale the normalised values back to absolute pixels.
                    float_x_center = w * x_center
                    float_y_center = h * y_center
                    float_width = w * width
                    float_height = h * height

                    # Convert from center format to COCO's top-left corner format.
                    min_x = int(float_x_center - float_width / 2)
                    min_y = int(float_y_center - float_height / 2)
                    width = int(float_width)
                    height = int(float_height)

                    annotation = create_annotation_from_yolo_format(
                        min_x,
                        min_y,
                        width,
                        height,
                        image_id,
                        category_id,
                        image_id,  # annotation ids are not unique: the image id is reused
                    )
                    annotations.append(annotation)

                example = {
                    "image_id": image_id,
                    "image": image,
                    "width": w,
                    "height": h,
                    "objects": annotations,
                }
                yield idx, example
        if self.config.name == "YOLO":
            for idx, (image_path, label_path) in enumerate(
                zip(image_paths, label_paths)
            ):
                im = Image.open(image_path)
                width, height = im.size
                with open(label_path, "r") as f:
                    lines = f.readlines()
                objects = []
                for line in lines:
                    line = line.strip().split()
                    bbox_class = int(line[0])
                    # Keep the boxes in center format, but in absolute pixels.
                    bbox_xcenter = int(float(line[1]) * width)
                    bbox_ycenter = int(float(line[2]) * height)
                    bbox_width = int(float(line[3]) * width)
                    bbox_height = int(float(line[4]) * height)
                    objects.append(
                        {
                            "label": bbox_class,
                            "bbox": [
                                bbox_xcenter,
                                bbox_ycenter,
                                bbox_width,
                                bbox_height,
                            ],
                        }
                    )

                yield idx, {
                    "image": image_path,
                    "objects": objects,
                }
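
To make the label parsing above concrete: a YOLO label row stores `class x_center y_center width height` with the geometry normalised to [0, 1], and the script scales it back by the page size. A worked sketch with a hypothetical row that reproduces the first object of the COCO example instance (page size 2064x2944):

```python
# Mirrors the arithmetic in _generate_examples / create_annotation_from_yolo_format.
w, h = 2064, 2944  # page size from the example instance in the README
line = "0 0.361919 0.132473 0.723401 0.099185".split()  # hypothetical label row

x_center, y_center = w * float(line[1]), h * float(line[2])
box_w, box_h = w * float(line[3]), h * float(line[4])
min_x, min_y = int(x_center - box_w / 2), int(y_center - box_h / 2)

print([min_x, min_y, int(box_w), int(box_h)])  # -> [0, 244, 1493, 292], the first COCO bbox
```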