Modalities: Image, Text
Formats: parquet
Libraries: Datasets, Dask
License: cc-by-4.0

Commit d8ba8eb (verified) · parquet-converter committed · 1 Parent(s): 3cb5323

Update parquet files

.gitignore DELETED
@@ -1,4 +0,0 @@
- env
- .idea
- *.json
- *.arrow
README.md DELETED
@@ -1,11 +0,0 @@
- ---
- task_categories:
- - object-detection
- license: cc-by-4.0
- pretty_name: LADaS
- size_categories:
- - 1K<n<10K
- ---
-
- # LADaS: Layout Analysis Dataset with Segmonto
-
build.py DELETED
@@ -1,50 +0,0 @@
- import os
- from datasets import load_dataset
- from datasets import config
- from datasets.utils.py_utils import convert_file_size_to_int
- from datasets.table import embed_table_storage
- from tqdm import tqdm
-
-
- def build_parquet(split):
-     # Source: https://discuss.huggingface.co/t/how-to-save-audio-dataset-with-parquet-format-on-disk/66179
-     dataset = load_dataset("./src/LADaS.py", split=split, trust_remote_code=True)
-     max_shard_size = '500MB'
-
-     dataset_nbytes = dataset._estimate_nbytes()
-     max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE)
-     num_shards = int(dataset_nbytes / max_shard_size) + 1
-     num_shards = max(num_shards, 1)
-     shards = (dataset.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards))
-
-     def shards_with_embedded_external_files(shards):
-         for shard in shards:
-             format = shard.format
-             shard = shard.with_format("arrow")
-             shard = shard.map(
-                 embed_table_storage,
-                 batched=True,
-                 batch_size=1000,
-                 keep_in_memory=True,
-             )
-             shard = shard.with_format(**format)
-             yield shard
-
-     shards = shards_with_embedded_external_files(shards)
-
-     os.makedirs("data", exist_ok=True)
-
-     for index, shard in tqdm(
-         enumerate(shards),
-         desc="Save the dataset shards",
-         total=num_shards,
-     ):
-         shard_path = f"data/{split}-{index:05d}-of-{num_shards:05d}.parquet"
-         shard.to_parquet(shard_path)
-
-
- if __name__ == "__main__":
-     build_parquet("train")
-     build_parquet("validation")
-     build_parquet("test")
-
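Note: the shard count in the deleted script is a clamped, ceiling-style division of the estimated dataset size by the shard size. A minimal sketch of that arithmetic, using a hypothetical size:

from datasets.utils.py_utils import convert_file_size_to_int

dataset_nbytes = 1_200_000_000                      # hypothetical estimate (~1.2 GB)
max_shard_size = convert_file_size_to_int("500MB")  # 500_000_000 bytes
num_shards = max(int(dataset_nbytes / max_shard_size) + 1, 1)
print(num_shards)  # 3 shards of roughly 400 MB each

Because of the unconditional + 1, an exact multiple of the shard size still produces one extra, nearly empty shard; the script trades a true ceiling for simplicity.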
data/test-00000-of-00001.parquet → default/test/0000.parquet RENAMED
File without changes
data/train-00000-of-00004.parquet → default/train/0000.parquet RENAMED
File without changes
data/train-00001-of-00004.parquet → default/train/0001.parquet RENAMED
File without changes
data/train-00002-of-00004.parquet → default/train/0002.parquet RENAMED
File without changes
data/train-00003-of-00004.parquet → default/train/0003.parquet RENAMED
File without changes
data/validation-00000-of-00001.parquet → default/validation/0000.parquet RENAMED
File without changes
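After this conversion the loading script below is no longer needed: consumers read the parquet files directly. A minimal loading sketch, assuming the renamed files under the new default/<split>/ layout have been fetched locally:

from datasets import load_dataset

# Globs assume the converted layout sits in the current directory.
ds = load_dataset(
    "parquet",
    data_files={
        "train": "default/train/*.parquet",
        "validation": "default/validation/*.parquet",
        "test": "default/test/*.parquet",
    },
)
print(ds)  # DatasetDict with train/validation/test splits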
src/LADaS.py DELETED
@@ -1,125 +0,0 @@
- import glob
- import os
- from typing import List, Any
-
- import yaml
- import datasets
- from PIL import Image
-
-
- _VERSION = "2024-07-17"
- _URL = f"https://github.com/DEFI-COLaF/LADaS/archive/refs/tags/{_VERSION}.tar.gz"
- _HOMEPAGE = "https://github.com/DEFI-COLaF/LADaS"
- _LICENSE = "CC BY 4.0"
- _CITATION = """\
- @misc{Clerice_Layout_Analysis_Dataset,
-     author = {Clérice, Thibault and Janès, Juliette and Scheithauer, Hugo and Bénière, Sarah and Romary, Laurent and Sagot, Benoit and Bougrelle, Roxane},
-     title = {{Layout Analysis Dataset with SegmOnto (LADaS)}},
-     url = {https://github.com/DEFI-COLaF/LADaS}
- }
- """
-
- _CATEGORIES: List[str] = ["AdvertisementZone", "DigitizationArtefactZone", "DropCapitalZone", "FigureZone",
-                           "FigureZone-FigDesc", "FigureZone-Head", "GraphicZone", "GraphicZone-Decoration",
-                           "GraphicZone-FigDesc", "GraphicZone-Head", "GraphicZone-Maths", "GraphicZone-Part",
-                           "GraphicZone-TextualContent", "MainZone-Date", "MainZone-Entry", "MainZone-Entry-Continued",
-                           "MainZone-Form", "MainZone-Head", "MainZone-Lg", "MainZone-Lg-Continued", "MainZone-List",
-                           "MainZone-List-Continued", "MainZone-Other", "MainZone-P", "MainZone-P-Continued",
-                           "MainZone-Signature", "MainZone-Sp", "MainZone-Sp-Continued",
-                           "MarginTextZone-ManuscriptAddendum", "MarginTextZone-Notes", "MarginTextZone-Notes-Continued",
-                           "NumberingZone", "TitlePageZone", "TitlePageZone-Index", "QuireMarksZone", "RunningTitleZone",
-                           "StampZone", "StampZone-Sticker", "TableZone", "TableZone-Continued", "TableZone-Head"]
-
-
- class LadasConfig(datasets.BuilderConfig):
-     """Builder Config for LADaS"""
-     def __init__(self, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-
-
- class LadasDataset(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version(_VERSION.replace("-", "."))
-     BUILDER_CONFIGS = [
-         LadasConfig(
-             name="full",
-             description="Full version of the dataset"
-         )
-     ]
-
-     def _info(self) -> datasets.DatasetInfo:
-         features = datasets.Features({
-             "image_path": datasets.Value("string"),
-             "image": datasets.Image(),
-             "width": datasets.Value("int32"),
-             "height": datasets.Value("int32"),
-             "objects": datasets.Sequence(
-                 {
-                     "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
-                     "category": datasets.ClassLabel(names=_CATEGORIES),
-                 }
-             )
-         })
-         return datasets.DatasetInfo(
-             features=features,
-             homepage=_HOMEPAGE,
-             citation=_CITATION,
-             license=_LICENSE
-         )
-
-     def _split_generators(self, dl_manager):
-         urls_to_download = _URL
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "local_dir": downloaded_files,
-                     "split": "train"
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "local_dir": downloaded_files,
-                     "split": "valid"
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={
-                     "local_dir": downloaded_files,
-                     "split": "test"
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, local_dir: str, split: str):
-         idx = 0
-
-         for file in glob.glob(os.path.join(local_dir, "*", "data", "*", split, "labels", "*.txt")):
-             objects = []
-             with open(file) as f:
-                 for line in f:
-                     cls, *bbox = line.strip().split()
-                     objects.append({"category": _CATEGORIES[int(cls)], "bbox": list(map(float, bbox))})
-
-             # Derive the image path from the label path:
-             # .../<split>/labels/<name>.txt -> .../<split>/images/<name>.jpg
-             parts = os.path.normpath(file).split(os.sep)
-             image_path = os.path.join(*parts[:-2], "images", parts[-1].replace(".txt", ".jpg"))
-             if file.startswith("/") and not image_path.startswith("/"):
-                 image_path = "/" + image_path
-
-             with open(image_path, "rb") as f:
-                 image_bytes = f.read()
-
-             with Image.open(image_path) as im:
-                 width, height = im.size
-
-             # The key and indices must match the declared features: "image_path"
-             # as a string, built from the path components in `parts`.
-             yield idx, {
-                 "image_path": f"{parts[-4]}/{parts[-1]}",
-                 "image": {"path": image_path, "bytes": image_bytes},
-                 "width": width,
-                 "height": height,
-                 "objects": objects,
-             }
-             idx += 1
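For context, each label file parsed by _generate_examples uses a YOLO-style plain-text format: one object per line, an integer class index into _CATEGORIES followed by four floats (the bbox values; their exact coordinate convention is not stated on this page). A hypothetical line, parsed the same way the deleted script does:

_CATEGORIES = ["AdvertisementZone", "DigitizationArtefactZone",
               "DropCapitalZone", "FigureZone"]  # truncated list from above

line = "3 0.512 0.430 0.210 0.115"  # hypothetical label line
cls, *bbox = line.strip().split()
print(_CATEGORIES[int(cls)], list(map(float, bbox)))
# FigureZone [0.512, 0.43, 0.21, 0.115]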