nehulagrawal committed
Commit b7c1a08
1 Parent(s): 0d40dbd

Upload table-detection-yolo.py

Files changed (1)
  1. table-detection-yolo.py +124 -0
table-detection-yolo.py ADDED
@@ -0,0 +1,124 @@
+import collections
+import json
+import os
+
+import datasets
+
+
+
+_CATEGORIES = ['bordered', 'borderless']
+_ANNOTATION_FILENAME = "_annotations.coco.json"
+
+
+class TABLEEXTRACTIONConfig(datasets.BuilderConfig):
+    """Builder Config for table-extraction"""
+
+    def __init__(self, data_urls, **kwargs):
+        """
+        BuilderConfig for table-extraction.
+        Args:
+            data_urls: `dict`, name to url to download the zip file from.
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(TABLEEXTRACTIONConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
+        self.data_urls = data_urls
+
+
+class TABLEEXTRACTION(datasets.GeneratorBasedBuilder):
+    """table-extraction object detection dataset"""
+
+    VERSION = datasets.Version("1.0.0")
+    BUILDER_CONFIGS = [
+        TABLEEXTRACTIONConfig(
+            name="full",
+            description="Full version of table-extraction dataset.",
+            data_urls={
+                "train": "https://huggingface.co/datasets/foduucom/table-detection-yolo/resolve/main/data/train.zip",
+                "validation": "https://huggingface.co/datasets/foduucom/table-detection-yolo/resolve/main/data/valid.zip",
+                "test": "https://huggingface.co/datasets/foduucom/table-detection-yolo/resolve/main/data/test.zip",
+            },
+        )
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "image_id": datasets.Value("int64"),
+                "image": datasets.Image(),
+                "width": datasets.Value("int32"),
+                "height": datasets.Value("int32"),
+                "objects": datasets.Sequence(
+                    {
+                        "id": datasets.Value("int64"),
+                        "area": datasets.Value("int64"),
+                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                        "category": datasets.ClassLabel(names=_CATEGORIES),
+                    }
+                ),
+            }
+        )
+        return datasets.DatasetInfo(
+            features=features
+        )
+
+    def _split_generators(self, dl_manager):
+        data_files = dl_manager.download_and_extract(self.config.data_urls)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "folder_dir": data_files["train"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "folder_dir": data_files["validation"],
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "folder_dir": data_files["test"],
+                },
+            ),
+        ]
+
+    def _generate_examples(self, folder_dir):
+        def process_annot(annot, category_id_to_category):
+            return {
+                "id": annot["id"],
+                "area": annot["area"],
+                "bbox": annot["bbox"],
+                "category": category_id_to_category[annot["category_id"]],
+            }
+
+        image_id_to_image = {}
+        idx = 0
+
+        annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
+        with open(annotation_filepath, "r") as f:
+            annotations = json.load(f)
+        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
+        image_id_to_annotations = collections.defaultdict(list)
+        for annot in annotations["annotations"]:
+            image_id_to_annotations[annot["image_id"]].append(annot)
+        filename_to_image = {image["file_name"]: image for image in annotations["images"]}
+
+        for filename in os.listdir(folder_dir):
+            filepath = os.path.join(folder_dir, filename)
+            if filename in filename_to_image:
+                image = filename_to_image[filename]
+                objects = [
+                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
+                ]
+                with open(filepath, "rb") as f:
+                    image_bytes = f.read()
+                yield idx, {
+                    "image_id": image["id"],
+                    "image": {"path": filepath, "bytes": image_bytes},
+                    "width": image["width"],
+                    "height": image["height"],
+                    "objects": objects,
+                }
+                idx += 1
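
The uploaded script is a standard datasets.GeneratorBasedBuilder, so once it sits in the foduucom/table-detection-yolo repository it should be loadable through datasets.load_dataset. The snippet below is a minimal usage sketch, not part of this commit: it assumes the script is fetched as remote code (recent versions of the datasets library require trust_remote_code=True for script-based datasets) and only illustrates the feature layout declared in _info.

from datasets import load_dataset

# Minimal sketch (not part of this commit): load the "full" config defined above.
# Assumes the script lives in the foduucom/table-detection-yolo dataset repo and
# that remote-code loading is permitted by the installed `datasets` version.
ds = load_dataset("foduucom/table-detection-yolo", name="full", trust_remote_code=True)

example = ds["train"][0]
print(example["image_id"], example["width"], example["height"])

# `objects` is a sequence feature: parallel lists of id, area, bbox, and category,
# where category is a ClassLabel index into ['bordered', 'borderless'].
print(example["objects"]["bbox"])
print(example["objects"]["category"])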