Upload cvdataset-layoutlmv3.py

#1
by nvm472001 - opened
Files changed (1) hide show
  1. cvdataset-layoutlmv3.py +128 -0
cvdataset-layoutlmv3.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ import datasets
4
+ from PIL import Image
5
+ import pandas as pd
6
+ import json
7
+
8
# Module-level logger wired into the `datasets` library's logging hierarchy.
logger = datasets.logging.get_logger(__name__)
9
+
10
def load_image(image_path):
    """Open the image at *image_path*, force RGB mode, and return it with its size.

    Args:
        image_path: path to an image file readable by Pillow.

    Returns:
        A tuple ``(image, (width, height))``.
    """
    img = Image.open(image_path).convert("RGB")
    width, height = img.size
    return img, (width, height)
16
+
17
def normalize_bbox(bbox, size):
    """Scale a pixel-space box onto the 0-1000 integer grid used by LayoutLM.

    Args:
        bbox: box coordinates in pixels, ``[x0, y0, x1, y1]``.
        size: ``(width, height)`` of the source image in pixels.

    Returns:
        ``[x0, y0, x1, y1]`` with x coordinates scaled by ``1000 / width``
        and y coordinates by ``1000 / height``, truncated to int.
    """
    width, height = size
    # FIX: the last coordinate was scaled by 100 instead of 1000, which made
    # every normalized y1 ten times too small (and broke the > 1000 sanity
    # check downstream). All four coordinates use the same 1000 factor.
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]
24
+
25
+ def _get_drive_url(url):
26
+ base_url = 'https://drive.google.com/uc?id='
27
+ split_url = url.split("/")
28
+
29
+ return base_url + split_url[5]
30
+
31
# Download location(s) for the dataset archive, hosted on Google Drive.
_URLS = [
    _get_drive_url("https://drive.google.com/file/d/1KdDBmGP96lFc7jv2Bf4eqrO121ST-TCh/"),
]

# BibTeX citation exposed through `datasets.DatasetInfo`.
_CITATION = """\
@article{liharding-nguyen,
title={CVDS: A Dataset for CV Form Understanding},
author={MISA - employees},
year={2022},
}
"""

# Short human-readable description exposed through `datasets.DatasetInfo`.
_DESCRIPTION = """\
Dataset for key information extraction with cv form understanding
"""
46
+
47
class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the CV dataset.

    All keyword arguments are forwarded unchanged to
    ``datasets.BuilderConfig``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
55
+
56
class CVDS(datasets.GeneratorBasedBuilder):
    """Dataset builder for CVDS: key-information extraction from CV forms."""

    BUILDER_CONFIGS = [
        DatasetConfig(name="CVDS", version=datasets.Version("1.0.0"), description="CV Dataset"),
    ]

    def _info(self):
        """Declare the feature schema, citation and description of the dataset."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    # FIX: feature dtypes are lowercase; Value("String") raises
                    # ValueError ("String" is not a valid dtype) at load time.
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=['person_name', 'dob_key', 'dob_value', 'gender_key', 'gender_value', 'phonenumber_key', 'phonenumber_value', 'email_key', 'email_value', 'address_key', 'address_value', 'socical_address_value', 'education', 'education_name', 'education_time', 'experience', 'experience_name', 'experience_time', 'information', 'undefined']
                        )
                    ),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage=""
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the train/test splits."""
        download_file = dl_manager.download_and_extract(_URLS)
        dest = Path(download_file[0]) / "data1"

        # FIX: gen_kwargs keys must match the parameter names of
        # `_generate_examples` exactly; the original passed "filepath" while
        # the generator declares "file_path", a TypeError at generation time.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": dest / "train.txt", "dest": dest},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": dest / "test.txt", "dest": dest},
            ),
        ]

    def _generate_examples(self, file_path, dest):
        """Yield ``(guid, example)`` pairs from a JSON-lines annotation file.

        Args:
            file_path: path to a split file where each line is a JSON object
                with "file_name" and "annotations" keys.
            dest: extracted data directory holding images and class_list.txt.
        """
        # class_list.txt maps integer label ids to label names.
        # FIX: "\s" is an invalid escape sequence in a plain string literal
        # (DeprecationWarning, SyntaxWarning on newer Pythons) — use a raw
        # string; the byte content, and thus the parsing, is unchanged.
        df = pd.read_csv(dest / "class_list.txt", delimiter=r"\s", header=None)
        id2label = dict(zip(df[0].tolist(), df[1].tolist()))

        logger.info("⏳ Generating examples from = %s", file_path)

        with open(file_path, "r", encoding="utf8") as f:
            item_list = [line.rstrip('\n\r') for line in f]

        for guid, fname in enumerate(item_list):
            data = json.loads(fname)

            image_path = dest / data['file_name']
            # The image itself is only opened to validate it and obtain its
            # pixel size for bbox normalization.
            image, size = load_image(image_path)

            # FIX: the original read `i["box"][2]. i["box"][3]` — a syntax
            # error (`.` instead of `,`). Indices (6, 7) and (2, 3) pick two
            # corners of what is presumably an 8-value quad box — TODO
            # confirm the corner order against the annotation format.
            bboxes = [[i["box"][6], i["box"][7], i["box"][2], i["box"][3]] for i in data["annotations"]]
            word = [i['text'] for i in data["annotations"]]
            label = [id2label[i["label"]] for i in data["annotations"]]

            bboxes = [normalize_bbox(box, size) for box in bboxes]

            # Report images whose normalized coordinates escape the 0-1000
            # grid (replaces the original nested-loop `flag` counter).
            if any(coord > 1000 for box in bboxes for coord in box):
                print(image_path)

            yield guid, {"id": str(guid), "words": word, "bboxes": bboxes, "ner_tags": label, "image_path": image_path}