goatrider committed on
Commit
b20f998
1 Parent(s): 1bda17a

Upload 3 files

finetune_layoutlmv3.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
invoice_dataset_loading.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
layoutlmv3.py ADDED
@@ -0,0 +1,136 @@
import json
import os
import ast
from pathlib import Path
import datasets
from PIL import Image
import pandas as pd

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{,
  title={},
  author={},
  journal={},
  year={},
  volume={}
}
"""

_DESCRIPTION = """\
This is a sample dataset for training a LayoutLMv3 model on custom annotated data.
"""


def load_image(image_path):
    # Open the page image, convert it to RGB and return it with its (width, height).
    image = Image.open(image_path).convert("RGB")
    w, h = image.size
    return image, (w, h)


def normalize_bbox(bbox, size):
    # Scale pixel coordinates [x0, y0, x1, y1] to LayoutLM's 0-1000 coordinate system.
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]
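# Worked example with hypothetical numbers: on an 800x600 px page, the pixel box
# [100, 50, 400, 80] becomes [125, 83, 500, 133], i.e. the 0-1000 coordinate
# range that LayoutLMv3 expects.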

_URLS = []

# Edit the working-directory path here if required. If this file sits in the same
# folder as the "layoutlmv3" data folder, leave it as it is.
data_path = r'./'


class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the InvoiceExtraction dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the InvoiceExtraction dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(DatasetConfig, self).__init__(**kwargs)


class InvoiceExtraction(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        DatasetConfig(name="InvoiceExtraction", version=datasets.Version("1.0.0"),
                      description="InvoiceExtraction dataset"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            # Enter the list of labels that you have here.
                            names=['num_facture', 'date_facture', 'fournisseur', 'client',
                                   'mat_client', 'mat_fournisseur', 'tva', 'pourcentage_tva',
                                   'remise', 'pourcentage_remise', 'timbre', 'fodec', 'ttc',
                                   'devise', 'net_ht']
                        )
                    ),
                    "image_path": datasets.Value("string"),
                    "image": datasets.features.Image(),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="",
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators that read local files under data_path."""
        dest = os.path.join(data_path, 'layoutlmv3')

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(dest, "train.txt"), "dest": dest},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(dest, "test.txt"), "dest": dest},
            ),
        ]
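
    # Expected on-disk layout, assuming the default data_path above:
    #   ./layoutlmv3/train.txt        one dict literal per line (consumed by _generate_examples)
    #   ./layoutlmv3/test.txt         same format as train.txt
    #   ./layoutlmv3/class_list.txt   comma-separated "id,label" rows, no header
    #   ./layoutlmv3/<image files>    referenced by each record's 'file_name'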

    def _generate_examples(self, filepath, dest):
        # Map class ids to label names from class_list.txt (comma-separated, no header).
        df = pd.read_csv(os.path.join(dest, 'class_list.txt'), delimiter=',', header=None)
        id2labels = dict(zip(df[0].tolist(), df[1].tolist()))

        logger.info("⏳ Generating examples from = %s", filepath)

        # Each line of the split file holds one annotated document as a Python dict literal.
        item_list = []
        with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
            for line in f:
                item_list.append(line.rstrip('\n\r'))

        for guid, fname in enumerate(item_list):
            data = ast.literal_eval(fname)
            image_path = os.path.join(dest, data['file_name'])
            image, size = load_image(image_path)

            text = data['tokens']
            label = data['ner_tags']
            boxes = [normalize_bbox(box, size) for box in data['bboxes']]

            # Sanity check: flag documents whose normalized coordinates fall outside 0-1000.
            if any(coord > 1000 for box in boxes for coord in box):
                print(image_path)

            yield guid, {
                "id": str(guid),
                "tokens": text,
                "bboxes": boxes,
                "ner_tags": label,
                "image_path": image_path,
                "image": image,
            }
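
A minimal loading sketch, assuming this script sits next to a layoutlmv3/ folder containing train.txt, test.txt, class_list.txt and the annotated images, and a datasets version that still supports script-based builders; the record shown in the comment uses hypothetical values.

import datasets

# Hypothetical example of a single line in train.txt (a Python dict literal):
# {'file_name': 'invoice_001.png',
#  'tokens': ['Facture', 'N°', '2023-017'],
#  'bboxes': [[40, 30, 120, 55], [125, 30, 150, 55], [155, 30, 260, 55]],
#  'ner_tags': ['num_facture', 'num_facture', 'num_facture']}

dataset = datasets.load_dataset("layoutlmv3.py")  # path to this loading script
print(dataset)                                    # DatasetDict with 'train' and 'test' splits
print(dataset["train"].features["ner_tags"])      # Sequence over the ClassLabel declared in _info()
example = dataset["train"][0]
print(example["tokens"][:3], example["bboxes"][:3])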