Languages: English
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: machine-generated
Annotations Creators: machine-generated
Source Datasets: original
Tags: document-ai
Commit 708cafd (1 parent: f77fb83)
maveriq committed: first commit

json-split needs to be checked
Files changed (6):
  1. .gitattributes +1 -0
  2. README.md +28 -1
  3. dev.jsonl +0 -0
  4. docbank.py +167 -0
  5. test.jsonl +0 -0
  6. train.jsonl +3 -0
.gitattributes CHANGED
@@ -52,3 +52,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ train.jsonl filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,30 @@
  ---
- license: apache-2.0
+ dataset_info:
+   features:
+   - name: image
+     dtype: image
+   - name: token
+     sequence: string
+   - name: bounding_box
+     sequence:
+       sequence: uint16
+   - name: color
+     sequence:
+       sequence: uint8
+   - name: font
+     sequence: string
+   - name: label
+     sequence: string
+   splits:
+   - name: train
+     num_bytes: 17139
+     num_examples: 1
+   - name: validation
+     num_bytes: 17139
+     num_examples: 1
+   - name: test
+     num_bytes: 17139
+     num_examples: 1
+   download_size: 0
+   dataset_size: 51417
  ---
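For orientation: every feature except `image` is a page-aligned, per-token list, which is why `token`, `font`, and `label` are declared as sequences. A sketch of the shape of one example under this schema (all values invented for illustration):

```python
# Illustrative only: the shape of one DocBank example under the schema above.
example = {
    "image": "page_ori.jpg",                  # decoded to a PIL image by datasets.Image()
    "token": ["DocBank", "is", "a", "benchmark"],
    "bounding_box": [                         # one [x0, y0, x1, y1] box per token
        [72, 84, 130, 96],
        [135, 84, 148, 96],
        [153, 84, 160, 96],
        [165, 84, 230, 96],
    ],
    "color": [[0, 0, 0]] * 4,                 # per-token RGB values
    "font": ["Times-Roman"] * 4,              # per-token font names
    "label": ["paragraph"] * 4,               # per-token layout labels
}
```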
dev.jsonl ADDED
The diff for this file is too large to render. See raw diff
docbank.py ADDED
@@ -0,0 +1,167 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """DocBank document understanding dataset."""
+
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @misc{li2020docbank,
+     title={DocBank: A Benchmark Dataset for Document Layout Analysis},
+     author={Minghao Li and Yiheng Xu and Lei Cui and Shaohan Huang and Furu Wei and Zhoujun Li and Ming Zhou},
+     year={2020},
+     eprint={2006.01038},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = """\
+ DocBank is a new large-scale dataset that is constructed using a weak supervision approach.
+ It enables models to integrate both the textual and layout information for downstream tasks.
+ The current DocBank dataset includes 500K document pages in total: 400K for training, 50K for validation, and 50K for testing.
+ """
+
+ _HOMEPAGE = "https://doc-analysis.github.io/docbank-page/index.html"
+
+ _LICENSE = "Apache-2.0 license"
+
+
+ class DocBank(datasets.GeneratorBasedBuilder):
+     """DocBank is a dataset for visual document understanding.
+     It enables models to integrate both textual and layout information for downstream tasks."""
+
+     VERSION = datasets.Version("1.1.0")
+
+     @property
+     def manual_download_instructions(self):
+         return """\
+ Please download the DocBank dataset from https://doc-analysis.github.io/docbank-page/index.html,
+ uncompress it, and pass the resulting location as the `data_dir` argument."""
+
+     def _info(self):
+         # Each example is one page. `token`, `bounding_box`, `color`, `font`,
+         # and `label` are parallel per-token lists, so all of them are
+         # declared as sequences rather than scalar values.
+         features = datasets.Features(
+             {
+                 "image": datasets.Image(),
+                 "token": datasets.Sequence(datasets.Value("string")),
+                 "bounding_box": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
+                 "color": datasets.Sequence(datasets.Sequence(datasets.Value("uint8"))),
+                 "font": datasets.Sequence(datasets.Value("string")),
+                 "label": datasets.Sequence(datasets.Value("string")),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # The image and annotation archives must be fetched manually (see
+         # manual_download_instructions); `manual_dir` is the uncompressed copy.
+         self.data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
+
+         # The split index files live next to this script in the repo; the
+         # download manager resolves these relative paths to local files.
+         split_files = dl_manager.download(
+             {"train": "train.jsonl", "dev": "dev.jsonl", "test": "test.jsonl"}
+         )
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs are passed to _generate_examples
+                 gen_kwargs={"filepath": split_files["train"], "split": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"filepath": split_files["dev"], "split": "dev"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"filepath": split_files["test"], "split": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         # Each line of the split file is an `(index, basename)` literal that
+         # points at one annotated page; iterate over every line rather than
+         # reading only the first one.
+         with open(filepath, "rt", encoding="utf-8") as index_file:
+             for index_line in index_file:
+                 index_line = index_line.strip()
+                 if not index_line:
+                     continue
+                 index, basename = eval(index_line)
+
+                 txt_file = os.path.join(self.data_dir, "DocBank_500K_txt", basename + ".txt")
+                 img_file = os.path.join(self.data_dir, "DocBank_500K_ori_img", basename + "_ori.jpg")
+
+                 words = []
+                 bboxes = []
+                 rgbs = []
+                 fontnames = []
+                 structures = []
+
+                 with open(txt_file, "r", encoding="utf8") as fp:
+                     for line in fp:
+                         # Each annotation row has 10 tab-separated fields:
+                         # token, x0, y0, x1, y1, R, G, B, fontname, label.
+                         tts = line.split("\t")
+                         assert len(tts) == 10, f"Incomplete line in file {txt_file}"
+
+                         words.append(tts[0])
+                         bboxes.append(list(map(int, tts[1:5])))
+                         rgbs.append(list(map(int, tts[5:8])))
+                         fontnames.append(tts[8])
+                         structures.append(tts[9].strip())
+
+                 yield index, {
+                     "image": img_file,
+                     "token": words,
+                     "bounding_box": bboxes,
+                     "color": rgbs,
+                     "font": fontnames,
+                     "label": structures,
+                 }
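As a quick check of the flow above, a minimal usage sketch (the script path and data location are illustrative; `data_dir` is what ends up in `dl_manager.manual_dir`):

```python
# Minimal sketch: load DocBank through the script above, assuming the
# DocBank archives were downloaded and uncompressed manually beforehand.
from datasets import load_dataset

ds = load_dataset(
    "docbank.py",               # path to this repo's loading script
    data_dir="~/data/DocBank",  # folder with DocBank_500K_txt/ and DocBank_500K_ori_img/
)

page = ds["train"][0]
print(page["token"][:5], page["label"][:5])  # first tokens and their layout labels
```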
test.jsonl ADDED
The diff for this file is too large to render. See raw diff
train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a70200ff136876c3dea1d5b4430eea05cf40f4af0d08d160a642d2e893c5b7c7
+ size 20655679
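The three lines of train.jsonl above are not the data itself but a Git LFS pointer (hence the new .gitattributes rule earlier in this commit): the roughly 20 MB payload lives on the LFS server and is addressed by its sha256 oid. A toy sketch of how such a pointer decomposes:

```python
# Illustrative only: split a Git LFS pointer file into its key/value fields.
# Real resolution of the payload is done by `git lfs`, not by this snippet.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:a70200ff136876c3dea1d5b4430eea05cf40f4af0d08d160a642d2e893c5b7c7\n"
    "size 20655679\n"
)
print(pointer["oid"], pointer["size"])  # -> sha256:a702... 20655679
```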