README.md CHANGED
@@ -50,6 +50,18 @@ dataset_info:
  - name: boxes
    sequence:
      sequence: int32
+   splits:
+   - name: train
+     num_bytes: 38835143890
+     num_examples: 320000
+   - name: test
+     num_bytes: 4865648030
+     num_examples: 40000
+   - name: validation
+     num_bytes: 4871031282
+     num_examples: 40000
+   download_size: 38779484559
+   dataset_size: 48571823202
---

# Dataset Card for RVL-CDIP
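
As a quick sanity check on the split sizes declared above, a minimal sketch (assuming the `datasets` library and the Hub id `jordyvl/rvl-cdip_easyOCR` used elsewhere in this repo; note this triggers the full RVL-CDIP image download):

    from datasets import load_dataset

    # Load one split and compare against the card metadata (40,000 validation examples).
    ds = load_dataset("jordyvl/rvl-cdip_easyOCR", split="validation")
    assert len(ds) == 40000
    print(ds.features)  # image, label, id, words, boxes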
dataset_infos.json ADDED
@@ -0,0 +1,100 @@
+ {
+   "default": {
+     "description": "The RVL-CDIP (Ryerson Vision Lab Complex Document Information Processing) dataset consists of 400,000 grayscale images in 16 classes, with 25,000 images per class. There are 320,000 training images, 40,000 validation images, and 40,000 test images.\n",
+     "citation": "@inproceedings{harley2015icdar,\n title = {Evaluation of Deep Convolutional Nets for Document Image Classification and Retrieval},\n author = {Adam W Harley and Alex Ufkes and Konstantinos G Derpanis},\n booktitle = {International Conference on Document Analysis and Recognition ({ICDAR})}},\n year = {2015}\n}\n",
+     "homepage": "https://www.cs.cmu.edu/~aharley/rvl-cdip/",
+     "license": "https://www.industrydocuments.ucsf.edu/help/copyright/",
+     "features": {
+       "image": {
+         "decode": true,
+         "id": null,
+         "_type": "Image"
+       },
+       "label": {
+         "num_classes": 16,
+         "names": [
+           "letter",
+           "form",
+           "email",
+           "handwritten",
+           "advertisement",
+           "scientific report",
+           "scientific publication",
+           "specification",
+           "file folder",
+           "news article",
+           "budget",
+           "invoice",
+           "presentation",
+           "questionnaire",
+           "resume",
+           "memo"
+         ],
+         "id": null,
+         "_type": "ClassLabel"
+       },
+       "id": {
+         "_type": "Value",
+         "dtype": "string"
+       },
+       "words": {
+         "_type": "Sequence",
+         "feature": {
+           "dtype": "string",
+           "_type": "Value"
+         }
+       },
+       "boxes": {
+         "_type": "Sequence",
+         "feature": {
+           "_type": "Sequence",
+           "feature": {
+             "dtype": "int32",
+             "_type": "Value"
+           }
+         }
+       }
+     },
+     "post_processed": null,
+     "supervised_keys": {
+       "input": "image",
+       "output": "label"
+     },
+     "task_templates": [
+       {
+         "task": "image-classification",
+         "image_column": "image",
+         "label_column": "label"
+       }
+     ],
+     "builder_name": "rvl_cdip_easyOCR",
+     "config_name": "default",
+     "version": {
+       "version_str": "1.0.0",
+       "description": null,
+       "major": 1,
+       "minor": 0,
+       "patch": 0
+     },
+     "splits": {
+       "train": {
+         "name": "train",
+         "num_bytes": 38816373360,
+         "num_examples": 320000,
+         "dataset_name": "rvl_cdip_easyOCR"
+       },
+       "test": {
+         "name": "test",
+         "num_bytes": 4863300853,
+         "num_examples": 40000,
+         "dataset_name": "rvl_cdip_easyOCR"
+       },
+       "validation": {
+         "name": "validation",
+         "num_bytes": 4868685208,
+         "num_examples": 40000,
+         "dataset_name": "rvl_cdip_easyOCR"
+       }
+     }
+   }
+ }
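
A hedged sketch of reading these exported infos without materializing the dataset, using `get_dataset_infos` (the same helper already imported in test_loader.py below):

    from datasets import get_dataset_infos

    # The "default" config mirrors dataset_infos.json above.
    infos = get_dataset_infos("jordyvl/rvl-cdip_easyOCR")
    print(infos["default"].splits["train"].num_examples)  # 320000
    print(infos["default"].features["label"].names[:3])   # ['letter', 'form', 'email']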
rvl_cdip_easyocr.py → rvl_cdip_easyOCR.py RENAMED
@@ -19,7 +19,7 @@ import os
import numpy as np
from tqdm import tqdm
import datasets
- from pathlib import Path
+

_CITATION = """\
@inproceedings{harley2015icdar,
@@ -52,12 +52,6 @@ _METADATA_URLS = {
    "val": "https://huggingface.co/datasets/rvl_cdip/resolve/main/data/val.txt",
}

- _OCR_URLS = {
-     "train": "https://huggingface.co/datasets/jordyvl/rvl_cdip_easyocr/resolve/main/data/Easy_Train_Data.npy",
-     "test": "https://huggingface.co/datasets/jordyvl/rvl_cdip_easyocr/resolve/main/data/Easy_Test_Data.npy",
-     "val": "https://huggingface.co/datasets/jordyvl/rvl_cdip_easyocr/resolve/main/data/Easy_Valid_Data.npy",
- }
-
_CLASSES = [
    "letter",
    "form",
@@ -77,27 +71,27 @@ _CLASSES = [
    "memo",
]

- _IMAGES_DIR = "images/"
+ _IMAGES_DIR = "images/"


# class OCRConfig(datasets.BuilderConfig):
#     """BuilderConfig for RedCaps."""

- #     def __init__(self, name, OCR_dir, **kwargs):
+ #     def __init__(self, name, **kwargs):
#         """BuilderConfig for RedCaps.
#         Args:
#             **kwargs: keyword arguments forwarded to super.
#         """
#         assert "description" not in kwargs
- #         super(OCRConfig, self).__init__(version=kwargs["version"], name=name, **kwargs)
- #         self.OCR_dir = OCR_dir
+ #         super(OCRConfig, self).__init__(
+ #             version=kwargs['version'], name=name, **kwargs
+ #         )


class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
    """Ryerson Vision Lab Complex Document Information Processing dataset."""

    VERSION = datasets.Version("1.0.0")
-     # BUILDER_CONFIGS = [OCRConfig("default",version=VERSION)]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
@@ -127,7 +121,6 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
            _URLS["rvl-cdip"]
        )  # only download images if need be
        labels_path = dl_manager.download(_METADATA_URLS)
-         ocrs_filepath = dl_manager.download(_OCR_URLS)

        return [
            datasets.SplitGenerator(
@@ -135,7 +128,6 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
                gen_kwargs={
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "labels_filepath": labels_path["train"],
-                     "ocrs_filepath": ocrs_filepath["train"],
                    "split": "train",
                },
            ),
@@ -144,7 +136,6 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
                gen_kwargs={
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "labels_filepath": labels_path["test"],
-                     "ocrs_filepath": ocrs_filepath["test"],
                    "split": "test",
                },
            ),
@@ -153,7 +144,6 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
                gen_kwargs={
                    "archive_iterator": dl_manager.iter_archive(archive_path),
                    "labels_filepath": labels_path["val"],
-                     "ocrs_filepath": ocrs_filepath["val"],
                    "split": "validation",
                },
            ),
@@ -170,19 +160,21 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
        return image_to_class_id

    @staticmethod
-     def _get_image_to_OCR(ocrs_filepath, split):
+     def _get_image_to_OCR(OCR_dir, split):
        def parse_easyOCR_box(box):
            # {'x0': 39, 'y0': 39, 'x1': 498, 'y1': 82, 'width': 459, 'height': 43}
            return (box["x0"], box["y0"], box["x1"], box["y1"])

+         if OCR_dir is None:
+             return {}
        image_to_OCR = {}
        data = np.load(
-             ocrs_filepath,
+             os.path.join(OCR_dir, f"Easy_{split[0].upper()+split[1:]}_Data.npy"),
            allow_pickle=True,
        )
        for ex in tqdm(data, desc="Loading OCR data"):
            w, h = ex["images"][0]["image_width"], ex["images"][0]["image_height"]
-             filename = Path(ex["images"][0]["file_name"]).stem
+             filename = ex["images"][0]["file_name"]
            words = ex["word-level annotations"][0]["ocred_text"]
            box_info = ex["word-level annotations"][0]["ocred_boxes"]
            boxes = [parse_easyOCR_box(box) for box in box_info]
@@ -194,18 +186,15 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
    def _path_to_OCR(image_to_OCR, file_path):
        # obtain text and boxes given file_path
        words, boxes = None, None
-         #imagesv/v/u/b/vub13c00/523466896+-6898.tif
-         #523466896+-6898.jpg
-         file_path = Path(file_path).stem
        if file_path in image_to_OCR:
            words, boxes = image_to_OCR[file_path]
        return words, boxes

-     def _generate_examples(self, archive_iterator, labels_filepath, ocrs_filepath, split):
+     def _generate_examples(self, archive_iterator, labels_filepath, split):
        with open(labels_filepath, encoding="utf-8") as f:
            data = f.read().splitlines()

-         image_to_OCR = self._get_image_to_OCR(ocrs_filepath, split)
+         image_to_OCR = self._get_image_to_OCR(self.config.data_dir, split)
        image_to_class_id = self._get_image_to_class_map(data)

        for file_path, file_obj in archive_iterator:
@@ -214,12 +203,12 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
            class_id = image_to_class_id[file_path]
            label = _CLASSES[class_id]
            words, boxes = self._path_to_OCR(image_to_OCR, file_path)
-             if words is not None: #skipping all items for which we do not have OCR
-                 a = dict(
-                     id=file_path,
-                     image={"path": file_path, "bytes": file_obj.read()},
-                     label=label,
-                     words=words,
-                     boxes=boxes,
-                 )
-                 yield file_path, a
+             a = dict(
+                 id=file_path,
+                 image={"path": file_path, "bytes": file_obj.read()},
+                 label=label,
+                 words=words,
+                 boxes=boxes,
+             )
+             yield file_path, a
+
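
With this change the EasyOCR annotations are no longer fetched from `_OCR_URLS`: `_get_image_to_OCR` now reads `Easy_<Split>_Data.npy` files from the builder's `data_dir` and returns an empty mapping when none is given, so examples are still yielded but with `words`/`boxes` left as None. A minimal usage sketch, assuming the .npy files already sit in a local folder (the path below is illustrative; without `data_files`, the full RVL-CDIP image archive is downloaded):

    from datasets import load_dataset

    # data_dir must contain Easy_Train_Data.npy, Easy_Test_Data.npy, ...
    ds = load_dataset(
        "./rvl_cdip_easyOCR.py",
        split="test",
        data_dir="/path/to/OCRedText",  # hypothetical local OCR directory
    )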
test_loader.py CHANGED
@@ -1,18 +1,4 @@
- from datasets import (
-     load_dataset_builder,
-     get_dataset_config_names,
-     get_dataset_infos,
-     load_dataset,
- )
-
-
- data = load_dataset(
-     "jordyvl/rvl-cdip_easyOCR",
- )
- from pdb import set_trace
-
- set_trace()
-
+ from datasets import load_dataset_builder, get_dataset_config_names, get_dataset_infos, load_dataset

# print(get_dataset_infos('jordyvl/rvl-cdip_easyOCR'))
# print(get_dataset_config_names("jordyvl/rvl-cdip_easyOCR"))
@@ -21,16 +7,12 @@ set_trace()

builder = load_dataset_builder("rvl_cdip")

- from pdb import set_trace
-
- set_trace()
+ from pdb import set_trace; set_trace()
builder = load_dataset_builder("jordyvl/rvl-cdip_easyOCR")
print(builder._info())
print(builder.get_all_exported_dataset_infos())
ds = builder.download_and_prepare()
- from pdb import set_trace
-
- set_trace()
+ from pdb import set_trace; set_trace()
# data = load_dataset(
#     "jordyvl/rvl-cdip_easyOCR",
#     split="test",
@@ -42,17 +24,16 @@ set_trace()
#     #data_dir="/home/jordy/Downloads/OCRedText", # this is the path to the OCR data
# )

- from pdb import set_trace
-
- set_trace()
+ from pdb import set_trace; set_trace()


data = load_dataset(
    "./rvl_cdip_easyOCR.py",
    split="test",
-     # cache_dir="/mnt/lerna/data/HFcache",
-     data_files={ # this is the path to the images if it does not download it
-         "binary": __file__ # "/mnt/lerna/data/HFcache/downloads/c8cc6f89129255a9adf3e97e319ebe2055cf97662135b3ad26c79e9432544db5",
+     #cache_dir="/mnt/lerna/data/HFcache",
+     data_files={ # this is the path to the images if it does not download it
+         "binary": __file__#"/mnt/lerna/data/HFcache/downloads/c8cc6f89129255a9adf3e97e319ebe2055cf97662135b3ad26c79e9432544db5",
    },
-     data_dir="/home/jordy/Downloads/OCRedText", # this is the path to the OCR data
+     data_dir="/home/jordy/Downloads/OCRedText", # this is the path to the OCR data
)
+
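
Once loaded with a valid data_dir, each record should expose the features declared in dataset_infos.json; a small sketch of inspecting one (field names taken from those features, values not guaranteed):

    ex = data[0]
    # words and boxes are parallel sequences; each box is (x0, y0, x1, y1)
    # as produced by parse_easyOCR_box in the builder.
    print(ex["label"], len(ex["words"]))
    print(ex["boxes"][0] if ex["boxes"] else None)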