Datasets:

Languages:
English
Multilinguality:
monolingual
Size Categories:
100K<n<1M
Language Creators:
found
Annotations Creators:
found
Source Datasets:
extended|iit_cdip
ArXiv:
License:
jordyvl jpizarrom committed on
Commit
39d9170
1 Parent(s): 2cfed7f

use dl_manager to get the ocrs files (#5)

Browse files

- use dl_manager to get the ocrs files (05f5fb2c7111a7cffc2fda58781408a85640979b)


Co-authored-by: Juan Pizarro <jpizarrom@users.noreply.huggingface.co>

Files changed (1) hide show
  1. rvl_cdip_easyocr.py +14 -10
rvl_cdip_easyocr.py CHANGED
@@ -52,6 +52,12 @@ _METADATA_URLS = {
52
  "val": "https://huggingface.co/datasets/rvl_cdip/resolve/main/data/val.txt",
53
  }
54
 
 
 
 
 
 
 
55
  _CLASSES = [
56
  "letter",
57
  "form",
@@ -72,10 +78,6 @@ _CLASSES = [
72
  ]
73
 
74
  _IMAGES_DIR = "images/"
75
- # hardcoded to not get stuck in annoying IO and LFS problems in Hub
76
- _OCR_DIR = "/cw/liir_data/NoCsBack/jordy/BDPC"
77
- _OCR_DIR = _OCR_DIR if os.path.exists(_OCR_DIR) else "data/"
78
-
79
 
80
 
81
  # class OCRConfig(datasets.BuilderConfig):
@@ -125,6 +127,7 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
125
  _URLS["rvl-cdip"]
126
  ) # only download images if need be
127
  labels_path = dl_manager.download(_METADATA_URLS)
 
128
 
129
  return [
130
  datasets.SplitGenerator(
@@ -132,6 +135,7 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
132
  gen_kwargs={
133
  "archive_iterator": dl_manager.iter_archive(archive_path),
134
  "labels_filepath": labels_path["train"],
 
135
  "split": "train",
136
  },
137
  ),
@@ -140,6 +144,7 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
140
  gen_kwargs={
141
  "archive_iterator": dl_manager.iter_archive(archive_path),
142
  "labels_filepath": labels_path["test"],
 
143
  "split": "test",
144
  },
145
  ),
@@ -148,6 +153,7 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
148
  gen_kwargs={
149
  "archive_iterator": dl_manager.iter_archive(archive_path),
150
  "labels_filepath": labels_path["val"],
 
151
  "split": "validation",
152
  },
153
  ),
@@ -164,16 +170,14 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
164
  return image_to_class_id
165
 
166
  @staticmethod
167
- def _get_image_to_OCR(OCR_dir, split):
168
  def parse_easyOCR_box(box):
169
  # {'x0': 39, 'y0': 39, 'x1': 498, 'y1': 82, 'width': 459, 'height': 43}
170
  return (box["x0"], box["y0"], box["x1"], box["y1"])
171
 
172
- if OCR_dir is None:
173
- return {}
174
  image_to_OCR = {}
175
  data = np.load(
176
- os.path.join(OCR_dir, f"Easy_{split[0].upper()+split[1:]}_Data.npy"),
177
  allow_pickle=True,
178
  )
179
  for ex in tqdm(data, desc="Loading OCR data"):
@@ -197,11 +201,11 @@ class RvlCdipEasyOcr(datasets.GeneratorBasedBuilder):
197
  words, boxes = image_to_OCR[file_path]
198
  return words, boxes
199
 
200
- def _generate_examples(self, archive_iterator, labels_filepath, split):
201
  with open(labels_filepath, encoding="utf-8") as f:
202
  data = f.read().splitlines()
203
 
204
- image_to_OCR = self._get_image_to_OCR(_OCR_DIR, split)
205
  image_to_class_id = self._get_image_to_class_map(data)
206
 
207
  for file_path, file_obj in archive_iterator:
 
52
  "val": "https://huggingface.co/datasets/rvl_cdip/resolve/main/data/val.txt",
53
  }
54
 
55
+ _OCR_URLS = {
56
+ "train": "https://huggingface.co/datasets/jordyvl/rvl_cdip_easyocr/resolve/main/data/Easy_Train_Data.npy",
57
+ "test": "https://huggingface.co/datasets/jordyvl/rvl_cdip_easyocr/resolve/main/data/Easy_Test_Data.npy",
58
+ "val": "https://huggingface.co/datasets/jordyvl/rvl_cdip_easyocr/resolve/main/data/Easy_Valid_Data.npy",
59
+ }
60
+
61
  _CLASSES = [
62
  "letter",
63
  "form",
 
78
  ]
79
 
80
  _IMAGES_DIR = "images/"
 
 
 
 
81
 
82
 
83
  # class OCRConfig(datasets.BuilderConfig):
 
127
  _URLS["rvl-cdip"]
128
  ) # only download images if need be
129
  labels_path = dl_manager.download(_METADATA_URLS)
130
+ ocrs_filepath = dl_manager.download(_OCR_URLS)
131
 
132
  return [
133
  datasets.SplitGenerator(
 
135
  gen_kwargs={
136
  "archive_iterator": dl_manager.iter_archive(archive_path),
137
  "labels_filepath": labels_path["train"],
138
+ "ocrs_filepath": ocrs_filepath["train"],
139
  "split": "train",
140
  },
141
  ),
 
144
  gen_kwargs={
145
  "archive_iterator": dl_manager.iter_archive(archive_path),
146
  "labels_filepath": labels_path["test"],
147
+ "ocrs_filepath": ocrs_filepath["test"],
148
  "split": "test",
149
  },
150
  ),
 
153
  gen_kwargs={
154
  "archive_iterator": dl_manager.iter_archive(archive_path),
155
  "labels_filepath": labels_path["val"],
156
+ "ocrs_filepath": ocrs_filepath["val"],
157
  "split": "validation",
158
  },
159
  ),
 
170
  return image_to_class_id
171
 
172
  @staticmethod
173
+ def _get_image_to_OCR(ocrs_filepath, split):
174
  def parse_easyOCR_box(box):
175
  # {'x0': 39, 'y0': 39, 'x1': 498, 'y1': 82, 'width': 459, 'height': 43}
176
  return (box["x0"], box["y0"], box["x1"], box["y1"])
177
 
 
 
178
  image_to_OCR = {}
179
  data = np.load(
180
+ ocrs_filepath,
181
  allow_pickle=True,
182
  )
183
  for ex in tqdm(data, desc="Loading OCR data"):
 
201
  words, boxes = image_to_OCR[file_path]
202
  return words, boxes
203
 
204
+ def _generate_examples(self, archive_iterator, labels_filepath, ocrs_filepath, split):
205
  with open(labels_filepath, encoding="utf-8") as f:
206
  data = f.read().splitlines()
207
 
208
+ image_to_OCR = self._get_image_to_OCR(ocrs_filepath, split)
209
  image_to_class_id = self._get_image_to_class_map(data)
210
 
211
  for file_path, file_obj in archive_iterator: