jordyvl commited on
Commit
49508ed
1 Parent(s): d3a654c

Try to run image conversion for the test set

Browse files
Files changed (3) hide show
  1. cache_loader.py +48 -0
  2. rvl_cdip_multi.py +0 -4
  3. test_loader.py +167 -15
cache_loader.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from io import BytesIO
3
+ from tqdm import tqdm
4
+ from typing import Callable, Dict, List, Literal, NamedTuple, Optional, Tuple, Union
5
+
6
+ from datasets import load_dataset
7
+ from PIL import Image
8
+
9
+ import fitz as PyMuPDF
10
+
11
+
12
def pymupdf_image_extraction(data: bytes):
    """Extract every embedded image object from an in-memory PDF.

    Parameters
    ----------
    data : bytes
        Raw PDF file content.

    Returns
    -------
    list[PIL.Image.Image]
        One lazily decoded PIL image per embedded image, in page order.
    """
    images = []
    # Context manager releases the underlying MuPDF resources when done.
    with PyMuPDF.open(stream=data, filetype="pdf") as pdf_file:
        # Iterate pages directly instead of indexing range(len(...)).
        for page in pdf_file:
            for img in page.get_images():
                xref = img[0]  # cross-reference id of the image object
                base_image = pdf_file.extract_image(xref)
                images.append(Image.open(BytesIO(base_image["image"])))
    return images
25
+
26
+
27
def batch_get_images(streams, wrapper):
    """Extract images for a whole batch of raw PDF byte streams.

    `wrapper` is accepted for signature compatibility with the other batch
    helpers but is unused here: extraction always goes through PyMuPDF.
    Returns a datasets-compatible column dict keyed by "images".
    """
    extracted = []
    for pdf_stream in streams:
        extracted.append(pymupdf_image_extraction(pdf_stream))
    return {"images": extracted}
29
+
30
+
31
def batch_get_pages(streams, wrapper):
    """Count pages for a whole batch of raw PDF byte streams.

    `wrapper` is unused; counts always come from PyMuPDF, whose Document
    length equals its page count. Returns a datasets column dict.
    """
    page_counts = []
    for pdf_stream in streams:
        document = PyMuPDF.open(stream=pdf_stream, filetype="pdf")
        page_counts.append(len(document))
    return {"pages": page_counts}
35
+
36
+
37
# Which wrapper the batch helpers nominally use; they currently always go
# through PyMuPDF, so this is informational. Was an undefined name `binding`
# before, which raised NameError at runtime.
BINDING = "pymupdf"

testds = load_dataset("jordyvl/rvl_cdip_multi", cache_dir="/mnt/lerna/data/HFcache", split="test")

# Annotate every test document with its page count (cached by datasets.map).
testds = testds.map(
    lambda batch: batch_get_pages(batch["file"], wrapper=BINDING),
    batched=True,
    keep_in_memory=False,
)

# Extract every test document's embedded images (also cached on disk).
testds = testds.map(
    lambda batch: batch_get_images(batch["file"], wrapper=BINDING),
    batched=True,
    keep_in_memory=False,
)
# Removed leftover `pdb.set_trace()` debugging breakpoint here.

"""
Filter based on page count?
"""
48
+
rvl_cdip_multi.py CHANGED
@@ -30,10 +30,6 @@ _HOMEPAGE = "TBD"
30
  _LICENSE = "https://www.industrydocuments.ucsf.edu/help/copyright/"
31
 
32
 
33
- _BINARY_URL = (
34
- "https://huggingface.co/datasets/jordyvl/unit-test_PDFfolder/resolve/main/data/data.tar.gz"
35
- )
36
-
37
  _BACKOFF_folder = "/mnt/lerna/data/RVL-CDIP_pdf"
38
  # "/mnt/lerna/data/RVL_CDIP_multi.tar.gz"
39
 
 
30
  _LICENSE = "https://www.industrydocuments.ucsf.edu/help/copyright/"
31
 
32
 
 
 
 
 
33
  _BACKOFF_folder = "/mnt/lerna/data/RVL-CDIP_pdf"
34
  # "/mnt/lerna/data/RVL_CDIP_multi.tar.gz"
35
 
test_loader.py CHANGED
@@ -1,31 +1,183 @@
1
  import os
 
2
  from tqdm import tqdm
 
 
3
  from datasets import load_dataset, load_dataset_builder, load_from_disk
4
 
5
- if os.getlogin() == "jordy": #local testing
6
- DATASET = "/home/jordy/code/opensource/Beyond-Document-Page-Classification/src/tests/rvl_cdip_multi/rvl_cdip_multi.py"
7
- stepwise = True
8
- else:
9
- DATASET = "jordyvl/rvl_cdip_multi"
10
- stepwise = False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  if stepwise:
15
  ds = load_dataset(DATASET, cache_dir="/mnt/lerna/data/HFcache")
16
- import pdb; pdb.set_trace() # breakpoint 1898e7e0 //
17
-
 
 
18
  builder = load_dataset_builder(DATASET)
19
  ds = builder.download_and_prepare()
20
- ds = builder.as_dataset(split='test')
21
 
22
 
23
- ds = load_dataset("jordyvl/rvl_cdip_multi", cache_dir="/mnt/lerna/data/HFcache")
24
- #streaming=True, num_proc=8
25
- #stream it
26
-
27
- #count pages
28
  counter = []
29
  for i, d in tqdm(enumerate(ds["test"])):
30
  counter.append(len(d["file"]))
31
- print(sum(counter)/len(counter))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ from io import BytesIO
3
  from tqdm import tqdm
4
+ from typing import Callable, Dict, List, Literal, NamedTuple, Optional, Tuple, Union
5
+
6
  from datasets import load_dataset, load_dataset_builder, load_from_disk
7
 
8
+ from PIL import Image
9
+ from codetiming import Timer
10
+
11
+ import pdf2image
12
+ import PyPDF2
13
+ import pikepdf
14
+ import fitz as PyMuPDF
15
+
16
+ #https://gist.github.com/gstorer/f6a9f1dfe41e8e64dcf58d07afa9ab2a
17
+
18
+
19
+
20
+
21
# Hub dataset id; `stepwise` toggles the manual builder-based loading path
# further down. (Removed dead commented-out local-path configuration.)
DATASET = "jordyvl/rvl_cdip_multi"
stepwise = False

# Load only the test split; the extraction benchmark below runs on it.
# Use the DATASET constant instead of repeating the hard-coded id.
testds = load_dataset(DATASET, cache_dir="/mnt/lerna/data/HFcache", split="test")


# time how long each way to pdf2image lasts
36
def batched_conversion(pdf_file):
    """Rasterize a PDF from disk in chunks of 10 pages at a time.

    Converting bounded page ranges keeps poppler's peak memory down on
    long documents.

    Parameters
    ----------
    pdf_file : str
        Path to the PDF on disk.

    Returns
    -------
    list[PIL.Image.Image]
        One 200-dpi page image per page, in order.
    """
    info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
    max_pages = info["Pages"]

    # Was `logger.info(...)` with no logger defined anywhere in this module,
    # which raised NameError; report progress like the other helpers do.
    print(f"{pdf_file} has {max_pages} pages")

    images = []
    for page in range(1, max_pages + 1, 10):
        images.extend(
            pdf2image.convert_from_path(
                pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, max_pages)
            )
        )
    return images
51
+
52
+
53
def batch_get_images(streams, wrapper):
    """Extract images for a batch of raw PDF byte streams.

    Delegates each stream to `get_images` with the chosen backend and
    returns a datasets-compatible column dict keyed by "images".
    """
    converted = []
    for pdf_stream in streams:
        converted.append(get_images(pdf_stream, wrapper=wrapper))
    return {"images": converted}
55
 
56
 
57
def batch_get_pages(streams, wrapper):
    """Count pages for a batch of raw PDF byte streams.

    Delegates each stream to `get_pages` with the chosen backend and
    returns a datasets-compatible column dict keyed by "pages".
    """
    counts = []
    for pdf_stream in streams:
        counts.append(get_pages(pdf_stream, wrapper=wrapper))
    return {"pages": counts}
59
+
60
def get_pages(pdf_stream, wrapper="pike"):
    """Count the pages of an in-memory PDF with the requested backend.

    Parameters
    ----------
    pdf_stream : bytes
        Raw PDF content.
    wrapper : str
        One of "pdf2image", "pike", "PyPDF2", "pymupdf".

    Returns
    -------
    int
        Number of pages in the document.

    Raises
    ------
    ValueError
        If `wrapper` names an unknown backend (previously this silently
        fell through and returned None).
    """
    if wrapper == "pdf2image":
        # pdf2image/poppler works on raw bytes directly.
        return pdf2image.pdfinfo_from_bytes(pdf_stream, userpw=None, poppler_path=None)["Pages"]
    pdf_file = BytesIO(pdf_stream)
    if wrapper == "pike":
        return len(pikepdf.Pdf.open(pdf_file).pages)
    if wrapper == "PyPDF2":
        return len(PyPDF2.PdfReader(pdf_file).pages)
    if wrapper == "pymupdf":
        # A fitz Document's length is its page count.
        return len(PyMuPDF.open(stream=pdf_stream, filetype="pdf"))
    raise ValueError(f"Unknown PDF wrapper: {wrapper!r}")
70
+
71
+
72
def pymupdf_image_extraction(data: bytes):
    """Extract every embedded image object from an in-memory PDF.

    Parameters
    ----------
    data : bytes
        Raw PDF file content.

    Returns
    -------
    list[PIL.Image.Image]
        One lazily decoded PIL image per embedded image, in page order.
    """
    images = []
    # Context manager releases the underlying MuPDF resources when done.
    with PyMuPDF.open(stream=data, filetype="pdf") as pdf_file:
        # Iterate pages directly; removed unused image_index/image_ext locals
        # and the dead commented-out (filename, bytes) tuple variant.
        for page in pdf_file:
            for img in page.get_images():
                xref = img[0]  # cross-reference id of the image object
                base_image = pdf_file.extract_image(xref)
                images.append(Image.open(BytesIO(base_image["image"])))
    return images
88
+
89
def pypdf_image_extraction(data: bytes):  # -> List[Tuple[str, bytes]]
    """Pull embedded images out of an in-memory PDF via PyPDF2, best-effort.

    Any failure (corrupt file, unsupported filter, ...) is reported on
    stdout and whatever was extracted so far is returned instead of raising.
    """
    extracted = []
    try:
        document = PyPDF2.PdfReader(BytesIO(data))
        for page in document.pages:
            for embedded in page.images:
                extracted.append(Image.open(BytesIO(embedded.data)))
    except Exception as exc:
        print(f"PyPDF2 Image extraction failure: {exc}")
    return extracted
101
+
102
def pike_image_extraction(data: bytes):
    """Collect every embedded image of an in-memory PDF as PIL images via pikepdf."""
    collected = []
    pdf = pikepdf.Pdf.open(BytesIO(data))
    for page in pdf.pages:
        # page.images maps raw object names to raw image streams; the
        # names are not needed, only the streams.
        for raw_image in page.images.values():
            collected.append(pikepdf.PdfImage(raw_image).as_pil_image())
    return collected
110
+
111
def get_images(pdf_stream, wrapper="pike"):
    """Dispatch in-memory PDF image extraction to the chosen backend.

    Returns a list of PIL images, or None when `wrapper` is not one of
    "pike", "pdf2image", "pymupdf", "PyPDF2".
    """
    if wrapper == "pdf2image":
        # pdf2image rasterizes whole pages rather than extracting objects.
        return pdf2image.convert_from_bytes(pdf_stream)
    if wrapper == "PyPDF2":
        return pypdf_image_extraction(pdf_stream)
    if wrapper == "pymupdf":
        return pymupdf_image_extraction(pdf_stream)
    if wrapper == "pike":
        return pike_image_extraction(pdf_stream)
    return None
122
+
123
+
124
# Benchmark: extract images from the first 100 test PDFs with each backend,
# using the same tool for both page counting and image extraction.
subset = testds.select(list(range(0, 100)))

images_per_binding = {}
for binding in ["pike", "pdf2image", "PyPDF2", "pymupdf"]:
    with Timer(name=f"{binding}", text=binding + " Elapsed time: {:.4f} seconds"):
        # Bind `binding` as a default argument so the closure does not depend
        # on the loop variable's late binding.
        func = lambda batch, _binding=binding: batch_get_images(batch["file"], wrapper=_binding)
        images_per_binding[binding] = subset.map(func, batched=True, keep_in_memory=False)

"""
Image.open(BytesIO(images_per_binding["pymupdf"]["images"][0][0]["bytes"])).show()
Image.open(BytesIO(images_per_binding["PyPDF2"]["images"][0][0]["bytes"])).show()
Image.open(BytesIO(images_per_binding["pike"]["images"][0][0]["bytes"])).show()
Image.open(BytesIO(images_per_binding["pdf2image"]["images"][0][0]["bytes"])).show()
"""
# NOTE(review): after mapping, the stored images appear to come back as PIL
# objects, not {"bytes": ...} dicts as the snippet above assumes — verify.
# Removed leftover `pdb.set_trace()` breakpoint that would halt any
# non-interactive run at this point.
146
 
147
if stepwise:
    # Debug path: load through the high-level API first, then exercise the
    # builder API step by step under the debugger.
    ds = load_dataset(DATASET, cache_dir="/mnt/lerna/data/HFcache")
    import pdb

    pdb.set_trace()  # breakpoint 1898e7e0 //

    builder = load_dataset_builder(DATASET)
    # NOTE(review): download_and_prepare() writes the cache; its return value
    # is immediately overwritten by as_dataset below — confirm intent.
    ds = builder.download_and_prepare()
    ds = builder.as_dataset(split="test")


# count pages
# NOTE(review): `ds` is only assigned inside the `if stepwise:` branch above,
# so with stepwise == False this loop raises NameError — confirm intent.
# NOTE(review): as_dataset(split="test") returns a single split, so indexing
# it with ds["test"] also looks suspect — verify.
counter = []
for i, d in tqdm(enumerate(ds["test"])):
    counter.append(len(d["file"]))
print(sum(counter) / len(counter))
163
+
164
+ """
165
+ pdfinfo ylhw0010.pdf
166
+ Creator: CaptureSDK ENGine - www.scansoft.com
167
+ Producer: Scansoft Capture Development System V12.0; modified using iText 2.1.7 by 1T3XT
168
+ CreationDate: Sat Jan 1 01:00:00 2005 CET
169
+ ModDate: Sat Dec 29 08:04:44 2018 CET
170
+ Tagged: no
171
+ UserProperties: no
172
+ Suspects: no
173
+ Form: none
174
+ JavaScript: no
175
+ Pages: 1
176
+ Encrypted: no
177
+ Page size: 576 x 756 pts
178
+ Page rot: 0
179
+ File size: 24443 bytes
180
+ Optimized: yes
181
+ PDF version: 1.3
182
+ jordy@jordy-OMEN:~/Downl
183
+ """