markobinario committed on
Commit 0ef980f · verified · 1 Parent(s): 33645a5

Update app.py

Files changed (1)
  1. app.py +137 -149
app.py CHANGED
@@ -1,149 +1,137 @@
- import os
- import io
- import json
- from typing import List, Tuple, Dict, Any
-
- import fitz # PyMuPDF
- from PIL import Image
- import gradio as gr
-
-
- # Lazy-load the OCR model to reduce startup time and memory
- _ocr_model = None
-
-
- def get_ocr_model(lang: str = "en"):
-     global _ocr_model
-     if _ocr_model is not None:
-         return _ocr_model
-
-     # PaddleOCR supports language packs like 'en', 'ch', 'fr', 'german', etc.
-     # The Spaces container will download the model weights on first run and cache them.
-     from paddleocr import PaddleOCR # import here to avoid heavy import at startup
-
-     _ocr_model = PaddleOCR(use_angle_cls=True, lang=lang, show_log=False)
-     return _ocr_model
-
-
- def pdf_page_to_image(pdf_doc: fitz.Document, page_index: int, dpi: int = 170) -> Image.Image:
-     page = pdf_doc.load_page(page_index)
-     zoom = dpi / 72.0 # 72 dpi is PDF default
-     mat = fitz.Matrix(zoom, zoom)
-     pix = page.get_pixmap(matrix=mat, alpha=False)
-     img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
-     return img
-
-
- def run_paddle_ocr_on_image(image: Image.Image, lang: str = "en") -> Tuple[str, List[Dict[str, Any]]]:
-     ocr = get_ocr_model(lang=lang)
-     # Convert PIL image to numpy array for PaddleOCR
-     import numpy as np
-
-     img_np = np.array(image)
-     result = ocr.ocr(img_np, cls=True)
-
-     lines: List[str] = []
-     items: List[Dict[str, Any]] = []
-
-     # PaddleOCR returns list per image: [[(box, (text, conf)), ...]]
-     for page_result in result:
-         if page_result is None:
-             continue
-         for det in page_result:
-             box = det[0]
-             text = det[1][0]
-             conf = float(det[1][1])
-             lines.append(text)
-             items.append({"bbox": box, "text": text, "confidence": conf})
-
-     return "\n".join(lines), items
-
-
- def extract_text_from_pdf(file_obj, dpi: int = 170, max_pages: int | None = None, lang: str = "en") -> Tuple[str, str]:
-     """
-     Returns combined text and a JSON string with per-page OCR results.
-     """
-     if file_obj is None:
-         return "", json.dumps({"pages": []}, ensure_ascii=False)
-
-     # Gradio may pass a path or a tempfile.NamedTemporaryFile-like with .name
-     pdf_path = file_obj if isinstance(file_obj, str) else getattr(file_obj, "name", None)
-     if pdf_path is None or not os.path.exists(pdf_path):
-         # If bytes were passed, fall back to reading from buffer
-         file_bytes = file_obj.read() if hasattr(file_obj, "read") else None
-         if not file_bytes:
-             return "", json.dumps({"pages": []}, ensure_ascii=False)
-         pdf_doc = fitz.open(stream=file_bytes, filetype="pdf")
-     else:
-         pdf_doc = fitz.open(pdf_path)
-
-     try:
-         num_pages = pdf_doc.page_count
-         if max_pages is not None:
-             num_pages = min(num_pages, max_pages)
-
-         all_text_lines: List[str] = []
-         pages_payload: List[Dict[str, Any]] = []
-
-         for page_index in range(num_pages):
-             image = pdf_page_to_image(pdf_doc, page_index, dpi=dpi)
-             page_text, page_items = run_paddle_ocr_on_image(image, lang=lang)
-
-             all_text_lines.append(page_text)
-             pages_payload.append({
-                 "page": page_index + 1,
-                 "items": page_items,
-             })
-
-         combined_text = "\n\n".join([t for t in all_text_lines if t])
-         json_payload = json.dumps({"pages": pages_payload}, ensure_ascii=False)
-
-         return combined_text, json_payload
-     finally:
-         pdf_doc.close()
-
-
- def gradio_predict(pdf_file, dpi, max_pages, lang):
-     text, payload = extract_text_from_pdf(pdf_file, dpi=int(dpi), max_pages=(int(max_pages) if max_pages else None), lang=lang)
-     return text, payload
-
-
- with gr.Blocks(title="PDF OCR with PaddleOCR + PyMuPDF") as demo:
-     gr.Markdown("""
-     # PDF OCR (PaddleOCR + PyMuPDF)
-
-     Upload a PDF to extract text using OCR. Processes each page as an image rendered by PyMuPDF, then recognizes text with PaddleOCR.
-     """)
-
-     with gr.Row():
-         pdf_input = gr.File(label="PDF", file_types=[".pdf"], file_count="single")
-         with gr.Column():
-             dpi_input = gr.Slider(100, 300, value=170, step=10, label="Render DPI (higher = slower but more accurate)")
-             max_pages_input = gr.Number(value=None, label="Max pages (optional)")
-             lang_input = gr.Dropdown(choices=["en", "ch", "fr", "german", "korean", "japanese", "ta", "te", "latin"], value="en", label="OCR Language")
-
-     with gr.Row():
-         text_output = gr.Textbox(label="Extracted Text", lines=15)
-         json_output = gr.JSON(label="Per-page OCR details (bbox, text, confidence)")
-
-     run_btn = gr.Button("Run OCR")
-     run_btn.click(gradio_predict, inputs=[pdf_input, dpi_input, max_pages_input, lang_input], outputs=[text_output, json_output])
-
-     gr.Examples(
-         examples=[],
-         inputs=[pdf_input, dpi_input, max_pages_input, lang_input],
-     )
-
-     # Enable simple API for clients via gradio_client or Spaces Inference API
-     gr.Markdown("""
-     ## API usage
-     - Use `gradio_client` to call this Space programmatically.
-     - Endpoint function: `gradio_predict(pdf_file, dpi, max_pages, lang)` returning `(text, json)`.
-     """)
-
-
- if __name__ == "__main__":
-     # On Spaces, the host/port are managed by the platform. Locally, this runs on 7860 by default.
-     demo.launch()
-
-
 
+ import os
+ import io
+ import json
+ from typing import List, Tuple, Dict, Any
+
+ import fitz # PyMuPDF
+ from PIL import Image
+ import gradio as gr
+
+
+ # Lazy-load the OCR model to reduce startup time and memory
+ _ocr_model = None
+
+
+ def get_ocr_model(lang: str = "en"):
+     global _ocr_model
+     if _ocr_model is not None:
+         return _ocr_model
+
+     # PaddleOCR supports language packs like 'en', 'ch', 'fr', 'german', etc.
+     # The Spaces container will download the model weights on first run and cache them.
+     from paddleocr import PaddleOCR # import here to avoid heavy import at startup
+
+     _ocr_model = PaddleOCR(use_angle_cls=True, lang=lang, show_log=False)
+     return _ocr_model
+
+
+ def pdf_page_to_image(pdf_doc: fitz.Document, page_index: int, dpi: int = 170) -> Image.Image:
+     page = pdf_doc.load_page(page_index)
+     zoom = dpi / 72.0 # 72 dpi is PDF default
+     mat = fitz.Matrix(zoom, zoom)
+     pix = page.get_pixmap(matrix=mat, alpha=False)
+     img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+     return img
+
+
+ def run_paddle_ocr_on_image(image: Image.Image, lang: str = "en") -> Tuple[str, List[Dict[str, Any]]]:
+     ocr = get_ocr_model(lang=lang)
+     # Convert PIL image to numpy array for PaddleOCR
+     import numpy as np
+
+     img_np = np.array(image)
+     result = ocr.ocr(img_np, cls=True)
+
+     lines: List[str] = []
+     items: List[Dict[str, Any]] = []
+
+     # PaddleOCR returns list per image: [[(box, (text, conf)), ...]]
+     for page_result in result:
+         if page_result is None:
+             continue
+         for det in page_result:
+             box = det[0]
+             text = det[1][0]
+             conf = float(det[1][1])
+             lines.append(text)
+             items.append({"bbox": box, "text": text, "confidence": conf})
+
+     return "\n".join(lines), items
+
+
+ def extract_text_from_pdf(file_obj, dpi: int = 170, max_pages: int | None = None, lang: str = "en") -> Tuple[str, str]:
+     """
+     Returns combined text and a JSON string with per-page OCR results.
+     """
+     if file_obj is None:
+         return "", json.dumps({"pages": []}, ensure_ascii=False)
+
+     # Gradio may pass a path or a tempfile.NamedTemporaryFile-like with .name
+     pdf_path = file_obj if isinstance(file_obj, str) else getattr(file_obj, "name", None)
+     if pdf_path is None or not os.path.exists(pdf_path):
+         # If bytes were passed, fall back to reading from buffer
+         file_bytes = file_obj.read() if hasattr(file_obj, "read") else None
+         if not file_bytes:
+             return "", json.dumps({"pages": []}, ensure_ascii=False)
+         pdf_doc = fitz.open(stream=file_bytes, filetype="pdf")
+     else:
+         pdf_doc = fitz.open(pdf_path)
+
+     try:
+         num_pages = pdf_doc.page_count
+         if max_pages is not None:
+             num_pages = min(num_pages, max_pages)
+
+         all_text_lines: List[str] = []
+         pages_payload: List[Dict[str, Any]] = []
+
+         for page_index in range(num_pages):
+             image = pdf_page_to_image(pdf_doc, page_index, dpi=dpi)
+             page_text, page_items = run_paddle_ocr_on_image(image, lang=lang)
+
+             all_text_lines.append(page_text)
+             pages_payload.append({
+                 "page": page_index + 1,
+                 "items": page_items,
+             })
+
+         combined_text = "\n\n".join([t for t in all_text_lines if t])
+         json_payload = json.dumps({"pages": pages_payload}, ensure_ascii=False)
+
+         return combined_text, json_payload
+     finally:
+         pdf_doc.close()
+
+
+ def gradio_predict(pdf_file):
+     # Always render at a high DPI for accuracy and use English OCR by default
+     text, _ = extract_text_from_pdf(pdf_file, dpi=300, max_pages=None, lang="en")
+     return text
+
+
+ with gr.Blocks(title="PDF OCR with PaddleOCR + PyMuPDF") as demo:
+     gr.Markdown("""
+     # PDF OCR (PaddleOCR + PyMuPDF)
+
+     Upload a PDF to extract text using OCR. The app renders pages with PyMuPDF at a high DPI and uses PaddleOCR for recognition.
+     """)
+
+     pdf_input = gr.File(label="PDF", file_types=[".pdf"], file_count="single")
+     text_output = gr.Textbox(label="Extracted Text", lines=20)
+
+     # Auto-run OCR when a PDF is uploaded
+     pdf_input.change(fn=gradio_predict, inputs=[pdf_input], outputs=[text_output])
+
+     # Simple API note
+     gr.Markdown("""
+     ## API usage
+     - Use `gradio_client` to call this Space. Function signature: `gradio_predict(pdf_file)` → `text`.
+     """)
+
+
+ if __name__ == "__main__":
+     # On Spaces, the host/port are managed by the platform. Locally, this runs on 7860 by default.
+     demo.launch()
+
+
+
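The API note embedded in the new app.py only states the function signature. Below is a minimal gradio_client sketch of how a client might call the Space after this change. The Space id "user/space-name" and the endpoint name "/gradio_predict" are assumptions (Gradio usually derives the default endpoint name from the event's function), and wrapping the upload with handle_file assumes a recent gradio_client release; check client.view_api() for the real endpoint before relying on it.

from gradio_client import Client, handle_file

client = Client("user/space-name")   # placeholder Space id; replace with the real <owner>/<space>
print(client.view_api())             # lists the actual endpoint names and their inputs

text = client.predict(
    handle_file("sample.pdf"),       # local PDF to upload
    api_name="/gradio_predict",      # assumed default endpoint name; adjust per view_api()
)
print(text)                          # plain extracted text returned by gradio_predict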