# -*- encoding: utf-8 -*-
# @Author: SWHL
# @Contact: liekkaskono@163.com
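"""Streamlit demo for rapid_latex_ocr.

Upload a PDF or an image in the sidebar, draw rectangles around the formulas
on the page, and the recognized LaTeX is rendered and shown as copyable source
in the right-hand column.
"""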

import hashlib
import io

import numpy as np
import pandas as pd
import pypdfium2
import streamlit as st
from PIL import Image
from rapid_latex_ocr import LatexOCR
from streamlit_drawable_canvas import st_canvas

MAX_WIDTH = 800
MAX_HEIGHT = 1000

st.set_page_config(layout="wide")


# Cache the loaded model across Streamlit reruns so it is created only once.
@st.cache_resource()
def load_model_cached():
    return LatexOCR()


def get_canvas_hash(pil_image):
    # Key the canvas widget off the image content so it resets for a new image.
    return hashlib.md5(pil_image.tobytes()).hexdigest()


def open_pdf(pdf_file):
    stream = io.BytesIO(pdf_file.getvalue())
    return pypdfium2.PdfDocument(stream)


def page_count(pdf_file):
    doc = open_pdf(pdf_file)
    return len(doc)


def get_page_image(pdf_file, page_num, dpi=96):
    doc = open_pdf(pdf_file)
    renderer = doc.render(
        pypdfium2.PdfBitmap.to_pil,
        page_indices=[page_num - 1],
        scale=dpi / 72,
    )
    png = list(renderer)[0]
    png_image = png.convert("RGB")
    return png_image


def get_uploaded_image(in_file):
    if isinstance(in_file, Image.Image):
        return in_file.convert("RGB")
    return Image.open(in_file).convert("RGB")


def resize_image(pil_image):
    if pil_image is None:
        return
    # thumbnail() resizes in place and preserves the aspect ratio.
    pil_image.thumbnail((MAX_WIDTH, MAX_HEIGHT), Image.Resampling.LANCZOS)


def get_image_size(pil_image):
    if pil_image is None:
        return MAX_HEIGHT, MAX_WIDTH
    height, width = pil_image.height, pil_image.width
    return height, width


if __name__ == "__main__":
    st.markdown(
        "<h1 style='text-align: center;'><a href='https://github.com/RapidAI/RapidLatexOCR' style='text-decoration: none'>Rapid ⚡︎ LaTeX OCR</a></h1>",
        unsafe_allow_html=True,
    )
    st.markdown(
        """
        <p align="center">
            <a href=""><img src="https://img.shields.io/badge/Python->=3.6,<3.12-aff.svg"></a>
            <a href=""><img src="https://img.shields.io/badge/OS-Linux%2C%20Win%2C%20Mac-pink.svg"></a>
            <a href="https://pepy.tech/project/rapid_latex_ocr"><img src="https://static.pepy.tech/personalized-badge/rapid_latex_ocr?period=total&units=abbreviation&left_color=grey&right_color=blue&left_text=Downloads"></a>
            <a href="https://pypi.org/project/rapid_latex_ocr/"><img alt="PyPI" src="https://img.shields.io/pypi/v/rapid_latex_ocr"></a>
            <a href="https://semver.org/"><img alt="SemVer2.0" src="https://img.shields.io/badge/SemVer-2.0-brightgreen"></a>
            <a href="https://github.com/psf/black"><img src="https://img.shields.io/badge/code%20style-black-000000.svg"></a>
            <a href="https://github.com/RapidAI/RapidLatexOCR"><img src="https://img.shields.io/badge/Github-link-brightgreen.svg"></a>
        </p>
        """,
        unsafe_allow_html=True,
    )
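
    # Sidebar input: a PDF (rendered one page at a time) or a single image.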
    col1, col2 = st.columns([0.5, 0.5])

    in_file = st.sidebar.file_uploader(
        "PDF file or image:", type=["pdf", "png", "jpg", "jpeg", "gif", "webp"]
    )
    if in_file is None:
        st.stop()

    filetype = in_file.type
    if "pdf" in filetype:
        total_pages = page_count(in_file)
        page_number = st.sidebar.number_input(
            f"Page number out of {total_pages}:",
            min_value=1,
            value=1,
            max_value=total_pages,
        )
        pil_image = get_page_image(in_file, page_number)
    else:
        pil_image = get_uploaded_image(in_file)

    # Resize in place so the canvas and the crops below share one coordinate space.
    resize_image(pil_image)
    canvas_hash = get_canvas_hash(pil_image) if pil_image else "canvas"
    model = load_model_cached()
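
    # Left column: show the page on a drawable canvas; every rectangle the user
    # draws becomes a crop that is sent to the LaTeX OCR model.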
    with col1:
        canvas_result = st_canvas(
            fill_color="rgba(255, 165, 0, 0.1)",
            stroke_width=1,
            stroke_color="#FFAA00",
            background_color="#FFF",
            background_image=pil_image,
            update_streamlit=True,
            height=get_image_size(pil_image)[0],
            width=get_image_size(pil_image)[1],
            drawing_mode="rect",
            point_display_radius=0,
            key=canvas_hash,
        )
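
    # Turn the rectangles drawn on the canvas into (left, top, right, bottom)
    # boxes in the resized image's pixel coordinates.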
    bbox_list = None
    if canvas_result.json_data is not None:
        objects = pd.json_normalize(canvas_result.json_data["objects"])
        if objects.shape[0] > 0:
            boxes = objects[objects["type"] == "rect"][
                ["left", "top", "width", "height"]
            ]
            boxes["right"] = boxes["left"] + boxes["width"]
            boxes["bottom"] = boxes["top"] + boxes["height"]
            bbox_list = boxes[["left", "top", "right", "bottom"]].values.tolist()
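
    # Right column: run recognition on each box and show the rendered formula
    # together with its copyable LaTeX source.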
    if bbox_list:
        with col2:
            for i, bbox in enumerate(bbox_list):
                input_img = pil_image.crop(bbox)
                rec_res, elapse = model(np.array(input_img))
                st.markdown(f"#### {i + 1}")
                st.latex(rec_res)
                st.code(rec_res)