# Streamlit demo app: Japanese handwriting recognition (hosted on Hugging Face Spaces)
# Standard library
import warnings
from pathlib import Path

# Third-party
import numpy as np
import openvino as ov
import streamlit as st
from PIL import Image

# Local application
from src.config import (
    DEVICE,
    DICT_DIR,
    IMAGE_EXAMPLE,
    IMAGE_TYPES,
    MODEL_DIR,
)
from src.image_processing import recognize

# Hide deprecation warnings which do not directly affect the working of the app.
warnings.filterwarnings("ignore")
# Set a custom page title and icon, with centered layout and automatic
# sidebar state.
st.set_page_config(
    page_title="日本語手書き認識",
    page_icon="",
    layout="centered",
    initial_sidebar_state="auto",
)

# Custom CSS that hides Streamlit's default main menu and footer chrome;
# purely cosmetic, not part of the main app logic.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
# unsafe_allow_html lets Streamlit render the raw <style> tag instead of
# showing it as text on screen.
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
def init():
    """Load the OpenVINO recognition model and the character dictionary.

    Returns:
        list: ``[compiled_model, input_layer, output_layer, letters]`` where
        ``letters`` is the dictionary charlist as one string with the CTC
        blank symbol ("~") prepended at index 0.
    """
    # Load the recognition model from disk.
    core = ov.Core()
    model = core.read_model(model=Path(MODEL_DIR))
    print("[INFO] Loaded recognition model")

    # Compile for the device configured in src.config (CPU or GPU).
    compiled_model = core.compile_model(model=model, device_name=DEVICE)

    # Fetch information about the input and output layers.
    recognition_input_layer = compiled_model.input(0)
    recognition_output_layer = compiled_model.output(0)
    print("[INFO] Fetched recognition model")

    # In the JA model there must be a blank symbol added at index 0 of the
    # charlist (CTC decoding convention).
    blank_char = "~"
    with Path(DICT_DIR).open(mode="r", encoding="utf-8") as charlist:
        letters = blank_char + "".join(line.strip() for line in charlist)
    print("[INFO] Loaded dictionary")

    return [compiled_model, recognition_input_layer, recognition_output_layer, letters]
def display_text(bounds):
    """Join the text component of each bound into one space-separated string.

    Args:
        bounds: Iterable of sequences where element at index 1 is the
            recognized text for that bounding region.

    Returns:
        str: All text components joined with single spaces ("" for empty input).
    """
    # Generator + str.join replaces the original append loop; identical output.
    return " ".join(x[1] for x in bounds)
# Load model artifacts once at app start:
# [compiled_model, input_layer, output_layer, letters]
HWRmodel = init()

# Legacy Streamlit option; kept for compatibility with the Streamlit
# version this app was written against.
st.set_option('deprecation.showfileUploaderEncoding', False)

st.title('日本語手書き認識')
st.subheader('Tokyo Teachies (DEMO)')
st.subheader('注意:画像には1行のテキストしか含まれていません。')

image_file = st.file_uploader("画像をアップロード…", type=IMAGE_TYPES)
if image_file is not None:
    st.subheader('アップロードした画像')
    st.image(image_file, width=450)
else:
    # No upload yet: show the bundled example image instead.
    st.subheader('例:')
    image_file = IMAGE_EXAMPLE
    # BUG FIX: original passed use_column_width=450, but use_column_width
    # expects a bool; the intended call is width=450, matching the
    # uploaded-image branch above.
    st.image(image_file, width=450)

if st.button("Recognize"):
    if image_file is not None:
        # Convert to grayscale ('L'); the recognition pipeline expects a
        # single-channel image array.
        img = np.array(Image.open(image_file).convert('L'))
        with st.spinner('テキストの抽出...'):
            recognized_text = recognize(
                img, HWRmodel[0], HWRmodel[1], HWRmodel[2], HWRmodel[3]
            )
            st.write("".join(recognized_text))