"""Gradio demo for the CALText offline handwritten-text recognition model."""

import os

import gradio as gr
import tensorflow as tf

import CALTextModel
import data

# Hide all GPUs so the demo runs on CPU only.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'

# Build the model in inference mode and restore the trained checkpoint.
CALText = CALTextModel.CALText_Model(training=False)
CALText.load_weights('final_caltextModel/cp-0037.ckpt')

# NOTE(review): this metric is never updated or read below — presumably a
# leftover from the training/evaluation script; kept for compatibility.
test_loss = tf.keras.metrics.Mean(name='test_loss')


def recognize_text(input_image):
    """Run CALText on *input_image* and return its transcription.

    Parameters
    ----------
    input_image : image as provided by ``gr.Image`` (assumed to be a
        numpy array — TODO confirm against ``data.preprocess_img``).

    Returns
    -------
    tuple
        ``(output_str, gifImage)`` — the recognized text and an animation
        of the attended regions produced by ``CALTextModel.predict``.
    """
    x, x_mask = data.preprocess_img(input_image)
    output_str, gifImage = CALTextModel.predict(CALText, x, x_mask)
    return output_str, gifImage


# Sample images offered as one-click examples in the UI.
examples = [
    ['sample_test_images/91-34.png'],
    ['sample_test_images/97-58.png'],
    ['sample_test_images/99-18.png'],
    ['sample_test_images/98-37.png'],
    ['sample_test_images/99-17.png'],
    ['sample_test_images/98-56.png'],
    ['sample_test_images/59-11.png'],
    ['sample_test_images/59-14.png'],
]

title = "CALText Demo"
# NOTE(review): the original description/article values appear to have been
# HTML whose markup was lost in extraction; the visible text is preserved
# verbatim below — restore the original markup if available.
description = (
    "Gradio demo for CALText model architecture [GitHub Code] trained on the "
    "PUCIT-OHUL dataset. To use it, simply add your image, or click one of "
    "the examples to load them. This demo is running on CPU that's why it "
    "can take a bit more time."
)
article = (
    "CALText: Contextual Attention Localization for Offline Handwritten Text"
)

inputs = gr.Image(label="Input Image")
demo = gr.Interface(
    fn=recognize_text,
    inputs=inputs,
    outputs=[gr.Textbox(label="Output"), gr.Image(label="Attended Regions")],
    examples=examples,
    title=title,
    description=description,
    article=article,
    allow_flagging='never',
)
demo.launch()