# Hugging Face Space "app.py" by Tayaba171 (commit 5036bad, "Update app.py").
import tensorflow as tf
import os
import CALTextModel
import gradio as gr
import data
# Force CPU-only execution (the Space has no GPU; the demo text below says so too).
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Create an instance of the model
# Inference-only instance; `training=False` presumably disables dropout etc. —
# confirm against CALTextModel.CALText_Model.
CALText = CALTextModel.CALText_Model(training=False)
# Restore the trained weights from checkpoint epoch 37.
CALText.load_weights('final_caltextModel/cp-0037.ckpt')
# NOTE(review): this metric is never used in the visible code — possibly leftover
# from a training/evaluation script.
test_loss = tf.keras.metrics.Mean(name='test_loss')
def recognize_text(input_image):
    """Recognize handwritten text in *input_image* with the CALText model.

    Args:
        input_image: Image as delivered by the Gradio ``Image`` input
            (assumed to be a numpy array — confirm against
            ``data.preprocess_img``).

    Returns:
        tuple: ``(output_str, gif_image)`` — the decoded text string and an
        animation of the attended image regions, as produced by
        ``CALTextModel.predict``.
    """
    # Preprocess the raw image into the model's input tensor and its mask.
    x, x_mask = data.preprocess_img(input_image)
    # Decode using the globally loaded, inference-mode model instance.
    output_str, gif_image = CALTextModel.predict(CALText, x, x_mask)
    return output_str, gif_image
# Gallery of sample inputs shipped with the Space; Gradio expects one
# [path] list per example row.
examples = [
    [f"sample_test_images/{fname}"]
    for fname in (
        "91-34.png",
        "97-58.png",
        "99-18.png",
        "98-37.png",
        "99-17.png",
        "98-56.png",
        "59-11.png",
        "59-14.png",
    )
]

# Text shown around the Gradio interface (title bar, intro blurb, footer link).
title = "CALText Demo"
description = "<p style='text-align: center'>Gradio demo for CALText model architecture <a href='https://github.com/nazar-khan/CALText'>[GitHub Code]</a> trained on the <a href='http://faculty.pucit.edu.pk/nazarkhan/work/urdu_ohtr/pucit_ohul_dataset.html'>PUCIT-OHUL</a> dataset. To use it, simply add your image, or click one of the examples to load them. This demo is running on CPU that's why it can take a bit more time. </p>"
article = "<p style='text-align: center'><a href='https://link.springer.com/article/10.1007/s11063-023-11258-5'>CALText: Contextual Attention Localization for Offline Handwritten Text</a></p>"
# Input widget: a single image upload/draw component.
inputs = gr.Image(label="Input Image")
# Output widgets: the decoded text plus the attention-visualization image.
outputs = [gr.Textbox(label="Output"), gr.Image(label="Attended Regions")]

# Assemble the demo UI and start serving it.
demo = gr.Interface(
    fn=recognize_text,
    inputs=inputs,
    outputs=outputs,
    examples=examples,
    title=title,
    description=description,
    article=article,
    allow_flagging='never',
)
demo.launch()