File size: 1,604 Bytes
7b68e51
 
 
44df331
cfb71c7
7b68e51
 
 
94da9e4
7b68e51
 
 
 
9defbc9
 
7b68e51
 
 
 
3aff513
9defbc9
9cd0c22
7b68e51
97a1cb9
 
 
 
 
 
7b68e51
 
 
 
 
 
3469024
 
97a1cb9
3469024
 
7b68e51
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
import tensorflow as tf
import os
import CALTextModel
import gradio as gr
import data



# Hide all GPUs from TensorFlow so inference runs on CPU only.
# NOTE(review): TF was already imported above — this still works because TF
# initializes CUDA lazily on first device use, but confirm no GPU op ran earlier.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'



# Create an instance of the model in inference mode (training=False) and
# restore the trained weights. The checkpoint path is relative to the working
# directory — presumably shipped alongside this demo; verify before deploying.
CALText = CALTextModel.CALText_Model(training=False)
CALText.load_weights('final_caltextModel/cp-0037.ckpt')
# Running-mean loss metric. It is not referenced elsewhere in this script;
# presumably consumed inside CALTextModel during evaluation — TODO confirm.
test_loss = tf.keras.metrics.Mean(name='test_loss')


def recognize_text(input_image):
    """Run CALText on *input_image*; return (recognized text, attention GIF).

    The image is preprocessed into a tensor and mask by the project's data
    module, then decoded by the globally loaded CALText model.
    """
    image, image_mask = data.preprocess_img(input_image)
    recognized, attention_gif = CALTextModel.predict(CALText, image, image_mask)
    return recognized, attention_gif

# Example images offered below the interface; each inner list is one input row.
examples = [
    ['sample_test_images/59-11.png'],
    ['sample_test_images/59-21.png'],
    ['sample_test_images/59-32.png'],
    ['sample_test_images/59-37.png'],
    ['sample_test_images/91-47.png'],
    ['sample_test_images/91-49.png'],
]

title = "CALText Demo"
# Fixed user-facing grammar: "an CALText" -> "a CALText".
description = "<p style='text-align: center'>Gradio demo for a CALText model architecture <a href='https://github.com/nazar-khan/CALText'>[GitHub Code]</a> trained on the <a href='http://faculty.pucit.edu.pk/nazarkhan/work/urdu_ohtr/pucit_ohul_dataset.html'>PUCIT-OHUL</a> dataset. To use it, simply add your image, or click one of the examples to load them.  </p>"
article = "<p style='text-align: center'></p>"

# Use the top-level gr.Image component: the gr.inputs.* namespace was
# deprecated in Gradio 3.x and removed entirely in 4.x, so the old
# gr.inputs.Image call crashes on current Gradio releases.
inputs = gr.Image(label="Input Image")

# Wire the recognizer into a two-output interface (text + attention GIF);
# flagging is disabled since this is a read-only demo.
demo = gr.Interface(fn=recognize_text,
                    inputs=inputs,
                    outputs=[gr.Textbox(label="Output"), gr.Image(label="Attended Regions")],
                    examples=examples,
                    title=title,
                    description=description,
                    article=article,
                    allow_flagging='never')

demo.launch()