from transformers import AutoModel, AutoTokenizer
import streamlit as st
import os
from PIL import Image
import random
import easyocr
import numpy as np
import re


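# Callback helpers wired to Streamlit buttons: start() marks that processing has
# begun; reset() clears the language choice and search flag so a new image can be
# uploaded from a clean state.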
def start():
    st.session_state.start = True

def reset():
    del st.session_state['start']
    del st.session_state['language']
    st.session_state.search = False

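# Load the GOT OCR checkpoint ('srimanth-d/GOT_CPU') once and cache it as a
# resource so Streamlit reruns reuse the same model and tokenizer.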
@st.cache_resource
def model():
    tokenizer = AutoTokenizer.from_pretrained('srimanth-d/GOT_CPU', trust_remote_code=True)
    model = AutoModel.from_pretrained(
        'srimanth-d/GOT_CPU',
        trust_remote_code=True,
        use_safetensors=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    model = model.eval()
    return model, tokenizer

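# Run the model's chat() OCR on a saved image path. Results are cached per path;
# the leading underscores exclude the model and tokenizer from the cache key.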
@st.cache_data
def get_text(image_file, _model, _tokenizer):
    res = _model.chat(_tokenizer, image_file, ocr_type='ocr')
    return res
    
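# Wrap each keyword found in the text in a coloured <mark> tag. Matching and
# replacement are both case-insensitive, so a keyword is highlighted even when
# its casing differs from the extracted text.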
@st.cache_data
def highlight_keywords(text, keywords):
    colors = generate_unique_colors(len(keywords))
    highlighted_text = text
    found_keywords = []
    for keyword, color in zip(keywords, colors):
        if keyword.lower() in text.lower():
            pattern = re.compile(re.escape(keyword), re.IGNORECASE)
            highlighted_text = pattern.sub(
                lambda m: f'<mark style="background-color: {color};">{m.group(0)}</mark>',
                highlighted_text,
            )
            found_keywords.append(keyword)
    return highlighted_text, found_keywords

@st.cache_data
def generate_unique_colors(n):
    colors = []
    for i in range(n):
        color = "#{:06x}".format(random.randint(0, 0xFFFFFF))
        while color in colors:
            color = "#{:06x}".format(random.randint(0, 0xFFFFFF))
        colors.append(color)
    return colors

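# EasyOCR path for Hindi. The Reader is expensive to construct, so it is cached
# as a resource; the extracted text is cached per image array so every new
# upload gets its own cache entry.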
@st.cache_resource
def easyocr_reader():
    return easyocr.Reader(['hi'], gpu=False)

@st.cache_data
def extract_text_easyocr(image_array):
    reader = easyocr_reader()
    results = reader.readtext(image_array)
    return " ".join(result[1] for result in results)


def search():
    st.session_state.search = True

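# Initialise session-state flags on the first run so later reads never fail.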
if 'start' not in st.session_state:
    st.session_state.start = False

if 'search' not in st.session_state:
    st.session_state.search = False

if 'reset' not in st.session_state:
    st.session_state.reset = False

if 'language' not in st.session_state:
    st.session_state.language = False
    
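# Sidebar with usage instructions, shown for both languages.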
with st.sidebar:
    st.header("Instructions")
    st.write("1. Choose a language (English or Hindi).")
    st.write("2. Upload an image in JPG, PNG, or JPEG format.")
    st.write("3. The app will extract text from the image using OCR.")
    st.write("4. Enter keywords to search within the extracted text.")
    st.write("5. If needed, click 'Reset' to upload a new image.")
    st.markdown("<br>" * 10, unsafe_allow_html=True)
    st.write("🤖 Please wait while the model is processing... This may take a moment.")


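# Language chooser: the selection is stored in session state and the script is
# rerun so the matching OCR pipeline below is rendered.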
st.header("Optical Character Recognition ")
col1, col2 = st.columns(2)

with col1:
    if st.button('English'):
        st.session_state.language = 'English'
        st.rerun()

with col2:
    if st.button('Hindi'):
        st.session_state.language = 'Hindi'
        st.rerun()

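# English pipeline: GOT model OCR, then optional keyword search over the result.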
if st.session_state.language == 'English':
        st.title("GOT OCR - Extract Text from Images")
        st.write("Upload an image and let the GOT model extract the text!")

        try:
            MODEL, TOKENIZER = model()
            st.success("GOT model loaded successfully")
        except Exception as e:
            st.error(f"Error loading GOT model: {str(e)}")
            st.stop()

        image_file = st.file_uploader("Upload an Image", type=["jpg", "png", "jpeg"])

        if image_file is not None:
            st.image(image_file, caption="Uploaded Image", use_column_width=True)

            if not os.path.exists("images"):
                os.makedirs("images")
            with open(f"images/{image_file.name}", "wb") as f:
                f.write(image_file.getbuffer())

            extracted_text = get_text(f"images/{image_file.name}", MODEL, TOKENIZER)
            # st.session_state.extracted_text = extracted_text
            
            st.subheader("Extracted Text:")
            st.write(extracted_text)

            keywords_input = st.text_input("Enter keywords to search within the extracted text (comma-separated):")
            if keywords_input:
                keywords = [keyword.strip() for keyword in keywords_input.split(',')]
                highlighted_text, found_keywords = highlight_keywords(extracted_text, keywords)
                st.button("Search", on_click=search)
                if st.session_state.search:
                    if found_keywords:
                        st.markdown(highlighted_text, unsafe_allow_html=True)
                        st.write(f"Found keywords: {', '.join(found_keywords)}")
                    else:
                        st.warning("No keywords found in the extracted text.")

                    not_found_keywords = set(keywords) - set(found_keywords)
                    if not_found_keywords:
                        st.error(f"Keywords not found: {', '.join(not_found_keywords)}")
            
            st.button("Reset and Upload New Image",on_click=reset)
                


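# Hindi pipeline: EasyOCR text extraction, then keyword search over the result.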
elif st.session_state.language == 'Hindi':
        st.title("HINDI OCR - Extract Text from Images")
        st.write("Upload an image and let EasyOCR extract the text!")
        
        
        image_file_hi = st.file_uploader("Upload an Image", type=["jpg", "png", "jpeg"])
        
        if image_file_hi:
            st.image(image_file_hi, caption="Uploaded Image", use_column_width=True)

            image = Image.open(image_file_hi).convert("RGB")
            extracted_text_hindi = extract_text_easyocr(np.array(image))
            
            st.subheader("Extracted Text:")
            st.write(extracted_text_hindi)

            keywords_input = st.text_input("Enter keywords to search within the extracted text (comma-separated):")
            if keywords_input:
                keywords = [keyword.strip() for keyword in keywords_input.split(',')]
                highlighted_text, found_keywords = highlight_keywords(extracted_text_hindi, keywords)

                st.subheader("Search Results:")
                if found_keywords:
                    st.markdown(highlighted_text, unsafe_allow_html=True)
                    st.write(f"Found keywords: {', '.join(found_keywords)}")
                else:
                    st.warning("No keywords found in the extracted text.")

                not_found_keywords = set(keywords) - set(found_keywords)
                if not_found_keywords:
                    st.error(f"Keywords not found: {', '.join(not_found_keywords)}")

            st.button("Reset and Upload New Image",on_click=reset)