dperales committed
Commit ffda5f3 • Parent(s): 65ffad7

Create app_v3.txt

Files changed (1): app_v3.txt (+320 −0)

app_v3.txt ADDED
import os
import pandas as pd
import numpy as np
import easyocr
import streamlit as st
from annotated_text import annotated_text
from streamlit_option_menu import option_menu
from sentiment_analysis import SentimentAnalysis
from keyword_extraction import KeywordExtractor
from part_of_speech_tagging import POSTagging
from emotion_detection import EmotionDetection
from named_entity_recognition import NamedEntityRecognition
from Object_Detector import ObjectDetector
from OCR_Detector import OCRDetector
import PIL
from PIL import Image, ImageColor, ImageDraw, ImageFont
import time
# Object Detection imports
import tensorflow as tf
import tensorflow_hub as hub
# Load compressed models from tensorflow_hub
os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'
import matplotlib.pyplot as plt
import matplotlib as mpl
# For drawing onto the image (numpy was already imported above).
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()
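# Note (explanatory): np_config.enable_numpy_behavior() lets TensorFlow tensors
# support NumPy-style indexing and methods, which is presumably what the object
# detector's drawing code relies on when it treats model outputs as arrays.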
import torch
import librosa
from models import infere_speech_emotion, infere_text_emotion, infere_voice2text
st.set_page_config(layout="wide")

# Hide Streamlit's default menu and footer.
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
@st.cache_resource
def load_sentiment_model():
    return SentimentAnalysis()

@st.cache_resource
def load_keyword_model():
    return KeywordExtractor()

@st.cache_resource
def load_pos_model():
    return POSTagging()

@st.cache_resource
def load_emotion_model():
    return EmotionDetection()

@st.cache_resource
def load_ner_model():
    return NamedEntityRecognition()

@st.cache_resource
def load_objectdetector_model():
    return ObjectDetector()

@st.cache_resource
def load_ocrdetector_model():
    return OCRDetector()
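# The st.cache_resource pattern above builds each heavy model once per process
# and reuses it across reruns and sessions. Minimal sketch of the pattern
# (HeavyModel is a hypothetical class, not part of this app):
#
#     @st.cache_resource
#     def load_model():
#         return HeavyModel()   # constructed only on the first call
#
#     model = load_model()      # later calls return the cached instance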
sentiment_analyzer = load_sentiment_model()
keyword_extractor = load_keyword_model()
pos_tagger = load_pos_model()
emotion_detector = load_emotion_model()
ner = load_ner_model()
objectdetector1 = load_objectdetector_model()
ocrdetector1 = load_ocrdetector_model()
def rectangle(image, result):
    """Draw a blue bounding box for each OCR detection, then show the image."""
    draw = ImageDraw.Draw(image)
    for res in result:
        top_left = tuple(res[0][0])      # top-left corner coordinates as a tuple
        bottom_right = tuple(res[0][2])  # bottom-right corner coordinates as a tuple
        draw.rectangle((top_left, bottom_right), outline="blue", width=2)
    st.image(image)
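# Each EasyOCR result entry has the form (box, text, confidence), where box is
# a list of four [x, y] corner points ordered top-left, top-right,
# bottom-right, bottom-left; res[0][0] and res[0][2] above are therefore the
# two opposite corners that draw.rectangle needs.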
example_text = "My name is Daniel: The attention to detail, swift resolution, and accuracy demonstrated by ITACA Insurance Company in Spain in handling my claim were truly impressive. This undoubtedly reflects their commitment to being a customer-centric insurance provider."

with st.sidebar:
    image = Image.open('./itaca_logo.png')
    st.image(image, width=150)
    page = option_menu(menu_title='Menu',
                       menu_icon="robot",
                       options=["Sentiment Analysis",
                                "Keyword Extraction",
                                "Part of Speech Tagging",
                                "Emotion Detection",
                                "Named Entity Recognition",
                                "Speech & Text Emotion",
                                "Object Detector",
                                "OCR Detector"],
                       icons=["chat-dots",
                              "key",
                              "tag",
                              "emoji-heart-eyes",
                              "building",
                              "book",
                              "camera",
                              "list-task"],
                       default_index=0
                       )
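# option_menu returns the label of the selected entry (e.g. "Sentiment
# Analysis"), and the if/elif chain below routes on that string.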
st.title('ITACA Insurance Core AI Module')

# Replace '20px' with your desired font size (note: not referenced below yet).
font_size = '20px'
if page == "Sentiment Analysis":
    st.header('Sentiment Analysis')
    # st.markdown("![Alt Text](https://media.giphy.com/media/XIqCQx02E1U9W/giphy.gif)")
    st.write(
        """
        """
    )

    text = st.text_area("Paste text here", value=example_text)

    if st.button('🔥 Run!'):
        with st.spinner("Loading..."):
            preds, html = sentiment_analyzer.run(text)
            st.success('All done!')
            st.write("")
            st.subheader("Sentiment Predictions")
            st.bar_chart(data=preds, width=0, height=0, use_container_width=True)
            st.write("")
            st.subheader("Sentiment Justification")
            raw_html = html._repr_html_()
            st.components.v1.html(raw_html, height=500)
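# Note: sentiment_analyzer.run(text) is assumed to return (preds, html), where
# preds is a per-class score table suitable for st.bar_chart and html is any
# object exposing _repr_html_() (e.g. a SHAP/LIME-style explanation object).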
elif page == "Keyword Extraction":
    st.header('Keyword Extraction')
    # st.markdown("![Alt Text](https://media.giphy.com/media/xT9C25UNTwfZuk85WP/giphy-downsized-large.gif)")
    st.write(
        """
        """
    )

    text = st.text_area("Paste text here", value=example_text)

    max_keywords = st.slider('# of Keywords Max Limit', min_value=1, max_value=10, value=5, step=1)

    if st.button('🔥 Run!'):
        with st.spinner("Loading..."):
            annotation, keywords = keyword_extractor.generate(text, max_keywords)
            st.success('All done!')

        if annotation:
            st.subheader("Keyword Annotation")
            st.write("")
            annotated_text(*annotation)
            st.text("")

        st.subheader("Extracted Keywords")
        st.write("")
        df = pd.DataFrame(keywords, columns=['Extracted Keywords'])
        csv = df.to_csv(index=False).encode('utf-8')
        st.download_button('Download Keywords to CSV', csv, file_name='news_intelligence_keywords.csv')

        data_table = st.table(df)
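# annotated_text accepts a mix of plain strings and (text, label) or
# (text, label, background_color) tuples, e.g. (illustrative only):
#
#     annotated_text("The ", ("claim", "KEYWORD"), " was handled quickly.")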
elif page == "Part of Speech Tagging":
    st.header('Part of Speech Tagging')
    # st.markdown("![Alt Text](https://media.giphy.com/media/WoWm8YzFQJg5i/giphy.gif)")
    st.write(
        """
        """
    )

    text = st.text_area("Paste text here", value=example_text)

    if st.button('🔥 Run!'):
        with st.spinner("Loading..."):
            preds = pos_tagger.classify(text)
            st.success('All done!')
            st.write("")
            st.subheader("Part of Speech tags")
            annotated_text(*preds)
            st.write("")
            # Reference page for the Penn Treebank tag set, which the labels
            # above are assumed to follow.
            st.components.v1.iframe('https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html', height=1000)
elif page == "Emotion Detection":
    st.header('Emotion Detection')
    # st.markdown("![Alt Text](https://media.giphy.com/media/fU8X6ozSszyEw/giphy.gif)")
    st.write(
        """
        """
    )

    text = st.text_area("Paste text here", value=example_text)

    if st.button('🔥 Run!'):
        with st.spinner("Loading..."):
            preds, html = emotion_detector.run(text)
            st.success('All done!')
            st.write("")
            st.subheader("Emotion Predictions")
            st.bar_chart(data=preds, width=0, height=0, use_container_width=True)
            raw_html = html._repr_html_()
            st.write("")
            st.subheader("Emotion Justification")
            st.components.v1.html(raw_html, height=500)
elif page == "Named Entity Recognition":
    st.header('Named Entity Recognition')
    # st.markdown("![Alt Text](https://media.giphy.com/media/lxO8wdWdu4tig/giphy.gif)")
    st.write(
        """
        """
    )

    text = st.text_area("Paste text here", value=example_text)

    if st.button('🔥 Run!'):
        with st.spinner("Loading..."):
            preds, ner_annotation = ner.classify(text)
            st.success('All done!')
            st.write("")
            st.subheader("NER Predictions")
            annotated_text(*ner_annotation)
            st.write("")
            st.subheader("NER Prediction Metadata")
            st.write(preds)
elif page == "Object Detector":
    st.header('Object Detector')
    st.write(
        """
        """
    )

    img_file_buffer = st.file_uploader("Load an image", type=["png", "jpg", "jpeg"])
    if img_file_buffer is not None:
        image = np.array(Image.open(img_file_buffer))

        if st.button('🔥 Run!'):
            with st.spinner("Loading..."):
                img, primero = objectdetector1.run_detector(image)
                st.success('The first object detected is: ' + primero)
                st.image(img, caption="Image", use_column_width=True)
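# Note: objectdetector1.run_detector(image) is assumed to return the annotated
# image plus the label of the first (highest-scoring) detection, hence "primero".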
elif page == "OCR Detector":
    st.header('OCR Detector')
    st.write(
        """
        """
    )

    file = st.file_uploader("Load an image", type=["png", "jpg", "jpeg"])

    # Read the uploaded image and run text detection on it.
    if file is not None:
        image = Image.open(file)  # read the image with the PIL library

        if st.button('🔥 Run!'):
            with st.spinner("Loading..."):
                result = ocrdetector1.reader.readtext(np.array(image))  # PIL image to numpy array

                # Collect the results in a dictionary keyed by predicted text:
                textdic_easyocr = {}
                for pred_coor, pred_text, pred_confidence in result:
                    textdic_easyocr[pred_text] = {'pred_confidence': pred_confidence}

                # Draw the detection boxes on the image.
                rectangle(image, result)

                # Create a dataframe showing the predicted text and prediction confidence.
                df = pd.DataFrame.from_dict(textdic_easyocr).T
                st.table(df)
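# Equivalent construction of the same table in one expression (sketch):
#
#     df = pd.DataFrame(
#         [(t, c) for _, t, c in result],
#         columns=['text', 'pred_confidence'],
#     ).set_index('text')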
elif page == "Speech & Text Emotion":
    st.header('Speech & Text Emotion')
    st.write(
        """
        """
    )
    uploaded_file = st.file_uploader("Choose an audio file", type=["mp3", "wav", "ogg"])

    if uploaded_file is not None:
        st.audio(uploaded_file, format='audio/' + uploaded_file.type.split('/')[1])
        st.write("Audio file uploaded and playing.")
    else:
        st.write("Please upload an audio file.")

    # Only run the analysis once a file is actually available; librosa.load
    # would fail on None.
    if uploaded_file is not None and st.button("Analysis"):
        with st.spinner("Loading..."):
            st.header('Results of the Audio & Text analysis:')
            samples, sample_rate = librosa.load(uploaded_file, sr=16000)
            p_voice2text = infere_voice2text(samples)
            p_speechemotion = infere_speech_emotion(samples)
            p_textemotion = infere_text_emotion(p_voice2text)
            st.subheader("Text from the Audio:")
            st.write(p_voice2text)
            st.write("---")
            st.subheader("Speech emotion:")
            st.write(p_speechemotion)
            st.write("---")
            st.subheader("Text emotion:")
            st.write(p_textemotion)
            st.write("---")
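# Note: librosa.load(..., sr=16000) decodes the upload and resamples it to
# 16 kHz mono, the sample rate these speech models are assumed to expect.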