Update app.py
app.py CHANGED
@@ -24,8 +24,7 @@ from PIL import Image
 from langchain.vectorstores import FAISS
 import transformers
 from pydub import AudioSegment
-
-from streamlit_card import card
+from streamlit_extras.stateful_button import button
 
 user_session_id = uuid.uuid4()
 
@@ -149,10 +148,6 @@ if prompt := st.chat_input("How can I help you today?"):
     message_history = "\n".join(list(get_message_history())[-3:])
     result = qa_chain(prompt)
     output = [result['result']]
-    for item in output:
-        full_response += item
-        message_placeholder.markdown(full_response + "▌")
-    message_placeholder.markdown(full_response)
 
     def generate_pdf():
         page_number = int(result['source_documents'][0].metadata['page'])
@@ -165,7 +160,7 @@ if prompt := st.chat_input("How can I help you today?"):
             highlight = page.add_highlight_annot(inst)
             highlight.update()
         doc.save("/home/user/app/pdf2image/output.pdf", garbage=4, deflate=True, clean=True)
-
+
         def pdf_page_to_image(pdf_file, page_number, output_image):
             pdf_document = fitz.open(pdf_file)
             page = pdf_document[page_number]
@@ -174,53 +169,38 @@ if prompt := st.chat_input("How can I help you today?"):
             pix.save(output_image, "png")
             pdf_document.close()
         pdf_page_to_image('/home/user/app/pdf2image/output.pdf', page_number, '/home/user/app/pdf2image/output.png')
+        pdfPath = '/home/user/app/pdf2image/output.png'
+        return pdfPath
         #image = Image.open('/home/user/app/pdf2image/output.png')
-        st.image('/home/user/app/pdf2image/output.png')
         #message_placeholder.image(image)
         #st.session_state.reference = True
 
 
+
     def generate_audio():
         with open('/home/user/app/audio/audio.mp3','wb') as sound_file:
             tts = gTTS(result['result'], lang='en', tld='co.in')
             tts.write_to_fp(sound_file)
         sound = AudioSegment.from_mp3("/home/user/app/audio/audio.mp3")
         sound.export("/home/user/app/audio/audio.wav", format="wav")
-
-
-    card(title="Reference",
-         text='Ref',
-         url=None,
-         on_click=generate_pdf)
-
-    card(title='Audio',
-         text='Audioo',
-         url = None,
-         on_click = generate_audio)
-
-    # if "reference" not in st.session_state:
-    #     st.session_state.reference = '/home/user/app/pdf2image/default_output.png'
-    # if "audio" not in st.session_state:
-    #     st.session_state.audio = "/home/user/app/audio/beep-06.mp3"
-
-
-    # with st.sidebar:
-    #     choice = st.radio("References and TTS",["Reference" , 'TTS'], index=None,)
-
-    #     if choice == 'Reference':
-    #         generate_pdf()
-    #         st.session_state['reference'] = '/home/user/app/pdf2image/output.png'
-    #         st.image(st.session_state['reference'])
-
-    #     if choice == 'TTS':
-    #         with open('/home/user/app/audio/audio.mp3','wb') as sound_file:
-    #             tts = gTTS(result['result'], lang='en', tld = 'co.in')
-    #             tts.write_to_fp(sound_file)
-    #             sound = AudioSegment.from_mp3("/home/user/app/audio/audio.mp3")
-    #             sound.export("/home/user/app/audio/audio.wav", format="wav")
-    #             st.session_state['audio'] = '/home/user/app/audio/audio.wav'
-    #             st.audio(st.session_state['audio'])
-
-    st.session_state.messages.append({"role": "assistant", "content": full_response})
-
+        audioPath = "/home/user/app/audio/audio.wav"
+        return audioPath
 
+    for item in output:
+        full_response += item
+        message_placeholder.markdown(full_response + "▌")
+    message_placeholder.markdown(full_response)
+
+    if button('Audio', key='audio'):
+        ap = generate_audio()
+        st.audio(ap)
+
+    if button('Ref', key='ref'):
+        pp = generate_pdf()
+        st.image(pp)
+
+
+
+    st.session_state.messages.append({"role": "assistant", "content": full_response})
+
+
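A note on the import swap above: the old streamlit_card.card widgets triggered generate_pdf / generate_audio through an on_click callback, whereas streamlit_extras.stateful_button.button remembers that it has been clicked by persisting its state in st.session_state under the given key, so the reference image and the audio player keep rendering across Streamlit reruns. A minimal standalone sketch of that pattern, reusing the paths hard-coded in app.py (everything else here is illustrative, not the committed code):

import streamlit as st
from streamlit_extras.stateful_button import button

# Unlike a plain st.button, a stateful button keeps returning True after a
# click, because its value is stored in st.session_state under its key.
if button('Audio', key='audio'):
    # In app.py this path is returned by generate_audio().
    st.audio('/home/user/app/audio/audio.wav')

if button('Ref', key='ref'):
    # In app.py this path is returned by generate_pdf().
    st.image('/home/user/app/pdf2image/output.png')

The key argument is what keeps the two buttons' persisted state separate in st.session_state.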
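For the generate_pdf side of the change, the hunks show only fragments: the highlight calls, doc.save, the nested pdf_page_to_image helper, and the new return pdfPath. The lines that open the source PDF and locate the text to highlight fall outside the diff context, so the following is only a rough, self-contained sketch of the flow the function appears to implement with PyMuPDF; the search_for step, the get_pixmap rendering call, and the source path are assumptions, not lines from this commit:

import fitz  # PyMuPDF

def highlight_page_as_png(src_pdf, page_number, text, out_pdf, out_png):
    # Highlight every occurrence of `text` on the cited page, save a cleaned
    # copy of the PDF, then render that page to a PNG and return its path.
    doc = fitz.open(src_pdf)
    page = doc[page_number]
    for inst in page.search_for(text):            # assumed: how `inst` is obtained
        highlight = page.add_highlight_annot(inst)
        highlight.update()
    doc.save(out_pdf, garbage=4, deflate=True, clean=True)
    doc.close()

    pdf_document = fitz.open(out_pdf)
    pix = pdf_document[page_number].get_pixmap()  # assumed rendering call
    pix.save(out_png)
    pdf_document.close()
    return out_png

In the committed code the page number comes from result['source_documents'][0].metadata['page'], the output paths are fixed under /home/user/app/pdf2image/, and the returned pdfPath is what the new if button('Ref', ...) branch passes to st.image.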