vishwask committed
Commit 1939650
Parent: 88a84bc

Update app.py

Files changed (1)
app.py: +24 -11
app.py CHANGED
@@ -213,10 +213,10 @@ if prompt := st.chat_input("How can I help you today?"):
 
     output = [result['result']]
 
-    for item in output:
-        full_response += item
-        message_placeholder.markdown(full_response + "▌")
-    message_placeholder.markdown(full_response)
+    # for item in output:
+    # full_response += item
+    # message_placeholder.markdown(full_response + "▌")
+    # message_placeholder.markdown(full_response)
     #st.write(repr(result['source_documents'][0].metadata['page']))
     #st.write(repr(result['source_documents'][0]))
 
@@ -266,15 +266,17 @@ if prompt := st.chat_input("How can I help you today?"):
         pdf_page_to_image('/home/user/app/pdf2image/output.pdf', page_number, '/home/user/app/pdf2image/output.png')
 
         image = Image.open('/home/user/app/pdf2image/output.png')
-        st.sidebar.image(image)
+        #st.sidebar.image(image)
         st.session_state.image_displayed = True
+        return image
 
     def generate_audio():
         sound_file = BytesIO()
         tts = gTTS(result['result'], lang='en')
         tts.write_to_fp(sound_file)
-        st.sidebar.audio(sound_file)
+        #st.sidebar.audio(sound_file)
         st.session_state.sound_played = True
+        return sound_file
 
 
     #st.button(':speaker:', type='primary',on_click=generate_audio)
@@ -322,11 +324,7 @@ if prompt := st.chat_input("How can I help you today?"):
     #st.button("Display Image", on_click=generate_pdf)
     #st.button("Play Sound", on_click=generate_audio)
 
-    if st.sidebar.button("Display Image"):
-        generate_pdf()
 
-    if st.sidebar.button("Play Sound"):
-        generate_audio()
 
     # # Check if the image has been displayed and display it if it has not
     # if not st.session_state.image_displayed:
@@ -335,7 +333,22 @@ if prompt := st.chat_input("How can I help you today?"):
     # # Check if the sound has been played and play it if it has not
     # if not st.session_state.sound_played:
     # generate_audio()
-
+
+
+    for item in output:
+        full_response += item
+        message_placeholder.markdown(full_response + "▌")
+    message_placeholder.markdown(full_response)
+
+    if st.sidebar.button("Display Image"):
+        a=generate_pdf()
+        message_placeholder.image(a)
+
+
+    if st.sidebar.button("Play Sound"):
+        x=generate_audio()
+        message_placeholder.audio(x)
+
     st.session_state.messages.append({"role": "assistant", "content": full_response})
 
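
Read as a whole, the commit stops streaming the answer at the top of the handler and instead has generate_pdf and generate_audio return an image and an audio buffer, which the sidebar buttons then push through message_placeholder after the streamed text has been written. Below is a minimal, self-contained sketch of that return-and-render pattern; the names used (answer_text, generate_pdf_image, generate_audio_clip) and the blank PIL image are illustrative stand-ins, not code copied from app.py.

# Sketch of the pattern this commit moves to: helpers return media objects and
# the caller decides where to render them. All names below are stand-ins.
from io import BytesIO

import streamlit as st
from gtts import gTTS
from PIL import Image

answer_text = "Example answer text"  # stands in for result['result']

def generate_pdf_image():
    # Stand-in for the rendered PDF page; the real app opens
    # /home/user/app/pdf2image/output.png here.
    image = Image.new("RGB", (200, 100), "white")
    st.session_state.image_displayed = True
    return image  # returned instead of calling st.sidebar.image(image)

def generate_audio_clip():
    # Synthesize speech into an in-memory buffer and hand it back
    # instead of calling st.sidebar.audio(sound_file).
    sound_file = BytesIO()
    tts = gTTS(answer_text, lang="en")
    tts.write_to_fp(sound_file)
    sound_file.seek(0)  # rewind so the player reads from the start
    st.session_state.sound_played = True
    return sound_file

message_placeholder = st.empty()
message_placeholder.markdown(answer_text)

# The sidebar buttons only trigger generation; rendering happens in the main
# area through the placeholder, as in the last hunk of the diff.
if st.sidebar.button("Display Image"):
    message_placeholder.image(generate_pdf_image())

if st.sidebar.button("Play Sound"):
    message_placeholder.audio(generate_audio_clip())

Note that an st.empty() placeholder holds a single element, so writing an image or audio element into it replaces whatever it showed before, which mirrors how the committed code reuses message_placeholder.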