AlbertoFH98 committed on
Commit
ff71933
1 Parent(s): 71ebe48

Update app.py

Files changed (1): app.py +19 -18
app.py CHANGED
@@ -191,26 +191,27 @@ RESPUESTA: """
     with st.chat_message("assistant"):
         if 'GPT' not in genre:
             llm_response = qa_chain(prompt)
-            llm_response = utils.process_llm_response(llm_response, nlp)
-            st.markdown(llm_response)
-            start_time_str_list = []; start_time_seconds_list = []; end_time_seconds_list = []
-            for response in llm_response.split('\n'):
-                if re.search(r'(\d{2}:\d{2}:\d{2}(.\d{6})?)', response) != None:
-                    start_time_str, start_time_seconds, _, end_time_seconds = utils.add_hyperlink_and_convert_to_seconds(response, cleaned_prompt)
-                    start_time_str_list.append(start_time_str)
-                    start_time_seconds_list.append(start_time_seconds)
-                    end_time_seconds_list.append(end_time_seconds)
-
-            if start_time_str_list:
-                for start_time_seconds, start_time_str, end_time_seconds in zip(start_time_seconds_list, start_time_str_list, end_time_seconds_list):
-                    st.markdown("__Fragmento: " + start_time_str + "__")
-                    _, container, _ = st.columns([SIDE, WIDTH, SIDE])
-                    with container:
-                        st_player(youtube_video_url.replace("?enablejsapi=1", "") + f'?start={start_time_seconds}&end={end_time_seconds}')
         else:
             llm_response = utils.get_gpt_response(TRANSCRIPTION_PATH, prompt)
-            st.markdown(llm_response)
-
+
+        llm_response = utils.process_llm_response(llm_response, nlp)
+        st.markdown(llm_response)
+
+        start_time_str_list = []; start_time_seconds_list = []; end_time_seconds_list = []
+        for response in llm_response.split('\n'):
+            if re.search(r'(\d{2}:\d{2}:\d{2}(.\d{6})?)', response) != None:
+                start_time_str, start_time_seconds, _, end_time_seconds = utils.add_hyperlink_and_convert_to_seconds(response, cleaned_prompt)
+                start_time_str_list.append(start_time_str)
+                start_time_seconds_list.append(start_time_seconds)
+                end_time_seconds_list.append(end_time_seconds)
+
+        if start_time_str_list:
+            for start_time_seconds, start_time_str, end_time_seconds in zip(start_time_seconds_list, start_time_str_list, end_time_seconds_list):
+                st.markdown("__Fragmento: " + start_time_str + "__")
+                _, container, _ = st.columns([SIDE, WIDTH, SIDE])
+                with container:
+                    st_player(youtube_video_url.replace("?enablejsapi=1", "") + f'?start={start_time_seconds}&end={end_time_seconds}')
+
     st.session_state.messages.append({"role": "assistant", "content": llm_response})
 # -- Sample: streamlit run app.py -- --DEFAULT_SYSTEM_PROMPT_LINK=https://raw.githubusercontent.com/AlbertoUAH/Castena/main/prompts/default_system_prompt.txt --PODCAST_URL_VIDEO_PATH=https://raw.githubusercontent.com/AlbertoUAH/Castena/main/data/podcast_youtube_video.csv --TRANSCRIPTION=worldcast_roberto_vaquero --MODEL=togethercomputer/llama-2-7b-chat --EMB_MODEL=BAAI/bge-base-en-v1.5
 if __name__ == '__main__':
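
The moved block relies on utils.add_hyperlink_and_convert_to_seconds to turn the HH:MM:SS(.ffffff) timestamps matched by the regex into the integer seconds that the st_player ?start=/&end= query parameters expect; with this commit that block runs after the if/else, so both the qa_chain branch and the GPT branch get fragment players. The helper itself is not part of this diff, so the following is only a minimal sketch of the timestamp-to-seconds conversion it is assumed to perform; the function names here are illustrative, not the project's API.

# Minimal sketch (assumed behavior; the real helper lives in utils.py, which this
# commit does not touch): pull HH:MM:SS(.ffffff) timestamps out of a response line
# and convert them to whole seconds for the st_player ?start=/&end= parameters.
import re

TIMESTAMP_RE = re.compile(r'(\d{2}):(\d{2}):(\d{2})(?:\.\d{6})?')

def timestamp_to_seconds(hh: str, mm: str, ss: str) -> int:
    # 'HH:MM:SS' -> total seconds, ignoring the optional microsecond suffix
    return int(hh) * 3600 + int(mm) * 60 + int(ss)

def extract_fragment_seconds(line: str) -> list[int]:
    # Return the seconds value of every timestamp found in the line
    return [timestamp_to_seconds(*m.groups()) for m in TIMESTAMP_RE.finditer(line)]

# A line such as "Fragmento 00:01:30.000000 - 00:02:45.000000" yields [90, 165];
# those values would then feed f'?start={start_time_seconds}&end={end_time_seconds}'.
print(extract_fragment_seconds("Fragmento 00:01:30.000000 - 00:02:45.000000"))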