fracapuano committed
Commit 07b1b19
Parent: cda0f94

add: spinner for querying document, increment in number of sources referenced

Files changed (1):
  qa/qa.py  +13 -11
qa/qa.py CHANGED
@@ -5,10 +5,10 @@ from typing import Text, Union
 
 multiple_files = True
 
-def query_pipeline(index:VectorStore, query:Text, stream_answer:bool=False)->Text:
+def query_pipeline(index:VectorStore, query:Text, stream_answer:bool=False, n_sources:int=5)->Text:
     """This function reproduces the querying pipeline considering a given input index."""
     # retrieving the most relevant pieces of information within the knowledge base
-    sources = search_docs(index, query=query)
+    sources = search_docs(index, query=query, k=n_sources)
     # getting the answer, all at once
     answer = get_answer(sources, query=query, stream_answer=stream_answer)["output_text"]
 
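The new n_sources argument is threaded straight through to search_docs as k, which caps how many retrieved chunks are handed to get_answer. search_docs itself is defined elsewhere in the repo and is not part of this diff; a minimal sketch of what it plausibly does, assuming index is a LangChain-style VectorStore (the import paths and helper body below are assumptions, not the repo's actual code):

from typing import List, Text

from langchain.docstore.document import Document
from langchain.vectorstores.base import VectorStore


def search_docs(index: VectorStore, query: Text, k: int = 5) -> List[Document]:
    """Return the k chunks of the knowledge base most similar to the query."""
    # similarity_search embeds the query and returns the k nearest chunks;
    # raising k (the commit bumps it to 20 at the call site) trades latency
    # and prompt length for more context in the final answer.
    return index.similarity_search(query, k=k)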
@@ -94,7 +94,8 @@ def parse_file(file:Union[PDFFile, DocxFile, TxtFile, CodeFile]) -> None:
 # def document_embedding_pipeline(file:Union[PDFFile, DocxFile, TxtFile, CodeFile]) -> None:
 
 def qa_main():
-    """Main function for the QA app."""
+    """Main function for the QA app."""
+    st.title("Chat with a file 💬📖")
     st.write("Just upload something using and start chatting with a version of GPT4 that has read the file!")
 
     # OpenAI API Key - TODO: consider adding a key valid for everyone
@@ -186,14 +187,15 @@ def qa_main():
         full_response += \
             f"<i>Asking</i> <b>{chat_document}</b> <i>question</i> <b>{prompt}</b></i><br>"
         message_placeholder.markdown(full_response, unsafe_allow_html=True)
-        # retrieving the vector store associated to the chat document considered
-        chat_index = indexes[chat_document]
-        # producing the answer considered, live
-        for answer_bit in query_pipeline(chat_index, prompt, stream_answer=True):
-            full_response += answer_bit
-            message_placeholder.markdown(full_response + "▌", unsafe_allow_html=True)
-        # appending a final entering
-        full_response += "<br>"
+        with st.spinner("Querying the document..."):
+            # retrieving the vector store associated to the chat document considered
+            chat_index = indexes[chat_document]
+            # producing the answer considered, live
+            for answer_bit in query_pipeline(chat_index, prompt, stream_answer=True, n_sources=20):
+                full_response += answer_bit
+                message_placeholder.markdown(full_response + "▌", unsafe_allow_html=True)
+            # appending a final entering
+            full_response += "<br>"
         message_placeholder.markdown(full_response, unsafe_allow_html=True)
 
         # appending the final response obtained after having asked all the documents
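st.spinner only shows its "Querying the document..." message while the with-block is executing, so wrapping the whole retrieval-and-generation loop keeps the spinner up until the last token lands, while message_placeholder keeps repainting the partial answer (with a "▌" cursor) underneath it. A self-contained sketch of that UI pattern, with a dummy token stream standing in for query_pipeline:

import time

import streamlit as st

placeholder = st.empty()
full_response = ""
with st.spinner("Querying the document..."):
    # stand-in for query_pipeline(chat_index, prompt, stream_answer=True, n_sources=20)
    for chunk in ["The ", "answer ", "streams ", "in ", "piece ", "by ", "piece."]:
        full_response += chunk
        placeholder.markdown(full_response + "▌")
        time.sleep(0.2)
# final repaint without the cursor
placeholder.markdown(full_response)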
 
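One detail worth noting: the chat loop iterates over query_pipeline directly, so with stream_answer=True the function must produce an iterable of answer fragments rather than the single string its truncated body above suggests. A hedged sketch of that contract, reusing the get_answer helper from the diff (how the real code surfaces partial output is an assumption; only the yielding shape matters here):

from typing import Iterator, Text

def query_pipeline_stream(index, query: Text, n_sources: int = 5) -> Iterator[Text]:
    """Hypothetical streaming variant: yield the answer piece by piece."""
    sources = search_docs(index, query=query, k=n_sources)
    answer = get_answer(sources, query=query, stream_answer=True)["output_text"]
    # assumption: the real code yields tokens as they arrive; slicing the
    # finished text merely illustrates the iterable contract the UI loop expects
    for start in range(0, len(answer), 8):
        yield answer[start:start + 8]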