Samarth991 committed
Commit 3b8e35c
1 Parent(s): f9e518b

Adding sources

Files changed (1)
app.py +10 -5
app.py CHANGED
@@ -64,6 +64,7 @@ def document_loader(file_path,api_key,doc_type='pdf',llm='Huggingface',temperatu
     embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-base',model_kwargs={"device": DEVICE})
 
     texts = process_documents(documents=document)
+    global vector_db
     vector_db = FAISS.from_documents(documents=texts, embedding= embedding_model)
     global qa
     qa = RetrievalQA.from_chain_type(llm=chat_application(llm_service=llm,key=api_key,
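Note: the added global vector_db statement exposes the FAISS index built inside document_loader at module level, so infer() can reuse it for source lookup. A minimal sketch of that pattern, assuming standalone helper names (build_index, query_index) that are not part of this commit:

# Sketch only: build a FAISS index in one function and query it from another
# through a module-level global, mirroring what the hunk above does.
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import FAISS

vector_db = None  # shared handle, assigned via the global statement below

def build_index(texts):
    global vector_db
    embedding_model = SentenceTransformerEmbeddings(model_name='thenlper/gte-base')
    vector_db = FAISS.from_documents(documents=texts, embedding=embedding_model)

def query_index(question, k=4):
    # similarity_search_with_score returns (Document, score) pairs; lower score means closer
    return vector_db.similarity_search_with_score(question, k=k)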
@@ -114,6 +115,10 @@ def infer(question, history):
     # chat_history = res
     print("Question in infer :",question)
     result = qa({"query": question})
+    matching_docs_score = vector_db.similarity_search_with_score(question)
+
+    print(" Matching_doc ",matching_docs_score)
+
     return result["result"]
 
 def bot(history):
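Note: similarity_search_with_score returns a list of (Document, score) tuples, where the score is the raw FAISS distance (lower means more similar); this hunk only prints them. A hedged sketch of turning those pairs into HTML for the new sources panel; docs_to_html and its markup are assumptions, not code from this commit:

# Hypothetical helper: format (Document, score) pairs as a small HTML block
# suitable for a gr.HTML component.
def docs_to_html(matching_docs_score):
    rows = []
    for doc, score in matching_docs_score:
        snippet = doc.page_content[:300]
        rows.append(f"<p><b>score {score:.3f}</b>: {snippet}</p>")
    return "\n".join(rows)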
@@ -127,7 +132,6 @@ def bot(history):
         yield history
 
 def add_text(history, text):
-
     history = history + [(text, None)]
     return history, ""
 
@@ -152,8 +156,8 @@ with gr.Blocks(css=css) as demo:
 
         with gr.Group():
             chatbot = gr.Chatbot(height=300)
-            # with gr.Row():
-            #     sources = gr.HTML(value = "Source paragraphs where I looked for answers will appear here", height=300)
+            with gr.Row():
+                sources = gr.HTML(value = "Source paragraphs where I looked for answers will appear here", height=300)
             with gr.Row():
                 question = gr.Textbox(label="Type your question !",lines=1).style(full_width=True)
                 submit_btn = gr.Button(value="Send message", variant="primary", scale = 1)
@@ -195,8 +199,9 @@ with gr.Blocks(css=css) as demo:
         load_pdf.click(loading_file, None, langchain_status, queue=False)
         load_pdf.click(document_loader, inputs=[pdf_doc,API_key,file_extension,LLM_option,temperature,max_new_tokens], outputs=[langchain_status], queue=False)
 
-        question.submit(add_text, [chatbot, question], [chatbot, question]).then(bot, chatbot, chatbot)
-        submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(bot, chatbot, chatbot)
+        question.submit(add_text, inputs=[chatbot, question], outputs=[chatbot, question]).then(bot, chatbot, chatbot)
+        submit_btn.click(add_text, inputs=[chatbot, question], outputs=[chatbot, question]).then(bot, chatbot, chatbot)
+        # submit_btn.then(chatf.highlight_found_text, [chatbot, sources], [sources])
         clean_chat_btn.click(clear_chat, [], chatbot)
 
 
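Note: the last hunk still leaves the highlight step (chatf.highlight_found_text) commented out, so nothing writes into the new sources component yet. One possible wiring, sketched under the assumption of the hypothetical docs_to_html helper above; show_sources is an invented name and not part of this commit:

# Assumed follow-up: after the bot step, re-query the index with the latest
# user message and push formatted matches into the sources HTML component.
def show_sources(history):
    last_question = history[-1][0]
    matching_docs_score = vector_db.similarity_search_with_score(last_question)
    return docs_to_html(matching_docs_score)

question.submit(add_text, inputs=[chatbot, question], outputs=[chatbot, question]) \
    .then(bot, chatbot, chatbot) \
    .then(show_sources, chatbot, sources)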