sameemul-haque committed
Commit: e74be72
Parent: f151d34

feat: add prompt template to answer only based on the context

Files changed (1): app.py (+16, -4)
app.py CHANGED
@@ -2,6 +2,7 @@ import pymongo
 import os, textwrap
 from dotenv import load_dotenv
 from langchain.chains import RetrievalQA
+from langchain.prompts import PromptTemplate
 from langchain_community.vectorstores import FAISS
 from langchain_community.llms import HuggingFaceHub
 from langchain_community.document_loaders import PyPDFLoader
@@ -68,9 +69,20 @@ def main():
 
     # Initialize the model
     llm=HuggingFaceHub(repo_id="mistralai/Mistral-7B-Instruct-v0.1", model_kwargs={"temperature":0.1 ,"max_length":512})
+
+    # prompt template
+    prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+
+    {context}
+
+    Question: {question}
+    """
+    PROMPT = PromptTemplate(
+        template=prompt_template, input_variables=["context", "question"]
+    )
 
     # create the chain to answer questions
-    qa_chain_instrucEmbed = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True)
+    qa_chain_instrucEmbed = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True, chain_type_kwargs={"prompt": PROMPT})
 
     def wrap_text_preserve_newlines(text, width=110):
         # Split the input text into lines based on newline characters
@@ -83,11 +95,11 @@ def main():
 
     llm_response = qa_chain_instrucEmbed(query)
     res = wrap_text_preserve_newlines(llm_response['result'])
-    # print(res)
+    print(res)
 
-    index_helpful_answer = res.find("Helpful Answer:")
+    index_helpful_answer = res.find("Answer:")
     if index_helpful_answer != -1:
-        helpful_answer_text = res[index_helpful_answer + len("Helpful Answer:"):]
+        helpful_answer_text = res[index_helpful_answer + len("Answer:"):]
         return(helpful_answer_text.strip())
     else:
         return("Error")