HemanthSai7 committed on
Commit
c035779
1 Parent(s): c682e16

Backend changes for conversational qa

Browse files
StudybotAPI/backend/utils/chain_loader.py CHANGED
@@ -10,6 +10,7 @@ from langchain.chains import (
10
  )
11
  from langchain.llms import Clarifai
12
  from langchain.prompts import PromptTemplate
 
13
 
14
 
15
  async def llm_chain_loader(DATA_PATH: str):
@@ -19,7 +20,9 @@ async def llm_chain_loader(DATA_PATH: str):
19
  with open("backend/utils/prompt.txt", "r", encoding="utf8") as f:
20
  prompt = f.read()
21
 
22
- prompt = PromptTemplate(template=prompt, input_variables=["context", "question"])
 
 
23
 
24
  llm = Clarifai(
25
  pat=config.CLARIFAI_PAT,
@@ -29,12 +32,23 @@ async def llm_chain_loader(DATA_PATH: str):
29
  model_version_id=config.MODEL_VERSION_ID,
30
  )
31
 
32
- qa_chain = RetrievalQA.from_chain_type(
 
 
 
 
 
 
 
 
 
33
  llm=llm,
34
  chain_type="stuff",
35
- retriever=db.as_retriever(search_type="similarity",search_kwargs={"k": 2}),
36
- return_source_documents=True,
37
- chain_type_kwargs={"prompt": prompt},
 
 
38
  )
39
 
40
  app.state.qa_chain = qa_chain
 
10
  )
11
  from langchain.llms import Clarifai
12
  from langchain.prompts import PromptTemplate
13
+ from langchain.memory import ConversationBufferMemory
14
 
15
 
16
  async def llm_chain_loader(DATA_PATH: str):
 
20
  with open("backend/utils/prompt.txt", "r", encoding="utf8") as f:
21
  prompt = f.read()
22
 
23
+ prompt = PromptTemplate(
24
+ template=prompt, input_variables=["context", "chat_history", "question"]
25
+ )
26
 
27
  llm = Clarifai(
28
  pat=config.CLARIFAI_PAT,
 
32
  model_version_id=config.MODEL_VERSION_ID,
33
  )
34
 
35
+ # qa_chain = RetrievalQA.from_chain_type(
36
+ # llm=llm,
37
+ # chain_type="stuff",
38
+ # retriever=db.as_retriever(search_type="similarity",search_kwargs={"k": 2}),
39
+ # return_source_documents=True,
40
+ # chain_type_kwargs={"prompt": prompt},
41
+ # )
42
+
43
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
44
+ qa_chain = ConversationalRetrievalChain.from_llm(
45
  llm=llm,
46
  chain_type="stuff",
47
+ retriever=db.as_retriever(search_type="similarity", search_kwargs={"k": 2}),
48
+ # return_source_documents=True,
49
+ # chain_type_kwargs={"prompt": prompt},
50
+ condense_question_prompt=prompt,
51
+ memory=memory,
52
  )
53
 
54
  app.state.qa_chain = qa_chain
StudybotAPI/backend/utils/prompt.txt CHANGED
@@ -9,4 +9,10 @@ The "SOURCES" part should be a reference to the source of the document from whic
9
  Consider a student engaged in the study of any theoretical subject, where the abundance of concepts and events poses a challenge to memorization. The aim is to overcome this hurdle and be capable of providing brief answers to specific queries. For example, if a student forgets a key concept, date, or event, they can ask the bot a question like "What is [specific query]?" for a concise answer.
10
  Note that students can also ask multiple questions in a single query. For example, "What is [specific query 1]?, What is [specific query 2]?, What is [specific query 3]?".
11
 
12
- {question} [/INST]
 
 
 
 
 
 
 
9
  Consider a student engaged in the study of any theoretical subject, where the abundance of concepts and events poses a challenge to memorization. The aim is to overcome this hurdle and be capable of providing brief answers to specific queries. For example, if a student forgets a key concept, date, or event, they can ask the bot a question like "What is [specific query]?" for a concise answer.
10
  Note that students can also ask multiple questions in a single query. For example, "What is [specific query 1]?, What is [specific query 2]?, What is [specific query 3]?".
11
 
12
+ Chat History:
13
+ {chat_history}
14
+
15
+ Follow Up Input: {question}
16
+ Standalone question:
17
+
18
+ [/INST]