from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
# _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
# standalone question.
#
#
# Chat History:
# {chat_history}
# Follow Up Input: {question}
# Standalone question:"""
# CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
#
# template = """You are given an context and a question find an answer from the given
# context and provide an answer. If the answer is not in the context then
# simply say "No information found". Don't try to make an answer. And also don't use "According to the information
# provided in the given links," Question: {question} ========= {context} ========= Answer in Markdown: """
# QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
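# Prompt used to condense the chat history and the latest follow-up into a
# single standalone question before retrieval.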
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
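# Prompt used by the answer-synthesis chain; {context} receives the retrieved
# documents and {question} the standalone question.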
prompt_template = """Give an answer to the question based on the context below and try to explain in detail and use
bullet points if answer is long.
{context}
Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
)
def get_chain(vectorstore):
    # Deterministic LLM that rewrites the follow-up into a standalone question.
    question_llm = ChatOpenAI(temperature=0)
    # Non-streaming LLM that writes the final answer, capped at 300 tokens.
    answer_llm = ChatOpenAI(temperature=0, max_tokens=300)
    question_generator = LLMChain(llm=question_llm, prompt=CONDENSE_QUESTION_PROMPT)
    # "stuff" chain: all retrieved documents are packed into one QA_PROMPT call.
    doc_chain = load_qa_chain(answer_llm, chain_type="stuff", prompt=QA_PROMPT)
    qa_chain = ConversationalRetrievalChain(
        retriever=vectorstore.as_retriever(
            search_kwargs={"k": 14, "include_metadata": True}
        ),
        combine_docs_chain=doc_chain,
        question_generator=question_generator,
        return_source_documents=True,
    )
    return qa_chain
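
# --- Usage sketch ---
# Minimal example of wiring get_chain() to a vector store and asking a
# question. It assumes a FAISS index over OpenAIEmbeddings and an
# OPENAI_API_KEY in the environment; the sample texts below are
# placeholders, not part of the original app.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    texts = [
        "LangChain composes LLM calls into chains.",
        "FAISS provides fast similarity search over embeddings.",
    ]
    vectorstore = FAISS.from_texts(texts, OpenAIEmbeddings())

    chain = get_chain(vectorstore)
    # ConversationalRetrievalChain expects the new question plus prior history.
    result = chain({"question": "What does LangChain do?", "chat_history": []})
    print(result["answer"])
    print(result["source_documents"])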