from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
# from langchain.llms import OpenAI  # non-chat completion model, unused
# from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT, QA_PROMPT  # library defaults; custom prompts are defined below instead
# template = """You are given an context and a question find an answer from the given | |
# context and provide an answer. If the answer is not in the context then | |
# simply say "No information found". Don't try to make an answer. And also don't use "According to the information | |
# provided in the given links," Question: {question} ========= {context} ========= Answer in Markdown: """ | |
# QA_PROMPT = PromptTemplate(template=template, input_variables=["question", "context"]) | |
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a | |
standalone question. | |
Chat History: | |
{chat_history} | |
Follow Up Input: {question} | |
Standalone question:""" | |
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template) | |
prompt_template = """Give an answer to the question based on the context below and try to explain in detail and use | |
bullet points if answer is long. | |
{context} | |
Question: {question} | |
Helpful Answer:""" | |
QA_PROMPT = PromptTemplate( | |
template=prompt_template, input_variables=["context", "question"] | |
) | |
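
# Optional sanity check (illustrative, no API calls): both prompts must expose
# exactly the variables the chains below will pass in.
assert set(CONDENSE_QUESTION_PROMPT.input_variables) == {"chat_history", "question"}
assert set(QA_PROMPT.input_variables) == {"context", "question"}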
def get_chain(vectorstore):
    # from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler  # pair with streaming=True to stream tokens to stdout
    # Deterministic model for rewriting the follow-up into a standalone question
    llm = ChatOpenAI(temperature=0)
    # Model that answers from the retrieved documents; top_p is passed via
    # model_kwargs so it reaches the OpenAI API without a pydantic field warning
    answer_llm = ChatOpenAI(streaming=False, temperature=0, max_tokens=300,
                            model_kwargs={"top_p": 1})
    question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
    # "stuff" chain: concatenates all retrieved docs into QA_PROMPT's {context}
    doc_chain = load_qa_chain(answer_llm, chain_type="stuff", prompt=QA_PROMPT)
    # Simpler construction via from_llm; newer langchain releases dropped the
    # qa_prompt kwarg in favor of combine_docs_chain_kwargs={"prompt": QA_PROMPT}:
    # qa_chain = ConversationalRetrievalChain.from_llm(
    #     llm,
    #     vectorstore.as_retriever(search_kwargs={"k": 8, "include_metadata": True}),
    #     condense_question_prompt=CONDENSE_QUESTION_PROMPT,
    #     qa_prompt=QA_PROMPT,
    # )
    qa_chain = ConversationalRetrievalChain(
        retriever=vectorstore.as_retriever(
            # include_metadata is only honored by some vector stores (e.g. Pinecone)
            search_kwargs={"k": 14, "include_metadata": True}),
        combine_docs_chain=doc_chain,
        question_generator=question_generator,
        return_source_documents=True,
    )
    return qa_chain
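

# Usage sketch, assuming OPENAI_API_KEY is set and a FAISS index was built
# elsewhere with OpenAIEmbeddings; the "faiss_index" path and the sample
# question are placeholders, not values from this app:
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    vectorstore = FAISS.load_local("faiss_index", OpenAIEmbeddings())
    chain = get_chain(vectorstore)
    result = chain({"question": "What does this project do?", "chat_history": []})
    print(result["answer"])
    for doc in result["source_documents"]:
        print(doc.metadata)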