from operator import itemgetter

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI

from helper import get_llm, get_retriever


def get_answer_using_hyde(link: str, question: str) -> str:
    """Answer *question* about the document at *link* using HyDE retrieval.

    HyDE (Hypothetical Document Embeddings) works in three steps:
    1. Ask an LLM to write a hypothetical passage that answers the question.
    2. Use that passage — not the raw question — as the retrieval query.
    3. Answer the question grounded in the retrieved context.

    Args:
        link: URL passed to ``get_retriever`` to build the vector-store retriever.
        question: The user's question.

    Returns:
        The final LLM answer as a string.
    """
    # Step 1: HyDE document generation — chain that writes a hypothetical passage.
    hyde_template = """Please write a scientific paper passage to answer the question
Question: {question}
Passage:"""
    prompt_hyde = ChatPromptTemplate.from_template(hyde_template)
    generate_docs_for_retrieval = (
        prompt_hyde | ChatOpenAI(temperature=0) | StrOutputParser()
    )

    # Step 2: retrieve documents using the generated passage as the query.
    # NOTE: the original code invoked generate_docs_for_retrieval once here and
    # discarded the result — a wasted paid LLM call. The composed retrieval_chain
    # below already runs the generation step, so the extra invoke is removed.
    retriever = get_retriever(link)
    retrieval_chain = generate_docs_for_retrieval | retriever
    retrieved_docs = retrieval_chain.invoke({"question": question})

    # Step 3: RAG — answer from the retrieved context.
    rag_template = """Answer the following question based on this context:

{context}

Question: {question}
"""
    prompt = ChatPromptTemplate.from_template(rag_template)
    final_rag_chain = prompt | get_llm() | StrOutputParser()

    return final_rag_chain.invoke({"context": retrieved_docs, "question": question})


# if __name__ == "__main__":
#     link = "https://lilianweng.github.io/posts/2023-06-23-agent/"
#     question = "What is task decomposition for LLM agents?"
#     answer = get_answer_using_hyde(link, question)
#     print(answer)