# crag.py — Corrective-RAG (CRAG) graph for Veda Bot 2.0
import os

import streamlit as st

# Local tools: hybrid retriever and the Svarupa tool chain
from function_tools import tool_chain
from vector_tool import ensemble_retriever

# API keys come from Streamlit secrets so they never live in the source
os.environ['OPENAI_API_KEY'] = st.secrets["OPENAI_API_KEY"]
os.environ['TAVILY_API_KEY'] = st.secrets["TAVILY_API_KEY"]
### Retrieval Grader
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
# LLM models
llm_AI4 = ChatOpenAI(model="gpt-4-1106-preview", temperature=0)
# Data model
class GradeDocuments(BaseModel):
    """Binary score for relevance check on retrieved documents."""

    binary_score: str = Field(description="Documents are relevant to the question, 'yes' or 'no'")
# LLM with function call
structured_llm_grader = llm_AI4.with_structured_output(GradeDocuments)
# Prompt
system = """You are a grader assessing relevance of a retrieved document to a user question. \n
If the document contains keyword(s) or semantic meaning related to the question, grade it as relevant. \n
Give a binary score 'yes' or 'no' score to indicate whether the document is relevant to the question."""
grade_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "Retrieved document: \n\n {document} \n\n User question: {question}"),
    ]
)
retrieval_grader = grade_prompt | structured_llm_grader
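
# A minimal usage sketch (hypothetical inputs, shown for illustration only;
# invoking it makes a real OpenAI call):
#
#   result = retrieval_grader.invoke({
#       "question": "What does the Rig Veda say about Agni?",
#       "document": "Agni is the fire deity addressed in the first hymn...",
#   })
#   result.binary_score  # -> "yes" or "no"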
### Generate
from langchain.prompts import MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
# Prompt
#prompt = hub.pull("rlm/rag-prompt")
system_message = '''You are an AI assistant for answering questions about the Vedas and scriptures.
You are given a question together with documents extracted from the Svarupa Knowledge Base (https://svarupa.org/) and other sources.
Provide a conversational answer.
If you are not provided with any documents, say "I did not get any relevant context for this but I will reply to the best of my knowledge" and then write your answer.
If you don't know the answer, just say "Hmm, I'm not sure." Don't try to make up an answer.
If the question is not about the Vedas and scriptures, politely inform the user that you are tuned to only answer questions about that.'''
# An earlier, agent-style prompt kept for reference (unused):
'''
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_message),
        # Note the ordering of the fields in the prompt:
        #   1. history - the past messages between the user and the agent
        #   2. user - the user's current input
        #   3. agent_scratchpad - the agent's working space for thinking and
        #      invoking tools to respond to the user's input.
        # If the ordering changes, the messages are shown to the underlying
        # LLM in the wrong order and the agent will not work correctly.
        MessagesPlaceholder(variable_name="context"),
        ("user", "{question}"),
    ]
)
'''
generate_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_message),
        # Past turns injected by RunnableWithMessageHistory (see below);
        # without this placeholder the stored history never reaches the LLM.
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "Here is the given context: {context}\n\nQuestion: {question}\n\nFormulate an answer."),
    ]
)
# LLM (currently unused; generation below runs on llm_AI4)
llm_AI = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

# Post-processing: flatten retrieved documents into a single context string
def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)

# Chain
rag_chain = generate_prompt | llm_AI4 | StrOutputParser()
####----------------- TESTING (this prompt is not wired into the graph)
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant. Answer all questions to the best of your ability.",
        ),
        MessagesPlaceholder(variable_name="chat_history"),
        ("human", "{question}"),
    ]
)
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain.memory import ChatMessageHistory

# A single in-memory history shared by every caller: the session_id passed at
# invoke time is ignored, so all turns land in one conversation.
chat_history_for_chain = ChatMessageHistory()
chain_with_message_history = RunnableWithMessageHistory(
    rag_chain,
    lambda session_id: chat_history_for_chain,
    input_messages_key="question",
    history_messages_key="chat_history",
)
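
# Usage sketch (illustrative only; "unused" mirrors the session_id passed in
# generate() below, and the call makes a real OpenAI request):
#
#   chain_with_message_history.invoke(
#       {"context": "...retrieved text...", "question": "Who composed the hymns?"},
#       {"configurable": {"session_id": "unused"}},
#   )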
### Question Re-writer
# LLM
llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
# Prompt
system = """You a question re-writer that converts an input question to a better version that is optimized \n
for a search. Look at the input and try to reason about the underlying sematic intent / meaning."""
re_write_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system),
        ("human", "Here is the initial question: \n\n {question} \n Formulate an improved question."),
    ]
)
question_rewriter = re_write_prompt | llm | StrOutputParser()
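
# Usage sketch (hypothetical input; makes a real OpenAI call):
#
#   question_rewriter.invoke({"question": "agni first veda hymn meaning"})
#   -> e.g. "What is the meaning of the first hymn to Agni in the Rig Veda?"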
### Search
from langchain_community.tools.tavily_search import TavilySearchResults
# `max_results` is the documented field name; the original `k=2` is not a
# recognized parameter and likely had no effect.
web_search_tool = TavilySearchResults(max_results=2)
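
# Usage sketch (illustrative; makes a real Tavily API call):
#
#   web_search_tool.invoke({"query": "oldest veda"})
#   -> [{"url": "https://...", "content": "..."}, ...]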
from typing import List
from typing_extensions import TypedDict
from langchain.schema import Document
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        question: question
        generation: LLM generation
        web_search: whether to add web search
        messages: list of retrieved documents
    """
    question: str
    generation: str
    web_search: str
    messages: List[Document]
def retrieve(state):
    """
    Retrieve documents

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, messages, that contains retrieved documents
    """
    print("---VECTOR RETRIEVE---")
    question = state["question"]

    # Retrieval
    documents = ensemble_retriever.get_relevant_documents(question)

    # Replace each document's 'source' path with just the file name
    for doc in documents:
        try:
            file_path = doc.metadata['source']
            file_name = os.path.split(file_path)[1]
            doc.metadata['source'] = file_name
        except KeyError:
            # 'source' field is missing in the metadata
            doc.metadata['source'] = 'unavailable'
        except Exception as e:
            print(f"An error occurred while processing document: {e}")
    return {"messages": documents, "question": question}
def generate(state):
    """
    Generate answer

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, generation, that contains LLM generation
    """
    print("---GENERATE---")
    question = state["question"]
    messages = state["messages"]

    # RAG generation; format_docs flattens the Document list into one string
    generation = chain_with_message_history.invoke(
        {"context": format_docs(messages), "question": question},
        {"configurable": {"session_id": "unused"}},
    )
    return {"messages": messages, "question": question, "generation": generation}
def grade_documents(state):
    """
    Determines whether the retrieved documents are relevant to the question.

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): Updates messages key with only filtered relevant documents
    """
    print("---CHECK DOCUMENT RELEVANCE TO QUESTION---")
    question = state["question"]
    messages = state["messages"]

    # Score each doc
    filtered_docs = []
    web_search = "No"
    for d in messages:
        score = retrieval_grader.invoke({"question": question, "document": d.page_content})
        grade = score.binary_score
        if grade == "yes":
            print("---GRADE: DOCUMENT RELEVANT---")
            filtered_docs.append(d)
        else:
            print("---GRADE: DOCUMENT NOT RELEVANT---")
            continue

    # Pull structured results from the Svarupa tool chain as extra context
    print("---TOOLS RETRIEVE---")
    tool_documents = tool_chain.invoke(question)
    if tool_documents:
        for item in tool_documents:
            filtered_docs.append(Document(
                page_content=str(item['output']),
                metadata={"source": 'https://svarupa.org/home', "name": item['name']},
            ))

    # If nothing survived grading, fall back to a web search
    if not filtered_docs:
        print("--PERFORMING WEB SEARCH--")
        web_search = "Yes"
    return {"messages": filtered_docs, "question": question, "web_search": web_search}
def transform_query(state):
    """
    Transform the query to produce a better question.

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): Updates question key with a re-phrased question
    """
    print("---TRANSFORM QUERY---")
    question = state["question"]
    messages = state["messages"]

    # Re-write question
    better_question = question_rewriter.invoke({"question": question})
    return {"messages": messages, "question": better_question}
def web_search(state):
    """
    Web search based on the re-phrased question.

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): Updates messages key with appended web results
    """
    print("---WEB SEARCH---")
    question = state["question"]
    messages = state["messages"]

    # Web search; each Tavily hit becomes a Document with its URL as source
    docs = web_search_tool.invoke({"query": question})
    web_results = [Document(page_content=d["content"], metadata={"source": d["url"]}) for d in docs]
    messages.extend(web_results)
    return {"messages": messages, "question": question}
### Edges
def decide_to_generate(state):
    """
    Determines whether to generate an answer, or re-generate a question.

    Args:
        state (dict): The current graph state

    Returns:
        str: Binary decision for next node to call
    """
    print("---ASSESS GRADED DOCUMENTS---")
    web_search = state["web_search"]

    if web_search == "Yes":
        # All documents were filtered out by the relevance check,
        # so re-phrase the query and fall back to web search
        print("---DECISION: ALL DOCUMENTS ARE NOT RELEVANT TO QUESTION, TRANSFORM QUERY---")
        return "transform_query"
    else:
        # We have relevant documents, so generate an answer
        print("---DECISION: GENERATE---")
        return "generate"
from langgraph.graph import END, StateGraph

workflow = StateGraph(GraphState)

# Define the nodes
workflow.add_node("retrieve", retrieve)                 # retrieve
workflow.add_node("grade_documents", grade_documents)   # grade documents
workflow.add_node("generate", generate)                 # generate
workflow.add_node("transform_query", transform_query)   # transform query
workflow.add_node("web_search_node", web_search)        # web search

# Build graph
workflow.set_entry_point("retrieve")
workflow.add_edge("retrieve", "grade_documents")
workflow.add_conditional_edges(
    "grade_documents",
    decide_to_generate,
    {
        "transform_query": "transform_query",
        "generate": "generate",
    },
)
workflow.add_edge("transform_query", "web_search_node")
workflow.add_edge("web_search_node", "generate")
workflow.add_edge("generate", END)

# Compile
crag_app = workflow.compile()
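
# A minimal smoke test (hypothetical question; requires valid OpenAI and
# Tavily keys and makes real API calls):
if __name__ == "__main__":
    state = crag_app.invoke({"question": "What is the significance of Agni in the Rig Veda?"})
    print(state["generation"])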