# Final_Assignment_Template / langraph_agent.py
"""LangGraph Agent"""
import os
from dotenv import load_dotenv
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import tools_condition
from langgraph.prebuilt import ToolNode
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.document_loaders import ArxivLoader
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import SystemMessage, HumanMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from supabase.client import Client, create_client
import requests # NEW: for HTTP requests to scoring API
from dataclasses import dataclass
import time # For timestamp in code agent wrapper
from code_agent import run_agent # Compiled code-interpreter graph helper
from langchain_core.messages import AIMessage
from langfuse.langchain import CallbackHandler
# Initialize Langfuse CallbackHandler for LangGraph/Langchain (tracing)
try:
langfuse_handler = CallbackHandler()
except Exception as e:
print(f"Warning: Could not initialize Langfuse handler: {e}")
langfuse_handler = None
# Load environment variables - try multiple files
load_dotenv() # Try .env first
load_dotenv("env.local") # Try env.local as backup
print(f"SUPABASE_URL loaded: {bool(os.environ.get('SUPABASE_URL'))}")
print(f"GROQ_API_KEY loaded: {bool(os.environ.get('GROQ_API_KEY'))}")
# ---------------------------------------------------------------------------
# Lightweight in-memory caches and constants for smarter retrieval/ingest
# ---------------------------------------------------------------------------
import hashlib # NEW: for hashing payloads / queries
TTL = 300 # seconds – how long we keep similarity-search results
SIMILARITY_THRESHOLD = 0.85 # cosine score above which we assume we already know the answer
# (query_hash -> (timestamp, results))
QUERY_CACHE: dict[str, tuple[float, list]] = {}
# task IDs whose attachments we already attempted to download this session
PROCESSED_TASKS: set[str] = set()
# hash_ids of Q/A payloads we have already upserted during this session
SEEN_HASHES: set[str] = set()
# Base URL of the scoring API (duplicated here to avoid circular import with basic_agent)
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
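# Optional helpers (illustrative only, not wired into the graph): the same TTL-cache
# logic is inlined in the `retriever` node below; these sketches just document the
# intended semantics of QUERY_CACHE.
def _cache_get(query: str):
    """Return cached similarity-search results for *query* if they are younger than TTL."""
    key = hashlib.sha256(query.encode()).hexdigest()
    entry = QUERY_CACHE.get(key)
    if entry and time.time() - entry[0] < TTL:
        return entry[1]
    return None
def _cache_put(query: str, results: list) -> None:
    """Cache similarity-search results for *query* with the current timestamp."""
    key = hashlib.sha256(query.encode()).hexdigest()
    QUERY_CACHE[key] = (time.time(), results)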
@tool
def wiki_search(input: str) -> str:
    """Search Wikipedia for a query and return a maximum of 2 results.
    Args:
        input: The search query."""
    try:
        search_docs = WikipediaLoader(query=input, load_max_docs=2).load()
        if not search_docs:
            return "No Wikipedia results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            [
                f'<Document source="{doc.metadata.get("source", "Unknown")}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
                for doc in search_docs
            ])
        return formatted_search_docs
    except Exception as e:
        print(f"Error in wiki_search: {e}")
        return f"Error searching Wikipedia: {e}"
@tool
def web_search(input: str) -> str:
    """Search Tavily for a query and return a maximum of 3 results.
    Args:
        input: The search query."""
    try:
        # TavilySearchResults.invoke expects the query string as its first ("input") argument.
        # Passing it positionally avoids the earlier `missing 1 required positional argument: 'input'` error.
        search_docs = TavilySearchResults(max_results=3).invoke(input)
        if not search_docs:
            return "No web search results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            [
                f'<Document source="{doc.get("url", "Unknown")}" />\n{doc.get("content", "No content")}\n</Document>'
                for doc in search_docs
            ])
        return formatted_search_docs
    except Exception as e:
        print(f"Error in web_search: {e}")
        return f"Error searching web: {e}"
@tool
def arvix_search(input: str) -> str:
    """Search Arxiv for a query and return a maximum of 3 results.
    Args:
        input: The search query."""
    try:
        search_docs = ArxivLoader(query=input, load_max_docs=3).load()
        if not search_docs:
            return "No Arxiv results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            [
                f'<Document source="{doc.metadata.get("source", "Unknown")}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
                for doc in search_docs
            ])
        return formatted_search_docs
    except Exception as e:
        print(f"Error in arvix_search: {e}")
        return f"Error searching Arxiv: {e}"
@tool
def run_python(input: str) -> str:
"""Execute Python code in a restricted sandbox (code-interpreter).
Pass **any** coding or file-manipulation task here and the agent will
    compute the answer by running Python. Not the entire standard library is
    available, and heavy networking is disabled. Suitable for: math, data frames,
    small file parsing, algorithmic questions.
"""
return run_agent(input)
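# Illustrative usage only (assumes code_agent.run_agent returns the sandbox's stdout
# as a string). Single-input @tool functions can be exercised directly, e.g.:
#   run_python.invoke("print(sum(range(10)))")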
# load the system prompt from the file
with open("./prompts/system_prompt.txt", "r", encoding="utf-8") as f:
system_prompt = f.read()
# System message
sys_msg = SystemMessage(content=system_prompt)
# build a retriever
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2") # dim=768
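# Assumption: the Supabase project already exposes a `documents` table and a
# `match_documents_langchain` RPC set up for 768-dimensional embeddings (the usual
# LangChain SupabaseVectorStore schema); without them the similarity searches and
# the ingest node below will fail at runtime.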
# Try to create the Supabase client with error handling
try:
    supabase_url = os.environ.get("SUPABASE_URL")
    supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")
    if not supabase_url or not supabase_key:
        print("Warning: Supabase credentials not found, vector store will be disabled")
        vector_store = None
        retriever_tool = None
    else:
        supabase: Client = create_client(supabase_url, supabase_key)
        vector_store = SupabaseVectorStore(
            client=supabase,
            embedding=embeddings,
            table_name="documents",
            query_name="match_documents_langchain",
        )
        # Use a distinct name so the imported create_retriever_tool helper is not
        # shadowed by the tool instance it produces.
        retriever_tool = create_retriever_tool(
            retriever=vector_store.as_retriever(),
            name="Question Search",
            description="A tool to retrieve similar questions from a vector store.",
        )
except Exception as e:
    print(f"Warning: Could not initialize Supabase vector store: {e}")
    vector_store = None
    retriever_tool = None
tools = [
wiki_search,
web_search,
arvix_search,
run_python,
]
if retriever_tool:
    tools.append(retriever_tool)
# ---------------------------------------------------------------------------
# Code-interpreter integration helpers
# ---------------------------------------------------------------------------
def _needs_code(state: dict) -> bool: # type: ignore[override]
"""Heuristic: does *state* look like a coding request?"""
messages = state.get("messages", [])
if not messages:
return False
last_content = messages[-1].content.lower()
triggers = [
"```python",
"write python",
"run this code",
"file manipulation",
"csv",
"pandas",
"json",
"plot",
"fibonacci",
]
return any(t in last_content for t in triggers)
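# Illustrative behaviour of the heuristic above (not exhaustive): a last message such
# as "Please run this code and plot the result" matches the "run this code" / "plot"
# triggers and routes to the code_exec branch, while a plain factual question falls
# through to the ingest branch via the conditional edge map in build_graph below.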
def _code_exec_wrapper(state: dict): # type: ignore[override]
"""Delegate the user query to the sandboxed Python interpreter."""
# Get the last human message's content (fallback to empty string)
human_msgs = [m.content for m in state.get("messages", []) if m.type == "human"]
query = "\n\n".join(human_msgs)
# Execute code-interpreter with full context (question + attachments)
result = run_agent(query)
# Persist the raw stdout so we can convert it to an AI message downstream
return {"code_result": result}
def _code_to_message(state: dict):  # type: ignore[override]
    """Turn the interpreter's stdout into an AIMessage so the LLM can see it."""
    if not state.get("code_result"):
        return {}
    return {"messages": [AIMessage(content=state["code_result"])]}
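# ---------------------------------------------------------------------------
# Extended graph state. MessagesState only carries `messages`, so the extra keys
# written by the nodes below (`should_ingest` from the retriever, `code_result`
# from the code-exec wrapper) need their own channels to persist between nodes.
# ---------------------------------------------------------------------------
class AgentState(MessagesState):
    should_ingest: bool
    code_result: str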
# ---------------------------------------------------------------------------
# NEW: Ingest node – write back to vector store if `should_ingest` flag set
# ---------------------------------------------------------------------------
def ingest(state: AgentState):
"""Persist helpful Q/A pairs (and any attachment snippet) to the vector DB."""
try:
if not state.get("should_ingest") or not vector_store:
return {}
question_text = state["messages"][0].content
answer_text = state["messages"][-1].content
attach_snippets = "\n\n".join(
m.content for m in state["messages"] if str(m.content).startswith("Attached file content")
)
payload = f"Question:\n{question_text}\n\nAnswer:\n{answer_text}"
if attach_snippets:
payload += f"\n\n{attach_snippets}"
hash_id = hashlib.sha256(payload.encode()).hexdigest()
if hash_id in SEEN_HASHES:
print("Ingest: Duplicate payload within session – skip")
return {}
SEEN_HASHES.add(hash_id)
vector_store.add_texts([payload], metadatas=[{"hash_id": hash_id, "timestamp": time.time()}])
print("Ingest: Stored new Q/A pair in vector store")
except Exception as ing_e:
print(f"Ingest node: Error while upserting – {ing_e}")
return {}
# Build graph function
def build_graph(provider: str = "groq"):
"""Build the graph"""
    # Select the chat model provider (environment variables are loaded at module level)
if provider == "google":
# Google Gemini
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
elif provider == "groq":
# Groq https://console.groq.com/docs/models
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0.6)  # other options: gemma2-9b-it
elif provider == "huggingface":
# TODO: Add huggingface endpoint
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
else:
raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")
# Bind tools to LLM
llm_with_tools = llm.bind_tools(tools)
# Node
def assistant(state: MessagesState):
"""Assistant node"""
try:
print(f"Assistant node: Processing {len(state['messages'])} messages")
result = llm_with_tools.invoke(state["messages"])
print(f"Assistant node: LLM returned result type: {type(result)}")
return {"messages": [result]}
except Exception as e:
print(f"Error in assistant node: {e}")
error_msg = AIMessage(content=f"I encountered an error: {e}")
return {"messages": [error_msg]}
def retriever(state: MessagesState):
"""Retriever node (smart fetch + similarity search)"""
try:
print(f"Retriever node: Processing {len(state['messages'])} messages")
if not state["messages"]:
print("Retriever node: No messages in state")
return {"messages": [sys_msg]}
# Extract the *latest* user query content
query_content = state["messages"][-1].content
# ----------------------------------------------------------------------------------
# Similarity search with an in-process cache
# ----------------------------------------------------------------------------------
q_hash = hashlib.sha256(query_content.encode()).hexdigest()
now = time.time()
if q_hash in QUERY_CACHE and now - QUERY_CACHE[q_hash][0] < TTL:
similar_question = QUERY_CACHE[q_hash][1]
print("Retriever node: Cache hit for similarity search")
else:
if vector_store:
print(f"Retriever node: Searching vector store for similar questions …")
try:
similar_question = vector_store.similarity_search_with_relevance_scores(query_content, k=2)
except Exception as vs_e:
print(f"Retriever node: Vector store search error – {vs_e}")
similar_question = []
QUERY_CACHE[q_hash] = (now, similar_question)
else:
similar_question = []
print("Retriever node: Vector store not available, skipping similarity search")
            # Decide whether this exchange should later be ingested.
            # Mutating `state` in place does not persist across nodes, so track the
            # flag locally and return it as part of the state update below.
            top_score = similar_question[0][1] if similar_question else 0.0
            should_ingest = top_score < SIMILARITY_THRESHOLD
# ----------------------------------------------------------------------------------
# Attachment fetch (only once per task_id during this session)
# ----------------------------------------------------------------------------------
attachment_msg = None
matched_task_id = None
try:
resp = requests.get(f"{DEFAULT_API_URL}/questions", timeout=30)
resp.raise_for_status()
questions = resp.json()
for q in questions:
if str(q.get("question")).strip() == str(query_content).strip():
matched_task_id = str(q.get("task_id"))
break
if matched_task_id and matched_task_id not in PROCESSED_TASKS:
print(f"Retriever node: Downloading attachment for task {matched_task_id} …")
file_resp = requests.get(f"{DEFAULT_API_URL}/files/{matched_task_id}", timeout=60)
if file_resp.status_code == 200 and file_resp.content:
try:
file_text = file_resp.content.decode("utf-8", errors="replace")
except Exception:
file_text = "(binary or non-UTF8 file omitted)"
MAX_CHARS = 8000
if len(file_text) > MAX_CHARS:
file_text = file_text[:MAX_CHARS] + "\n… (truncated)"
attachment_msg = HumanMessage(content=f"Attached file content for task {matched_task_id}:\n```python\n{file_text}\n```")
print("Retriever node: Attachment added to context")
                        should_ingest = True  # ensure we store this new info
else:
print(f"Retriever node: No attachment for task {matched_task_id} (status {file_resp.status_code})")
PROCESSED_TASKS.add(matched_task_id)
except Exception as api_e:
print(f"Retriever node: Error while fetching attachment – {api_e}")
# ----------------------------------------------------------------------------------
# Build message list for downstream LLM
# ----------------------------------------------------------------------------------
msgs = [sys_msg] + state["messages"]
if similar_question:
example_doc = similar_question[0][0] if isinstance(similar_question[0], tuple) else similar_question[0]
example_msg = HumanMessage(content=f"Here I provide a similar question and answer for reference: \n\n{example_doc.page_content}")
msgs.append(example_msg)
print("Retriever node: Added example message from similar question")
if attachment_msg:
msgs.append(attachment_msg)
            return {"messages": msgs, "should_ingest": should_ingest}
except Exception as e:
print(f"Error in retriever node: {e}")
return {"messages": [sys_msg] + state["messages"]}
    builder = StateGraph(AgentState)
builder.add_node("retriever", retriever)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
builder.add_node("code_exec", _code_exec_wrapper)
builder.add_node("code_to_message", _code_to_message)
builder.add_node("ingest", ingest)
# Edge layout
builder.add_edge(START, "retriever")
builder.add_edge("retriever", "assistant")
# Conditional branch: decide whether to run code interpreter
builder.add_conditional_edges(
"assistant",
_needs_code,
{True: "code_exec", False: "ingest"},
)
# Flow after code execution: inject result then resume chat
builder.add_edge("code_exec", "code_to_message")
builder.add_edge("code_to_message", "assistant")
builder.add_conditional_edges(
"assistant",
tools_condition,
)
builder.add_edge("tools", "assistant")
# Compile graph
return builder.compile()
# test
if __name__ == "__main__":
question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"
# Build the graph
graph = build_graph(provider="groq")
# Run the graph
messages = [HumanMessage(content=question)]
    config = {"callbacks": [langfuse_handler]} if langfuse_handler else {}
    messages = graph.invoke({"messages": messages}, config=config)
for m in messages["messages"]:
m.pretty_print()