"""LangGraph Agent"""

import hashlib
import os
import time

import requests
from dotenv import load_dotenv

from langgraph.graph import START, END, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition

from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
from langchain_community.vectorstores import SupabaseVectorStore
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_core.tools import tool
from langchain.tools.retriever import create_retriever_tool
from supabase.client import Client, create_client
from langfuse.langchain import CallbackHandler

from code_agent import run_agent


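# Langfuse tracing is optional: if the handler cannot be initialised (e.g.
# missing credentials), the agent simply runs without callbacks.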
try:
    langfuse_handler = CallbackHandler()
except Exception as e:
    print(f"Warning: Could not initialize Langfuse handler: {e}")
    langfuse_handler = None


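# Load environment variables; python-dotenv does not override variables that
# are already set, so values from .env take precedence over env.local.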
load_dotenv()
load_dotenv("env.local")

print(f"SUPABASE_URL loaded: {bool(os.environ.get('SUPABASE_URL'))}")
print(f"GROQ_API_KEY loaded: {bool(os.environ.get('GROQ_API_KEY'))}")


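# In-process caches: QUERY_CACHE memoises similarity searches for TTL seconds,
# PROCESSED_TASKS remembers which task attachments were already fetched, and
# SEEN_HASHES deduplicates payloads ingested within a session.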
TTL = 300
SIMILARITY_THRESHOLD = 0.85

QUERY_CACHE: dict[str, tuple[float, list]] = {}
PROCESSED_TASKS: set[str] = set()
SEEN_HASHES: set[str] = set()

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


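# Tools exposed to the LLM. Each returns a plain string so the ToolNode can
# hand the result straight back to the model.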
@tool
def wiki_search(input: str) -> str:
    """Search Wikipedia for a query and return a maximum of 2 results.

    Args:
        input: The search query."""
    try:
        search_docs = WikipediaLoader(query=input, load_max_docs=2).load()
        if not search_docs:
            return "No Wikipedia results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            f'<Document source="{doc.metadata.get("source", "Unknown")}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
            for doc in search_docs
        )
        return formatted_search_docs
    except Exception as e:
        print(f"Error in wiki_search: {e}")
        return f"Error searching Wikipedia: {e}"


@tool
def web_search(input: str) -> str:
    """Search Tavily for a query and return a maximum of 3 results.

    Args:
        input: The search query."""
    try:
        search_docs = TavilySearchResults(max_results=3).invoke(input)
        if not search_docs:
            return "No web search results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            f'<Document source="{doc.get("url", "Unknown")}">\n{doc.get("content", "No content")}\n</Document>'
            for doc in search_docs
        )
        return formatted_search_docs
    except Exception as e:
        print(f"Error in web_search: {e}")
        return f"Error searching web: {e}"


@tool
def arxiv_search(input: str) -> str:
    """Search Arxiv for a query and return a maximum of 3 results.

    Args:
        input: The search query."""
    try:
        search_docs = ArxivLoader(query=input, load_max_docs=3).load()
        if not search_docs:
            return "No Arxiv results found for the query."
        formatted_search_docs = "\n\n---\n\n".join(
            f'<Document source="{doc.metadata.get("source", "Unknown")}" page="{doc.metadata.get("page", "")}">\n{doc.page_content[:1000]}\n</Document>'
            for doc in search_docs
        )
        return formatted_search_docs
    except Exception as e:
        print(f"Error in arxiv_search: {e}")
        return f"Error searching Arxiv: {e}"


@tool
def run_python(input: str) -> str:
    """Execute Python code in a restricted sandbox (code interpreter).

    Pass **any** coding or file-manipulation task here and the agent will
    compute the answer by running Python. The entire standard library is NOT
    available; heavy networking is disabled. Suitable for: math, data-frames,
    small file parsing, algorithmic questions.
    """
    return run_agent(input)


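# The system prompt is prepended to every conversation by the retriever node.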
with open("./prompts/system_prompt.txt", "r", encoding="utf-8") as f:
    system_prompt = f.read()

sys_msg = SystemMessage(content=system_prompt)

embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")


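# The Supabase vector store doubles as long-term memory: the retriever node
# reads similar Q/A pairs from it and the ingest node writes new ones back.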
try:
    supabase_url = os.environ.get("SUPABASE_URL")
    supabase_key = os.environ.get("SUPABASE_SERVICE_KEY")

    if not supabase_url or not supabase_key:
        print("Warning: Supabase credentials not found, vector store will be disabled")
        vector_store = None
        retriever_tool = None
    else:
        supabase: Client = create_client(supabase_url, supabase_key)
        vector_store = SupabaseVectorStore(
            client=supabase,
            embedding=embeddings,
            table_name="documents",
            query_name="match_documents_langchain",
        )
        # Bind the tool under its own name so it does not shadow the imported
        # create_retriever_tool factory.
        retriever_tool = create_retriever_tool(
            retriever=vector_store.as_retriever(),
            name="Question Search",
            description="A tool to retrieve similar questions from a vector store.",
        )
except Exception as e:
    print(f"Warning: Could not initialize Supabase vector store: {e}")
    vector_store = None
    retriever_tool = None

tools = [
    wiki_search,
    web_search,
    arxiv_search,
    run_python,
]
if retriever_tool:
    tools.append(retriever_tool)


# MessagesState only defines the `messages` channel, but the nodes below also
# write `code_result` and `should_ingest`; extend the schema so those updates
# have state channels to land in.
class AgentState(MessagesState):
    code_result: str
    should_ingest: bool


def _needs_code(state: AgentState) -> bool:
    """Heuristic: does *state* look like a coding request?"""
    messages = state.get("messages", [])
    if not messages:
        return False
    last_content = messages[-1].content.lower()
    triggers = [
        "```python",
        "write python",
        "run this code",
        "file manipulation",
        "csv",
        "pandas",
        "json",
        "plot",
        "fibonacci",
    ]
    return any(t in last_content for t in triggers)


def _code_exec_wrapper(state: AgentState):
    """Delegate the user query to the sandboxed Python interpreter."""
    human_msgs = [m.content for m in state.get("messages", []) if m.type == "human"]
    query = "\n\n".join(human_msgs)
    result = run_agent(query)
    return {"code_result": result}


def _code_to_message(state: AgentState):
    """Turn the interpreter's stdout into an AIMessage so the LLM can see it."""
    if not state.get("code_result"):
        return {}
    return {"messages": [AIMessage(content=state["code_result"])]}


def ingest(state: AgentState):
    """Persist helpful Q/A pairs (and any attachment snippet) to the vector DB."""
    try:
        if not state.get("should_ingest") or not vector_store:
            return {}

        question_text = state["messages"][0].content
        answer_text = state["messages"][-1].content
        attach_snippets = "\n\n".join(
            m.content for m in state["messages"] if str(m.content).startswith("Attached file content")
        )
        payload = f"Question:\n{question_text}\n\nAnswer:\n{answer_text}"
        if attach_snippets:
            payload += f"\n\n{attach_snippets}"

        hash_id = hashlib.sha256(payload.encode()).hexdigest()
        if hash_id in SEEN_HASHES:
            print("Ingest: Duplicate payload within session – skip")
            return {}
        SEEN_HASHES.add(hash_id)
        vector_store.add_texts([payload], metadatas=[{"hash_id": hash_id, "timestamp": time.time()}])
        print("Ingest: Stored new Q/A pair in vector store")
    except Exception as ing_e:
        print(f"Ingest node: Error while upserting – {ing_e}")
    return {}


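# Graph wiring: the retriever enriches the prompt, the assistant plans, the
# tools / code_exec nodes act, and ingest persists useful Q/A pairs.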
def build_graph(provider: str = "groq"):
    """Build the graph."""
    if provider == "google":
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
    elif provider == "groq":
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0.6)
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")

    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: AgentState):
        """Assistant node."""
        try:
            print(f"Assistant node: Processing {len(state['messages'])} messages")
            result = llm_with_tools.invoke(state["messages"])
            print(f"Assistant node: LLM returned result type: {type(result)}")
            return {"messages": [result]}
        except Exception as e:
            print(f"Error in assistant node: {e}")
            error_msg = AIMessage(content=f"I encountered an error: {e}")
            return {"messages": [error_msg]}

    def retriever(state: AgentState):
        """Retriever node (smart fetch + similarity search)."""
        try:
            print(f"Retriever node: Processing {len(state['messages'])} messages")
            if not state["messages"]:
                print("Retriever node: No messages in state")
                return {"messages": [sys_msg]}

            query_content = state["messages"][-1].content

            # Memoise the similarity search per query hash for TTL seconds.
            q_hash = hashlib.sha256(query_content.encode()).hexdigest()
            now = time.time()
            if q_hash in QUERY_CACHE and now - QUERY_CACHE[q_hash][0] < TTL:
                similar_question = QUERY_CACHE[q_hash][1]
                print("Retriever node: Cache hit for similarity search")
            else:
                if vector_store:
                    print("Retriever node: Searching vector store for similar questions …")
                    try:
                        similar_question = vector_store.similarity_search_with_relevance_scores(query_content, k=2)
                    except Exception as vs_e:
                        print(f"Retriever node: Vector store search error – {vs_e}")
                        similar_question = []
                    QUERY_CACHE[q_hash] = (now, similar_question)
                else:
                    similar_question = []
                    print("Retriever node: Vector store not available, skipping similarity search")

            # Only ingest when the store has no sufficiently similar entry.
            top_score = similar_question[0][1] if similar_question else 0.0
            should_ingest = top_score < SIMILARITY_THRESHOLD

            # Fetch the attachment if the question matches one of the scoring
            # API's tasks and that task has not been processed yet.
            attachment_msg = None
            matched_task_id = None
            try:
                resp = requests.get(f"{DEFAULT_API_URL}/questions", timeout=30)
                resp.raise_for_status()
                questions = resp.json()
                for q in questions:
                    if str(q.get("question")).strip() == str(query_content).strip():
                        matched_task_id = str(q.get("task_id"))
                        break
                if matched_task_id and matched_task_id not in PROCESSED_TASKS:
                    print(f"Retriever node: Downloading attachment for task {matched_task_id} …")
                    file_resp = requests.get(f"{DEFAULT_API_URL}/files/{matched_task_id}", timeout=60)
                    if file_resp.status_code == 200 and file_resp.content:
                        try:
                            file_text = file_resp.content.decode("utf-8", errors="replace")
                        except Exception:
                            file_text = "(binary or non-UTF8 file omitted)"
                        MAX_CHARS = 8000
                        if len(file_text) > MAX_CHARS:
                            file_text = file_text[:MAX_CHARS] + "\n… (truncated)"
                        attachment_msg = HumanMessage(content=f"Attached file content for task {matched_task_id}:\n```python\n{file_text}\n```")
                        print("Retriever node: Attachment added to context")
                        should_ingest = True
                    else:
                        print(f"Retriever node: No attachment for task {matched_task_id} (status {file_resp.status_code})")
                    PROCESSED_TASKS.add(matched_task_id)
            except Exception as api_e:
                print(f"Retriever node: Error while fetching attachment – {api_e}")

            msgs = [sys_msg] + state["messages"]
            if similar_question:
                example_doc = similar_question[0][0] if isinstance(similar_question[0], tuple) else similar_question[0]
                example_msg = HumanMessage(content=f"Here I provide a similar question and answer for reference: \n\n{example_doc.page_content}")
                msgs.append(example_msg)
                print("Retriever node: Added example message from similar question")

            if attachment_msg:
                msgs.append(attachment_msg)

            # Return should_ingest as a state update; mutating `state` in
            # place would not be propagated to the ingest node.
            return {"messages": msgs, "should_ingest": should_ingest}
        except Exception as e:
            print(f"Error in retriever node: {e}")
            return {"messages": [sys_msg] + state["messages"]}

    builder = StateGraph(AgentState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))
    builder.add_node("code_exec", _code_exec_wrapper)
    builder.add_node("code_to_message", _code_to_message)
    builder.add_node("ingest", ingest)

    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")

    # Route the assistant's output exactly once: tool calls take priority,
    # then the code heuristic, and otherwise the answer is ingested.
    # Registering two separate conditional edges on "assistant" would fan
    # out to both branches in parallel.
    def _route_after_assistant(state: AgentState) -> str:
        if tools_condition(state) == "tools":
            return "tools"
        if _needs_code(state):
            return "code_exec"
        return "ingest"

    builder.add_conditional_edges(
        "assistant",
        _route_after_assistant,
        {"tools": "tools", "code_exec": "code_exec", "ingest": "ingest"},
    )

    builder.add_edge("code_exec", "code_to_message")
    builder.add_edge("code_to_message", "assistant")
    builder.add_edge("tools", "assistant")
    builder.add_edge("ingest", END)

    return builder.compile()


if __name__ == "__main__":
    question = "When was a picture of St. Thomas Aquinas first added to the Wikipedia page on the Principle of double effect?"

    graph = build_graph(provider="groq")

    messages = [HumanMessage(content=question)]
    # Only attach the Langfuse callback when the handler initialised successfully.
    config = {"callbacks": [langfuse_handler]} if langfuse_handler else {}
    result = graph.invoke({"messages": messages}, config=config)
    for m in result["messages"]:
        m.pretty_print()