byArMan456's picture
Create agent.py
314150e verified
"""LangGraph‑powered autonomous agent able to use the *BetterThanMe* toolset.
Target: GAIA level 1 competency without external API keys.
Provides `build_graph()` for the evaluation harness.
"""
from __future__ import annotations
import os
from typing import Any, List, TypedDict
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, END
from tools import TOOLS
# ---------------------------------------------------------------------------
# Agent state ----------------------------------------------------------------
# ---------------------------------------------------------------------------
class AgentState(TypedDict, total=False):
messages: List[Any]
tool_calls: list
needs_tool: bool
# ---------------------------------------------------------------------------
# LLM setup ------------------------------------------------------------------
# ---------------------------------------------------------------------------
MODEL = os.getenv("OPENAI_MODEL", "gpt-3.5-turbo")
llm = ChatOpenAI(model_name=MODEL, temperature=0)
SYSTEM_PROMPT = """
You are **BetterThanMe**, a tool‑using assistant.
**Rules**
1. Think step‑by‑step *internally* but **never** reveal your reasoning.
2. Use JSON function‑call format to invoke tools when external data is required.
3. When you are completely confident in the answer, respond with **exactly** one line:
Final Answer: <answer>
– where <answer> is a single concise value (number, word, date, URL, etc.).
– Do **not** add explanations, extra words, or additional lines.
The automated grader will strip the first 14 characters ('Final Answer: ') to obtain the answer, so formatting must be perfect.
"""
# ---------------------------------------------------------------------------
# LangGraph nodes ------------------------------------------------------------
# ---------------------------------------------------------------------------
def agent_node(state: AgentState) -> AgentState: # type: ignore[override]
messages = state.get("messages", [])
if not messages or not isinstance(messages[0], SystemMessage):
messages = [SystemMessage(content=SYSTEM_PROMPT)] + messages
response = llm.invoke(messages, tools=TOOLS)
messages.append(response)
tool_calls = getattr(response, "tool_calls", None)
needs_tool = bool(tool_calls)
return {
"messages": messages,
"tool_calls": tool_calls,
"needs_tool": needs_tool,
}
def tool_executor_node(state: AgentState) -> AgentState: # type: ignore[override]
messages = state["messages"]
tool_calls = state.get("tool_calls", []) or []
for call in tool_calls:
name = call["name"]
args = call.get("arguments", {})
tool = next((t for t in TOOLS if t.name == name), None)
if tool is None:
result = f"Tool '{name}' not found."
else:
try:
result = tool.run(**args)
except Exception as exc: # pylint: disable=broad-except
result = f"Error running tool: {exc}"
messages.append(AIMessage(content=result, name=name))
return {
"messages": messages,
"needs_tool": False,
}
# ---------------------------------------------------------------------------
# Build & compile graph ------------------------------------------------------
# ---------------------------------------------------------------------------
def _compile_graph():
g = StateGraph(AgentState)
g.add_node("agent", agent_node)
g.add_node("executor", tool_executor_node)
g.add_conditional_edges(
"agent",
lambda s: s["needs_tool"],
{True: "executor", False: END},
)
g.add_edge("executor", "agent")
g.set_entry_point("agent")
return g.compile()
_GRAPH = _compile_graph()
def build_graph():
"""Return the compiled LangGraph, conforming to evaluation harness."""
return _GRAPH
# ---------------------------------------------------------------------------
# Convenience single‑turn wrapper (not used by evaluation harness)------------
# ---------------------------------------------------------------------------
def chat_agent(user_input: str, history: List[List[str]] | None = None) -> str:
"""Single‑turn helper for interactive Gradio chat UI."""
history = history or []
messages: List[Any] = []
for user, ai in history:
messages.append(HumanMessage(content=user))
messages.append(AIMessage(content=ai))
messages.append(HumanMessage(content=user_input))
final_state: AgentState = _GRAPH.invoke({"messages": messages})
for msg in reversed(final_state["messages"]):
if isinstance(msg, AIMessage):
return msg.content
return "(no response)"