bugfixing; fixed toolbox; isolated [Base|AI|Human]Message handling logic into the agent interface; implemented tests
Files changed:
- alfred.py +1 -3
- args.py +7 -4
- graph.py +37 -41
- graph_builder.py +3 -5
- itf_agent.py +40 -4
- llm_factory.py +2 -1
- test.py +255 -77
- toolbox.py +129 -55
alfred.py
CHANGED

@@ -1,5 +1,3 @@
-from langchain_core.messages import BaseMessage
-
 from typing import Any, Dict
 
 from args import Args
@@ -35,7 +33,7 @@ class Alfred:
         """
         initial_state: State = {
            "initial_query": query,
-            "messages": [
+            "messages": [query],  # Manager's context
            "task_progress": [],  # Solver's context
            "audit_interval": Args.AlfredParams.AUDIT_INTERVAL,
            "manager_queries": 0,
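Note: with this change the manager's context is seeded with the raw query string instead of a BaseMessage. A minimal sketch of the resulting initial state (field values are illustrative and taken from the keys used in graph.py and test.py; `compiled_graph` is a hypothetical compiled LangGraph graph, not shown in this commit):

    # Illustrative initial state made of plain strings (no BaseMessage wrappers).
    initial_state = {
        "initial_query": "What is the capital of France?",
        "messages": ["What is the capital of France?"],   # Manager's context
        "task_progress": [],                               # Solver's context
        "audit_interval": 2,
        "manager_queries": 0,
        "solver_queries": 0,
        "max_interactions": 4,
        "max_solving_effort": 4,
        "final_response": None,
    }
    # result = compiled_graph.invoke(initial_state)  # hypothetical: run it through a compiled graph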
args.py
CHANGED

@@ -4,6 +4,9 @@ from typing import Optional
 from logger import Logger
 
 
+TEST_MODE = False
+
+
 class LLMInterface(Enum):
     OPENAI = "OpenAI"
     HUGGINGFACE = "HuggingFace"
@@ -80,11 +83,11 @@ class Args:
     primary_llm_interface=LLMInterface.OPENAI
     # secondary_llm_interface=LLMInterface.HUGGINGFACE
     vlm_interface=LLMInterface.OPENAI
-    primary_model="qwen2.5-qwq-35b-eureka-cubed-abliterated-uncensored"
-    secondary_model="qwen2.5-7b-instruct-1m"
-    vision_model="qwen/qwen2.5-vl-7b"
+    primary_model="groot" if TEST_MODE else "qwen2.5-qwq-35b-eureka-cubed-abliterated-uncensored"
+    secondary_model="groot" if TEST_MODE else "qwen2.5-7b-instruct-1m"
+    vision_model="groot" if TEST_MODE else "qwen/qwen2.5-vl-7b"
     api_base="http://127.0.0.1:1234/v1"  # LM Studio local endpoint
-    api_key=
+    api_key="api_key"
     token = ""  # Not needed when using OpenAILike API
     # Agent presets
     PRIMARY_AGENT_PRESET = AgentPreset(
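Note: TEST_MODE routes every preset to the sentinel model name "groot", which itf_agent.py recognizes as a mock. A small sketch of the pattern in isolation (names mirror the diff; the print is only for illustration):

    TEST_MODE = True  # flip to False to talk to the real LM Studio models

    primary_model = "groot" if TEST_MODE else "qwen2.5-qwq-35b-eureka-cubed-abliterated-uncensored"
    secondary_model = "groot" if TEST_MODE else "qwen2.5-7b-instruct-1m"
    vision_model = "groot" if TEST_MODE else "qwen/qwen2.5-vl-7b"

    print(primary_model)  # -> "groot" while TEST_MODE is on; agents then short-circuit to a canned reply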
graph.py
CHANGED

@@ -1,13 +1,5 @@
-from 
-from langgraph.graph import START, END, StateGraph
-from langgraph.graph.message import add_messages
-from langgraph.prebuilt import ToolNode, tools_condition
+from typing import List, Literal, Optional, TypedDict
 
-from typing import Annotated, Any, Dict, List, Literal, Optional, TypedDict
-import logging
-from pathlib import Path
-
-from args import Args
 from agents import *
 from itf_agent import IAgent
 
@@ -15,10 +7,8 @@ from itf_agent import IAgent
 class State(TypedDict):
     """State class for the agent graph."""
     initial_query: str
-
-
-    messages: List[BaseMessage]  # Manager's context
-    task_progress: List[BaseMessage]  # Solver's context
+    messages: List[str]  # Manager's context
+    task_progress: List[str]  # Solver's context
     audit_interval: int
     manager_queries: int
     solver_queries: int
@@ -38,7 +28,7 @@ class Agents:
     viewer = Viewer()
 
     @classmethod
-    def guard_output(cls, agent: IAgent, messages: List[
+    def guard_output(cls, agent: IAgent, messages: List[str]) -> str:
         response = agent.query(messages)
         guarded_response = cls.guardian.query([response])
         return guarded_response
@@ -65,8 +55,8 @@ class _Helper:
         return first % second == 0
 
     @staticmethod
-    def 
-        response = str(task_progress[-1]
+    def solver_successor(task_progress: List[str]) -> Literal["manager", "researcher", "reasoner", "viewer", "unspecified"]:
+        response = str(task_progress[-1])
         if "to: researcher" in response.lower():
             return "researcher"
         elif "to: reasoner" in response.lower():
@@ -78,6 +68,18 @@ class _Helper:
         else:
             return "unspecified"
 
+    @staticmethod
+    def manager_successor(state: State) -> Literal["solver", "auditor", "final_answer"]:
+        last_message = state["messages"][-1]
+        answer_ready = "FINAL ANSWER:" in last_message
+        max_interractions_reached = state["manager_queries"] >= state["max_interactions"]
+        if answer_ready or max_interractions_reached:
+            return "final_answer"
+
+        if _Helper._is_divisible(state["manager_queries"], state["audit_interval"]):
+            return "auditor"
+
+        return "solver"
 
 class Nodes:
     """
@@ -88,10 +90,13 @@ class Nodes:
         Orchestrates the workflow by delegating tasks to specialized nodes and integrating their outputs
         """
         state["manager_queries"] += 1
-
+        successor = _Helper.manager_successor(state)
+        if successor == "solver":
            response = Agents.guard_output(Agents.manager, state["messages"])
            state["messages"].append(response)
-
+            # Prepare task for Solver
+            state["task_progress"] = [response]
+        # else: [wait for auditor's feedback] or [is final answer]
 
         return state
 
@@ -99,10 +104,10 @@ class Nodes:
         """
         Formats and delivers the final response to the user
         """
-        instruction = 
+        instruction = "Formulate a definitive final answer in english. Be very concise and use no redundant words !"
         state["messages"].append(instruction)
         response = Agents.manager.query(state["messages"])
-        state["final_response"] = 
+        state["final_response"] = response
         return state
 
     def auditor_node(self, state: State) -> State:
@@ -119,6 +124,16 @@ class Nodes:
         """
         response = Agents.guard_output(Agents.solver, state["task_progress"])
         state["task_progress"].append(response)
+
+        successor = _Helper.solver_successor(state["task_progress"])
+        if successor == "unspecified":
+            instruction = "Formulate an answer for the manager with your findings so far !"
+            state["task_progress"].append(instruction)
+            response = Agents.solver.query(state["task_progress"])
+            state["messages"].append(response)
+        elif successor == "manager":
+            state["messages"].append(response)
+
         return state
 
     def researcher_node(self, state: State) -> State:
@@ -155,35 +170,16 @@ class Edges:
         Conditional edge for manager node.
         Returns one of: "solver", "auditor", "final_answer"
         """
-
-        answer_ready = "FINAL ANSWER:" in str(last_message.content)
-        max_interractions_reached = state["manager_queries"] >= state["max_interactions"]
-        if answer_ready or max_interractions_reached:
-            return "final_answer"
-
-        if _Helper._is_divisible(state["manager_queries"], state["audit_interval"]):
-            return "auditor"
-
-        # Prepare task for Solver
-        state["task_progress"] = [last_message]
-        return "solver"
+        return _Helper.manager_successor(state)
 
     def solver_edge(self, state: State) -> Literal["manager", "researcher", "reasoner", "viewer"]:
         """
         Conditional edge for solver node.
         Returns one of: "manager", "researcher", "reasoner", "viewer"
         """
-        receiver = _Helper.
+        receiver = _Helper.solver_successor(state["task_progress"])
 
         if receiver == "unspecified":
-            instruction = BaseMessage("Formulate an answer for the manager with your findings so far !")
-            state["task_progress"].append(instruction)
-            response = Agents.solver.query(state["task_progress"])
-            state["messages"].append(response)
             return "manager"
 
-        if receiver == "manager":
-            response = state["task_progress"][-1]
-            state["messages"].append(response)
-
         return receiver
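Note: both edges now funnel through the _Helper successors, so routing is driven purely by string conventions ("FINAL ANSWER:" for the manager, "to: <receiver>" for the solver). A standalone sketch of the solver-side convention, re-implemented here for illustration rather than imported from the repo:

    from typing import List

    def solver_successor(task_progress: List[str]) -> str:
        # Route on the "to: <receiver>" tag in the solver's latest message.
        last = task_progress[-1].lower()
        for receiver in ("researcher", "reasoner", "viewer", "manager"):
            if f"to: {receiver}" in last:
                return receiver
        return "unspecified"

    print(solver_successor(["To: researcher\nLook up the population of Malta."]))  # -> researcher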
graph_builder.py
CHANGED

@@ -23,16 +23,14 @@ class GraphBuilder:
         graph.add_node("solver", self.nodes.solver_node)
         graph.add_node("researcher", self.nodes.researcher_node)
         graph.add_node("reasoner", self.nodes.reasoner_node)
-        graph.add_node("
-        graph.add_node("video_handler", self.nodes.video_handler_node)
+        graph.add_node("viewer", self.nodes.viewer_node)
 
         graph.add_edge(START, "manager")
         graph.add_edge("final_answer", END)
         graph.add_edge("auditor", "manager")
         graph.add_edge("researcher", "solver")
         graph.add_edge("reasoner", "solver")
-        graph.add_edge("
-        graph.add_edge("video_handler", "solver")
+        graph.add_edge("viewer", "solver")
 
         graph.add_conditional_edges(
             "manager",
@@ -45,7 +43,7 @@ class GraphBuilder:
             "solver",
             self.edges.solver_edge,
             {
-                "manager": "manager", "researcher": "researcher", "reasoner": "reasoner", "
+                "manager": "manager", "researcher": "researcher", "reasoner": "reasoner", "viewer": "viewer"
             }
         )
 
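Note: the renamed "viewer" node has to appear in the conditional-edge mapping under the same key the edge function returns, so the returned label resolves to an existing node. A minimal, self-contained sketch of the same wiring pattern (dummy state and nodes; the LangGraph calls themselves are the real API):

    from typing import TypedDict
    from langgraph.graph import StateGraph, START, END

    class MiniState(TypedDict):
        messages: list

    def manager(state: MiniState) -> MiniState:
        state["messages"].append("FINAL ANSWER: done")
        return state

    def final_answer(state: MiniState) -> MiniState:
        return state

    def manager_edge(state: MiniState) -> str:
        # Loop on the manager until it emits a final answer.
        return "final_answer" if "FINAL ANSWER:" in state["messages"][-1] else "manager"

    graph = StateGraph(MiniState)
    graph.add_node("manager", manager)
    graph.add_node("final_answer", final_answer)
    graph.add_edge(START, "manager")
    graph.add_conditional_edges("manager", manager_edge, {"manager": "manager", "final_answer": "final_answer"})
    graph.add_edge("final_answer", END)
    print(graph.compile().invoke({"messages": ["hi"]}))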
itf_agent.py
CHANGED

@@ -1,4 +1,4 @@
-from langchain_core.messages import 
+from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, AIMessage
 
 import logging
 import os
@@ -13,6 +13,7 @@ class IAgent():
     def __init__(self, sys_prompt_filename, agent_preset: AgentPreset, tools: List = [], parallel_tool_calls=False):
         self.name = self._format_name(sys_prompt_filename)
         self.interface = agent_preset.get_interface()
+        self.mock = (agent_preset.get_model_name() == "groot")
 
         # Load the system prompt from a file
         system_prompt_path = os.path.join(os.getcwd(), "system_prompts", sys_prompt_filename)
@@ -37,6 +38,34 @@ class IAgent():
         cleaned_name = re.sub(r'^[^a-zA-Z]+', '', name_without_ext)
         return cleaned_name
 
+    @staticmethod
+    def _bake_roles(messages: List[str]) -> List[AnyMessage]:
+        """
+        Assigns roles to messages in reverse order: last message is HumanMessage,
+        previous is AIMessage, and so on, alternating backwards.
+
+        Args:
+            messages (List[str]): List of message strings.
+
+        Returns:
+            List[AnyMessage]: List of messages wrapped with appropriate role classes.
+
+        Raises:
+            ValueError: If messages is empty.
+        """
+        if not messages:
+            raise ValueError("The list of messages cannot be empty !")
+        messages_with_roles = []
+        total_messages = len(messages)
+        for idx, msg in enumerate(messages):
+            # Assign roles in reverse: last is Human, previous is AI, etc.
+            reverse_idx = total_messages - idx - 1
+            if reverse_idx % 2 == 0:
+                messages_with_roles.append(HumanMessage(content=msg))
+            else:
+                messages_with_roles.append(AIMessage(content=msg))
+        return messages_with_roles
+
     def get_system_prompt(self) -> str:
         """
         Retrieves the system prompt.
@@ -46,7 +75,7 @@ class IAgent():
         """
         return self.system_prompt
 
-    def query(self, messages: List[
+    def query(self, messages: List[str]) -> str:
         """
         Asynchronously queries the agent with a given question and returns the response.
 
@@ -56,15 +85,22 @@ class IAgent():
         Returns:
             str: The response from the agent as a string.
         """
+
         if Args.LOGGER is None:
             raise RuntimeError("LOGGER must be defined before querying the agent.")
 
         separator = "=============================="
         Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{messages}\n")
 
+        if self.mock:
+            response = str("I am GROOT !")
+            Args.LOGGER.log(logging.INFO, f"\nAgent '{self.name}' produced OUTPUT:\n{response}\n{separator}\n")
+            return response
+
         system_prompt = self.get_system_prompt()
-
-
+        messages_with_roles = self._bake_roles(messages)
+        conversation = [SystemMessage(content=system_prompt)] + messages_with_roles
+        response = str(self.model.invoke(conversation).content)
 
         Args.LOGGER.log(logging.INFO, f"\nAgent '{self.name}' produced OUTPUT:\n{response}\n{separator}\n")
         return response
llm_factory.py
CHANGED

@@ -27,12 +27,12 @@ class LLMFactory():
        repeat_penalty = agent_preset.get_repeat_penalty()

        kwargs = {
+            "name": model_name,
            "model": model_name,
            "base_url": Args.api_base,
            "api_key": Args.api_key,
            "temperature": temperature,
            "max_completion_tokens": max_tokens,
-            # "presence_penalty": repeat_penalty,
            "frequency_penalty": repeat_penalty
        }

@@ -48,6 +48,7 @@ class LLMFactory():
        repeat_penalty = agent_preset.get_repeat_penalty()

        kwargs = {
+            "name": model_name,
            "model": model_name,
            "temperature": temperature,
            "max_new_tokens": max_tokens,
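Note: the only functional change here is the extra "name" entry, so the constructed chat model carries a readable name alongside the model id. A hedged sketch of the kwargs pattern against an OpenAI-compatible backend (class choice and values are assumptions for illustration, not the repo's exact factory code):

    from langchain_openai import ChatOpenAI  # assumed OpenAI-like client

    kwargs = {
        "name": "qwen2.5-7b-instruct-1m",
        "model": "qwen2.5-7b-instruct-1m",
        "base_url": "http://127.0.0.1:1234/v1",  # LM Studio local endpoint
        "api_key": "api_key",
        "temperature": 0.2,
    }
    llm = ChatOpenAI(**kwargs)  # then llm.invoke([...]) as usual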
test.py
CHANGED

@@ -1,3 +1,4 @@
+from args import Args
 from graph import State, Nodes, Edges
 from graph_builder import GraphBuilder
 
@@ -20,17 +21,52 @@ class TestAlfredAgent(unittest.TestCase):
 
         Orchestrates the workflow by delegating tasks to specialized nodes and integrating their outputs
         """
-        # Create an instance of Nodes class
         nodes = Nodes()
 
         # Create a test state
-        test_state = {
+        test_state: State = {
+            "initial_query": "query",
+            "messages": ["query"],  # Manager's context
+            "task_progress": [],  # Solver's context
+            "audit_interval": 2,
+            "manager_queries": 0,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
 
         # Test the node function
         print(f"Testing 'manager' node...")
         nodes.manager_node(test_state)
-
-        #
+
+        # Assert that manager_queries has been incremented
+        self.assertEqual(test_state["manager_queries"], 1, "Manager queries should be incremented from 0 to 1")
+        # Assert that a new message has been added to the messages list
+        self.assertEqual(len(test_state["messages"]), 2, "Messages list should contain 2 items: the initial query and a new message from the manager node")
+
+        # Test audit interval behaviour
+        test_state = nodes.manager_node(test_state)
+
+        # Assert that manager_queries has been incremented
+        self.assertEqual(test_state["manager_queries"], 2, "Manager queries should be incremented from 1 to 2")
+        # Assert that a new message has been added to the messages list
+        self.assertEqual(len(test_state["messages"]), 2, "Messages list should contain 2 items: the initial 2 messages and no additional message as it is the audit interval")
+
+        already_tested_messages = test_state["messages"]
+        expected_state: State = {
+            "initial_query": "query",
+            "messages": already_tested_messages,  # Manager's context
+            "task_progress": [test_state["messages"][-1]],  # Solver's context
+            "audit_interval": 2,
+            "manager_queries": 2,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+
+        self.assertEqual(test_state, expected_state, "The state after manager node execution should match the expected state with manager_queries=2 and no additional messages added during audit interval")
         print(f"State after node execution: {test_state}")
 
     def test_final_answer_node(self):
@@ -39,17 +75,25 @@ class TestAlfredAgent(unittest.TestCase):
 
         Formats and delivers the final response to the user
         """
-        # Create an instance of Nodes class
         nodes = Nodes()
-
-
-
-
-
+        # Prepare a state with messages and required fields
+        test_state: State = {
+            "initial_query": "What is the capital of France?",
+            "messages": ["What is the capital of France?", "The capital of France is Paris."],
+            "task_progress": [],
+            "audit_interval": 2,
+            "manager_queries": 2,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         print(f"Testing 'final_answer' node...")
         nodes.final_answer_node(test_state)
-
-
+        # The last message should be the instruction
+        self.assertIn("Formulate a definitive final answer", test_state["messages"][-1])
+        # The final_response should be set and not None
+        self.assertIsNotNone(test_state["final_response"])
         print(f"State after node execution: {test_state}")
 
     def test_auditor_node(self):
@@ -58,17 +102,22 @@ class TestAlfredAgent(unittest.TestCase):
 
         Reviews manager's outputs for accuracy, safety, and quality
         """
-        # Create an instance of Nodes class
         nodes = Nodes()
-
-
-
-
-
+        test_state: State = {
+            "initial_query": "What is the capital of France?",
+            "messages": ["What is the capital of France?", "The capital of France is Paris."],
+            "task_progress": [],
+            "audit_interval": 2,
+            "manager_queries": 2,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         print(f"Testing 'auditor' node...")
         nodes.auditor_node(test_state)
-
-
+        # Auditor appends a message
+        self.assertGreaterEqual(len(test_state["messages"]), 3)
         print(f"State after node execution: {test_state}")
 
     def test_solver_node(self):
@@ -77,17 +126,22 @@ class TestAlfredAgent(unittest.TestCase):
 
         Central problem-solving node that coordinates with specialized experts based on task requirements
         """
-        # Create an instance of Nodes class
         nodes = Nodes()
-
-
-
-
-
+        test_state: State = {
+            "initial_query": "What is the capital of France?",
+            "messages": ["What is the capital of France?"],
+            "task_progress": ["Solve: What is the capital of France?"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         print(f"Testing 'solver' node...")
         nodes.solver_node(test_state)
-
-
+        # Solver appends to task_progress
+        self.assertGreaterEqual(len(test_state["task_progress"]), 2)
         print(f"State after node execution: {test_state}")
 
     def test_researcher_node(self):
@@ -96,17 +150,21 @@ class TestAlfredAgent(unittest.TestCase):
 
         Retrieves and synthesizes information from various sources to answer knowledge-based questions
         """
-        # Create an instance of Nodes class
         nodes = Nodes()
-
-
-
-
-
+        test_state: State = {
+            "initial_query": "What is the capital of France?",
+            "messages": ["What is the capital of France?"],
+            "task_progress": ["Research: What is the capital of France?"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         print(f"Testing 'researcher' node...")
         nodes.researcher_node(test_state)
-
-        # TODO: Add assertions to verify the state changes
+        self.assertGreaterEqual(len(test_state["task_progress"]), 2)
         print(f"State after node execution: {test_state}")
 
     def test_reasoner_node(self):
@@ -115,17 +173,21 @@ class TestAlfredAgent(unittest.TestCase):
 
         Performs logical reasoning, inference, and step-by-step problem-solving
         """
-        # Create an instance of Nodes class
         nodes = Nodes()
-
-
-
-
-
+        test_state: State = {
+            "initial_query": "What is the capital of France?",
+            "messages": ["What is the capital of France?"],
+            "task_progress": ["Reason: What is the capital of France?"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         print(f"Testing 'reasoner' node...")
         nodes.reasoner_node(test_state)
-
-        # TODO: Add assertions to verify the state changes
+        self.assertGreaterEqual(len(test_state["task_progress"]), 2)
         print(f"State after node execution: {test_state}")
 
     def test_viewer_node(self):
@@ -134,17 +196,21 @@ class TestAlfredAgent(unittest.TestCase):
 
         Processes, analyzes, and generates vision related information
         """
-        # Create an instance of Nodes class
         nodes = Nodes()
-
-
-
-
-
+        test_state: State = {
+            "initial_query": "Describe the image.",
+            "messages": ["Describe the image."],
+            "task_progress": ["View: Describe the image."],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         print(f"Testing 'image_handler' node...")
         nodes.viewer_node(test_state)
-
-        # TODO: Add assertions to verify the state changes
+        self.assertGreaterEqual(len(test_state["task_progress"]), 2)
         print(f"State after node execution: {test_state}")
 
     def test_manager_edge(self):
@@ -153,48 +219,160 @@ class TestAlfredAgent(unittest.TestCase):
 
         This edge should return one of: "solver", "auditor", "final_answer"
         """
-        # Create an instance of Edges class
         edges = Edges()
-
-
-
-
-
+        # Test for final_answer by FINAL ANSWER in last message
+        test_state: State = {
+            "initial_query": "Q",
+            "messages": ["Q", "FINAL ANSWER: Paris"],
+            "task_progress": [],
+            "audit_interval": 2,
+            "manager_queries": 2,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         print(f"Testing 'manager' conditional edge...")
         result = edges.manager_edge(test_state)
-
-
-
-
+        self.assertEqual(result, "final_answer")
+
+        # Test for final_answer by max_interactions
+        test_state2: State = {
+            "initial_query": "Q",
+            "messages": ["Q", "Some message"],
+            "task_progress": [],
+            "audit_interval": 2,
+            "manager_queries": 4,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+        result2 = edges.manager_edge(test_state2)
+        self.assertEqual(result2, "final_answer")
+
+        # Test for auditor
+        test_state3: State = {
+            "initial_query": "Q",
+            "messages": ["Q", "Some message"],
+            "task_progress": [],
+            "audit_interval": 2,
+            "manager_queries": 2,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+        result3 = edges.manager_edge(test_state3)
+        self.assertEqual(result3, "auditor")
+
+        # Test for solver
+        test_state4: State = {
+            "initial_query": "Q",
+            "messages": ["Q", "Some message"],
+            "task_progress": [],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+        result4 = edges.manager_edge(test_state4)
+        self.assertEqual(result4, "solver")
+        print(f"Edge decision: {result4}")
 
     def test_solver_edge(self):
         """
         Test the conditional edge for solver node.
 
-        This edge should return one of: "manager", "researcher", "reasoner", "
+        This edge should return one of: "manager", "researcher", "reasoner", "viewer"
         """
-        # Create an instance of Edges class
         edges = Edges()
-
-
-
-
-
-
+        # researcher
+        test_state: State = {
+            "initial_query": "Q",
+            "messages": ["Q"],
+            "task_progress": ["to: researcher"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
         result = edges.solver_edge(test_state)
-
-
-
-
+        self.assertEqual(result, "researcher")
+
+        # reasoner
+        test_state2: State = {
+            "initial_query": "Q",
+            "messages": ["Q"],
+            "task_progress": ["to: reasoner"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+        result2 = edges.solver_edge(test_state2)
+        self.assertEqual(result2, "reasoner")
+
+        # viewer
+        test_state3: State = {
+            "initial_query": "Q",
+            "messages": ["Q"],
+            "task_progress": ["to: viewer"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+        result3 = edges.solver_edge(test_state3)
+        self.assertEqual(result3, "viewer")
+
+        # manager
+        test_state4: State = {
+            "initial_query": "Q",
+            "messages": ["Q"],
+            "task_progress": ["to: manager"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+        result4 = edges.solver_edge(test_state4)
+        self.assertEqual(result4, "manager")
+
+        # unspecified (should append instruction and return manager)
+        test_state5: State = {
+            "initial_query": "Q",
+            "messages": ["Q"],
+            "task_progress": ["no receiver"],
+            "audit_interval": 2,
+            "manager_queries": 1,
+            "solver_queries": 0,
+            "max_interactions": 4,
+            "max_solving_effort": 4,
+            "final_response": None
+        }
+        result5 = edges.solver_edge(test_state5)
+        self.assertEqual(result5, "manager")
+        print(f"Edge decision: {result5}")
 
     def test_full_workflow(self):
         """
         Test the Alfred agent full workflow.
         """
-        #
+        # This is a placeholder for a full workflow test.
+        # For a real test, you would simulate the entire agent graph.
         print("Testing Alfred complete workflow...")
-
-        # Example test
+        # Example test (pseudo, as actual invoke may require more setup)
         # result = self.graph.invoke({"input": "Test input"})
         # self.assertIsNotNone(result)
         # print(f"Workflow result: {result}")
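Note: every test builds a near-identical State dict by hand. A possible follow-up (not part of this commit) is a small factory that fills the defaults used above and lets each test override only what it cares about:

    from graph import State

    def make_state(**overrides) -> State:
        state: State = {
            "initial_query": "query",
            "messages": ["query"],
            "task_progress": [],
            "audit_interval": 2,
            "manager_queries": 0,
            "solver_queries": 0,
            "max_interactions": 4,
            "max_solving_effort": 4,
            "final_response": None,
        }
        state.update(overrides)  # e.g. make_state(task_progress=["to: researcher"])
        return state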
toolbox.py
CHANGED

@@ -1,3 +1,5 @@
+from langchain_core.tools.simple import Tool
+
 from duckduckgo_search import DDGS
 import pint
 import sympy as sp
@@ -9,10 +11,10 @@ class _Math:
     def symbolic_calc(expression: str) -> str:
         """
         Evaluates complex mathematical expressions using SymPy.
-
+
         Args:
-            expression: Mathematical expression as string
-
+            expression (str): Mathematical expression as string
+
         Returns:
             Result of the calculation
         """
@@ -28,12 +30,12 @@ class _Math:
     def unit_converter(value: float, from_unit: str, to_unit: str) -> str:
         """
         Converts values between different units of measurement.
-
+
         Args:
-            value: The numerical value to convert
-            from_unit: The source unit (e.g., 'meter', 'kg', 'celsius')
-            to_unit: The target unit (e.g., 'feet', 'pound', 'fahrenheit')
-
+            value (float): The numerical value to convert
+            from_unit (str): The source unit (e.g., 'meter', 'kg', 'celsius')
+            to_unit (str): The target unit (e.g., 'feet', 'pound', 'fahrenheit')
+
         Returns:
             The converted value with appropriate units
         """
@@ -55,14 +57,27 @@ class _Math:
             return f"Error in unit conversion: {str(e)}"
 
 
+class MathToolbox:
+    symbolic_calc = Tool(
+        name="symbolic_calc",
+        func=_Math.symbolic_calc,
+        description=_Math.symbolic_calc.__doc__ or ""
+    )
+    unit_converter = Tool(
+        name="unit_converter",
+        func=_Math.unit_converter,
+        description=_Math.unit_converter.__doc__ or ""
+    )
+
+
 class _WebSearch:
     @staticmethod
     def duckduckgo_text_search(keywords, max_results=5) -> list[dict[str, str]]:
         """DuckDuckGo text search.
 
         Args:
-            keywords: keywords for query.
-            max_results: max number of results. If None, returns results only from the first response. Defaults to 5.
+            keywords (str): keywords for query.
+            max_results (int): max number of results. If None, returns results only from the first response. Defaults to 5.
 
         Returns:
             List of dictionaries with search results, or None if there was an error.
@@ -74,18 +89,17 @@ class _WebSearch:
         """
         return DDGS().text(keywords, max_results=max_results)
 
-
     @staticmethod
     def duckduckgo_images_search(keywords, license = None, max_results=5) -> list[dict[str, str]]:
         """DuckDuckGo images search.
 
         Args:
-            keywords: keywords for query.
-            license: any (All Creative Commons), Public (PublicDomain),
+            keywords (str): keywords for query.
+            license (str|None): any (All Creative Commons), Public (PublicDomain),
                 Share (Free to Share and Use), ShareCommercially (Free to Share and Use Commercially),
                 Modify (Free to Modify, Share, and Use), ModifyCommercially (Free to Modify, Share, and
                 Use Commercially). Defaults to None.
-            max_results: max number of results. If None, returns results only from the first response. Defaults to 5.
+            max_results (int): max number of results. If None, returns results only from the first response. Defaults to 5.
 
         Returns:
             List of dictionaries with images search results.
@@ -97,15 +111,14 @@ class _WebSearch:
         """
         return DDGS().images(keywords, license_image=license, max_results=max_results)
 
-
     @staticmethod
     def duckduckgo_videos_search(keywords, license = None, max_results=5) -> list[dict[str, str]]:
         """DuckDuckGo videos search.
 
         Args:
-            keywords: keywords for query.
-            license: creativeCommon, youtube. Defaults to None.
-            max_results: max number of results. If None, returns results only from the first response. Defaults to 5.
+            keywords (str): keywords for query.
+            license (str|None): creativeCommon, youtube. Defaults to None.
+            max_results (int): max number of results. If None, returns results only from the first response. Defaults to 5.
 
         Returns:
             List of dictionaries with videos search results.
@@ -118,16 +131,34 @@ class _WebSearch:
         return DDGS().videos(keywords, license_videos=license, max_results=max_results)
 
 
+class WebSearchToolbox:
+    duckduckgo_text_search = Tool(
+        name="duckduckgo_text_search",
+        func=_WebSearch.duckduckgo_text_search,
+        description=_WebSearch.duckduckgo_text_search.__doc__ or ""
+    )
+    duckduckgo_images_search = Tool(
+        name="duckduckgo_images_search",
+        func=_WebSearch.duckduckgo_images_search,
+        description=_WebSearch.duckduckgo_images_search.__doc__ or ""
+    )
+    duckduckgo_videos_search = Tool(
+        name="duckduckgo_videos_search",
+        func=_WebSearch.duckduckgo_videos_search,
+        description=_WebSearch.duckduckgo_videos_search.__doc__ or ""
+    )
+
+
 class _Encryption:
-
+
     @staticmethod
     def ascii_encode(text: str) -> str:
         """
         Converts each character in a string to its ASCII value.
-
+
         Args:
-            text: The text to encode
-
+            text (str): The text to encode
+
         Returns:
             Space-separated ASCII values
         """
@@ -138,15 +169,15 @@ class _Encryption:
             return result
         except Exception as e:
             return f"Error in ASCII encoding: {str(e)}"
-
+
     @staticmethod
     def ascii_decode(text: str) -> str:
         """
         Converts space-separated ASCII values back to characters.
-
+
         Args:
-            text: Space-separated ASCII values
-
+            text (str): Space-separated ASCII values
+
         Returns:
             Decoded string
         """
@@ -162,10 +193,10 @@ class _Encryption:
     def base64_encode(text: str) -> str:
         """
         Encodes a string to base64.
-
+
         Args:
-            text: The text to encode
-
+            text (str): The text to encode
+
         Returns:
             Base64 encoded string
         """
@@ -177,15 +208,15 @@ class _Encryption:
             return encoded_text
         except Exception as e:
             return f"Error in base64 encoding: {str(e)}"
-
+
     @staticmethod
     def base64_decode(encoded_text: str) -> str:
         """
         Decodes a base64 string to plain text.
-
+
         Args:
-            encoded_text: The base64 encoded text
-
+            encoded_text (str): The base64 encoded text
+
         Returns:
             Decoded string
         """
@@ -197,16 +228,16 @@ class _Encryption:
             return decoded_text
         except Exception as e:
             return f"Error in base64 decoding: {str(e)}"
-
+
     @staticmethod
     def caesar_cipher_encode(text: str, shift: int) -> str:
         """
         Encodes text using Caesar cipher with specified shift.
-
+
         Args:
-            text: The text to encode
-            shift: Number of positions to shift each character
-
+            text (str): The text to encode
+            shift (int): Number of positions to shift each character
+
         Returns:
             Caesar cipher encoded string
         """
@@ -223,42 +254,42 @@ class _Encryption:
             return result
         except Exception as e:
             return f"Error in Caesar cipher encoding: {str(e)}"
-
+
     @classmethod
     def caesar_cipher_decode(cls, encoded_text: str, shift: int) -> str:
         """
         Decodes Caesar cipher text with specified shift.
-
+
         Args:
-            encoded_text: The encoded text
-            shift: Number of positions the text was shifted
-
+            encoded_text (str): The encoded text
+            shift (int): Number of positions the text was shifted
+
         Returns:
             Decoded string
         """
         print(f"-> caesar_cipher_decode tool used (input: {encoded_text[:30]}..., shift: {shift}) !")
         # To decode, we shift in the opposite direction
         return cls.caesar_cipher_encode(encoded_text, -shift)
-
+
     @classmethod
     def caesar_cipher_brute_force(cls, text: str) -> str:
         """
         Performs a brute force attack on a Caesar cipher by trying all 26 shifts.
-
+
         Args:
-            text: The Caesar cipher encoded text
-
+            text (str): The Caesar cipher encoded text
+
         Returns:
             All possible decoding results with their respective shifts
         """
         print(f"-> caesar_cipher_brute_force tool used (input: {text[:30]}...) !")
         results = []
-
+
         # Try all 26 possible shifts for English alphabet
         for shift in range(26):
             decoded = cls.caesar_cipher_decode(text, shift)
             results.append(f"Shift {shift}: {decoded}")
-
+
         output = "\n".join(results)
         return output
 
@@ -266,10 +297,10 @@ class _Encryption:
     def reverse_string(text: str) -> str:
         """
         Reverses a string.
-
+
         Args:
-            text: The text to reverse
-
+            text (str): The text to reverse
+
         Returns:
             Reversed string
         """
@@ -278,7 +309,50 @@ class _Encryption:
         return reversed_text
 
 
+class EncryptionToolbox:
+    ascii_encode = Tool(
+        name="ascii_encode",
+        func=_Encryption.ascii_encode,
+        description=_Encryption.ascii_encode.__doc__ or ""
+    )
+    ascii_decode = Tool(
+        name="ascii_decode",
+        func=_Encryption.ascii_decode,
+        description=_Encryption.ascii_decode.__doc__ or ""
+    )
+    base64_encode = Tool(
+        name="base64_encode",
+        func=_Encryption.base64_encode,
+        description=_Encryption.base64_encode.__doc__ or ""
+    )
+    base64_decode = Tool(
+        name="base64_decode",
+        func=_Encryption.base64_decode,
+        description=_Encryption.base64_decode.__doc__ or ""
+    )
+    caesar_cipher_encode = Tool(
+        name="caesar_cipher_encode",
+        func=_Encryption.caesar_cipher_encode,
+        description=_Encryption.caesar_cipher_encode.__doc__ or ""
+    )
+    caesar_cipher_decode = Tool(
+        name="caesar_cipher_decode",
+        func=_Encryption.caesar_cipher_decode,
+        description=_Encryption.caesar_cipher_decode.__doc__ or ""
+    )
+    caesar_cipher_brute_force = Tool(
+        name="caesar_cipher_brute_force",
+        func=_Encryption.caesar_cipher_brute_force,
+        description=_Encryption.caesar_cipher_brute_force.__doc__ or ""
+    )
+    reverse_string = Tool(
+        name="reverse_string",
+        func=_Encryption.reverse_string,
+        description=_Encryption.reverse_string.__doc__ or ""
+    )
+
+
 class Toolbox:
-    math = 
-    web_search = 
-    encryption = 
+    math = MathToolbox()
+    web_search = WebSearchToolbox()
+    encryption = EncryptionToolbox()
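Note: each toolbox class groups plain static methods wrapped as LangChain Tool objects, with the docstring reused as the tool description. A self-contained sketch of the same wrapping with a trivial function (uses the public langchain_core.tools import path):

    from langchain_core.tools import Tool

    def reverse_string(text: str) -> str:
        """Reverses a string."""
        return text[::-1]

    reverse_tool = Tool(
        name="reverse_string",
        func=reverse_string,
        description=reverse_string.__doc__ or "",
    )

    print(reverse_tool.invoke("alfred"))  # -> derfla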