24Arys11 committed on
Commit cc6bd3b · 1 Parent(s): d26c7f3

Built management agents with llama_index, added a recursive self-call for task breakdown, and fixed LLMFactory to create LLMs from different interfaces.

Files changed (11)
  1. .gitignore +3 -0
  2. alfred.py +183 -0
  3. app.py +6 -7
  4. args.py +4 -2
  5. diagrams/diagram.png +0 -0
  6. diagrams/diagram.puml +5 -5
  7. llm_factory.py +7 -6
  8. management.py +154 -0
  9. manager.py +0 -32
  10. solver.py +63 -6
  11. toolbox.py +3 -3
.gitignore CHANGED
@@ -63,6 +63,9 @@ venv.bak/
 # Local configuration file
 .env
 
+# Rejected by git
+*.png
+
 # Other files
 _to_ignore/
 .github/
alfred.py ADDED
@@ -0,0 +1,183 @@
+from langgraph.graph import END, START, StateGraph
+from langgraph.graph.state import CompiledStateGraph
+
+from typing import Dict, Any, TypedDict, Literal, Optional
+import asyncio
+
+from management import Manager, Assistant
+
+
+# Maximum number of interactions between Assistant and Manager
+MAX_INTERACTIONS = 5
+# Maximum depth of recursion for Manager
+MAX_DEPTH = 3
+# For both Assistant and Manager:
+TEMPERATURE = 0.7
+MAX_TOKENS = 100
+
+
+class State(TypedDict):
+    """State for the agent graph."""
+    initial_query: str
+    current_message: str
+    nr_interactions: int
+    final_answer: Optional[str]
+
+
+class GraphBuilder:
+    def __init__(self):
+        """
+        Initializes the GraphBuilder.
+        """
+        self.assistant_agent = Assistant(TEMPERATURE, MAX_TOKENS)
+        self.manager_agent = Manager(TEMPERATURE, MAX_TOKENS, MAX_DEPTH)
+        self.final_answer_hint = "Final answer:"
+
+    def clear_chat_history(self):
+        self.assistant_agent.clear_context()
+        self.manager_agent.clear_context()
+
+    async def assistant_node(self, state: State) -> State:
+        """
+        Assistant agent that evaluates the query and decides whether to give a final answer
+        or continue the conversation with the Manager.
+
+        Uses the existing Assistant implementation.
+        """
+        response = await self.assistant_agent.query(state["current_message"])
+
+        # Check if this is a final answer
+        if self.final_answer_hint in response:
+            # Extract the text after the final answer hint
+            state["final_answer"] = response.split(self.final_answer_hint, 1)[1].strip()
+
+        state["current_message"] = response
+        state["nr_interactions"] += 1
+
+        return state
+
+    async def manager_node(self, state: State) -> State:
+        """
+        Manager agent that handles the queries from the Assistant and provides responses.
+
+        Uses the existing Manager implementation.
+        """
+        response = await self.manager_agent.query(state["current_message"])
+
+        state["current_message"] = response
+
+        return state
+
+    async def final_answer_node(self, state: State) -> State:
+        """
+        Final answer node that formats and returns the final response.
+
+        If there's already a final answer in the state, it uses that.
+        Otherwise, it asks the assistant to formulate a final answer.
+        """
+        # If we already have a final answer, use it
+        if state.get("final_answer") is not None:
+            return state
+
+        # Otherwise, have the assistant formulate a final answer
+        prompt = f"Based on the conversation so far, provide a final answer to the original query:\n\n{state['initial_query']}"
+        state["current_message"] = prompt
+        response = await self.assistant_agent.query(state["current_message"])
+
+        # Format the response
+        if self.final_answer_hint not in response:
+            response = f"{self.final_answer_hint}{response}"
+
+        # Extract the text after the final answer hint
+        state["final_answer"] = response.split(self.final_answer_hint, 1)[1].strip()
+
+        return state
+
+    def should_continue(self, state: State) -> Literal["manager", "final_answer"]:
+        """
+        Decides whether to continue to the Manager or to provide a final answer.
+
+        Returns:
+            "manager": If the Assistant has decided to continue the conversation
+            "final_answer": If the Assistant has decided to provide a final answer
+        """
+        message = state["current_message"]
+
+        if state["nr_interactions"] >= MAX_INTERACTIONS or self.final_answer_hint in message:
+            return "final_answer"
+        else:
+            return "manager"
+
+    def build_agent_graph(self) -> CompiledStateGraph:
+        """Build and return the agent graph."""
+        graph = StateGraph(State)
+
+        # Convert async functions to sync functions using asyncio.run
+        def sync_assistant_node(state: State) -> State:
+            return asyncio.run(self.assistant_node(state))
+
+        def sync_manager_node(state: State) -> State:
+            return asyncio.run(self.manager_node(state))
+
+        def sync_final_answer_node(state: State) -> State:
+            return asyncio.run(self.final_answer_node(state))
+
+        # Add the nodes with sync wrappers
+        graph.add_node("assistant", sync_assistant_node)
+        graph.add_node("manager", sync_manager_node)
+        graph.add_node("final_answer", sync_final_answer_node)
+
+        # Add the edges (START and END are langgraph sentinels, not node names)
+        graph.add_edge(START, "assistant")
+
+        graph.add_conditional_edges(
+            "assistant",
+            self.should_continue,
+            {
+                "manager": "manager",
+                "final_answer": "final_answer"
+            }
+        )
+
+        graph.add_edge("manager", "assistant")
+
+        graph.add_edge("final_answer", END)
+
+        return graph.compile()
+
+
+class Alfred:
+
+    def __init__(self):
+        print("Agent initialized.")
+        self.graph_builder = GraphBuilder()
+        self.agent_graph = self.graph_builder.build_agent_graph()
+
+    async def __call__(self, question: str) -> str:
+        print(f"Agent received question (first 50 chars): {question[:50]}...")
+        result = await self.process_query(question)
+        response = result["final_answer"]
+        print(f"Agent processed the response: {response}")
+
+        return response
+
+    async def process_query(self, query: str) -> Dict[str, Any]:
+        """
+        Process a query through the agent graph.
+
+        Args:
+            query: The initial query to process
+
+        Returns:
+            The final state of the graph execution
+        """
+        initial_state: State = {
+            "initial_query": query,
+            "current_message": query,
+            "nr_interactions": 0,
+            "final_answer": None
+        }
+        self.graph_builder.clear_chat_history()
+
+        # agent_graph.invoke is synchronous; run it in an executor so the
+        # surrounding event loop is not blocked while the graph executes
+        loop = asyncio.get_running_loop()
+        result = await loop.run_in_executor(None, self.agent_graph.invoke, initial_state)
+        return result
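For context, a minimal sketch of how the new Alfred entry point is meant to be driven (the question string and the asyncio.run wrapper are illustrative, not part of this commit):

import asyncio

from alfred import Alfred


async def main():
    agent = Alfred()  # builds and compiles the Assistant/Manager graph
    answer = await agent("What is the capital of France?")
    print(answer)


if __name__ == "__main__":
    asyncio.run(main())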
app.py CHANGED
@@ -2,7 +2,8 @@ import os
 import gradio as gr
 import requests
 import pandas as pd
-from manager import Manager
+
+from alfred import Alfred
 
 
 # --- Constants ---
@@ -12,7 +13,6 @@ QUESTIONS_LIMIT = 3
 
 
 class Application:
-
     def __init__(self):
        self.space_id = os.getenv("SPACE_ID")
        self.username = None
@@ -60,13 +60,13 @@
                print("Fetched questions list is empty.")
                return "Fetched questions list is empty or invalid format.", None
            print(f"Fetched {len(questions_data)} questions.")
-       except requests.exceptions.RequestException as e:
-           print(f"Error fetching questions: {e}")
-           return f"Error fetching questions: {e}", None
        except requests.exceptions.JSONDecodeError as e:
            print(f"Error decoding JSON response from questions endpoint: {e}")
            print(f"Response text: {response.text[:500]}")
            return f"Error decoding server response for questions: {e}", None
+       except requests.exceptions.RequestException as e:
+           print(f"Error fetching questions: {e}")
+           return f"Error fetching questions: {e}", None
        except Exception as e:
            print(f"An unexpected error occurred fetching questions: {e}")
            return f"An unexpected error occurred fetching questions: {e}", None
@@ -190,7 +190,7 @@
 
        # 1. Instantiate the Main Agent
        try:
-           agent = Manager()
+           agent = Alfred()
        except Exception as e:
            print(f"Error instantiating agent: {e}")
            return f"Error initializing agent: {e}", None
@@ -219,7 +219,6 @@
 
 
 class UI:
-
    app = Application()
 
    @classmethod
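A note on the reordered except blocks: requests.exceptions.JSONDecodeError is a subclass of RequestException, so the JSON-specific handler must come first or the broader one shadows it. A minimal sketch of the corrected pattern (URL and timeout are illustrative):

import requests


def fetch_questions(url: str):
    try:
        response = requests.get(url, timeout=15)
        response.raise_for_status()
        return response.json()
    # Must come first: JSONDecodeError subclasses RequestException
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding server response: {e}")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")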
args.py CHANGED
@@ -2,7 +2,7 @@
 from enum import Enum
 
 
-class InterfaceChoice(Enum):
+class LLMInterface(Enum):
     HUGGINGFACE = "HuggingFace"
     OPENAILIKE = "OpenAILike"
     OPENAI = "OpenAI"
@@ -10,7 +10,9 @@ class InterfaceChoice(Enum):
 
 
 class Args:
-    INTERFACE = InterfaceChoice.OPENAILIKE
+    primary_llm_interface = LLMInterface.OPENAILIKE
+    # secondary_llm_interface = LLMInterface.HUGGINGFACE
+    vlm_interface = LLMInterface.HUGGINGFACE
     model_name = "Qwen/Qwen2.5-Coder-32B-Instruct"
     api_base = "http://127.0.0.1:1234/v1"  # LM Studio local endpoint
     api_key = "api_key"
diagrams/diagram.png DELETED
Binary file (55.8 kB)
diagrams/diagram.puml CHANGED
@@ -11,7 +11,7 @@ hide empty members
 ' Agent Definitions (Use class notation for agents with tools as fields)
 
 
-class "Ambassador" as Ambassador <<M>> {
+class "Assistant" as Assistant <<M>> {
 }
 
 class "Manager" as Manager <<M>> {
@@ -63,10 +63,10 @@ class "VideoHandler" as VideoHandler {
 }
 
 ' Agent-to-Agent Connections
-Query --> Ambassador
-Ambassador --> Manager : request
-Manager --> Ambassador : solution
-Ambassador --> Final_Answer : problem solved
+Query --> Assistant
+Assistant --> Manager : request
+Manager --> Assistant : solution
+Assistant --> Final_Answer : problem solved
 
 Manager ..> Manager : complex task
 Manager ..> Solver : trivial task
llm_factory.py CHANGED
@@ -1,21 +1,22 @@
 from llama_index.llms.openai_like import OpenAILike
 from llama_index.llms.openai import OpenAI
 from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
-from args import InterfaceChoice, Args
+
+from args import LLMInterface, Args
 
 
 class LLMFactory():
 
     @classmethod
-    def create(cls, system_prompt, temperature=None, max_tokens=None):
-        if Args.INTERFACE == InterfaceChoice.OPENAILIKE:
+    def create(cls, interface: LLMInterface, system_prompt, temperature=None, max_tokens=None):
+        if interface == LLMInterface.OPENAILIKE:
             return cls._openailike_create(system_prompt, temperature, max_tokens)
-        elif Args.INTERFACE == InterfaceChoice.OPENAI:
+        elif interface == LLMInterface.OPENAI:
             return cls._openai_create(system_prompt, temperature, max_tokens)
-        elif Args.INTERFACE == InterfaceChoice.HUGGINGFACE:
+        elif interface == LLMInterface.HUGGINGFACE:
             return cls._hf_create(system_prompt, temperature, max_tokens)
         else:
-            raise ValueError(f"Invalid interface choice: {Args.INTERFACE}")
+            raise ValueError(f"Interface '{interface}' is not supported!")
 
     @staticmethod
     def _openailike_create(system_prompt, temperature=None, max_tokens=None):
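Since create now takes the interface explicitly instead of reading the global Args.INTERFACE, different agents can target different backends through the same factory. A minimal sketch of the intended call pattern (prompt text and sampling values are illustrative):

from args import Args, LLMInterface
from llm_factory import LLMFactory

# Primary text LLM, e.g. the local OpenAI-compatible LM Studio endpoint
llm = LLMFactory.create(
    Args.primary_llm_interface,
    system_prompt="You are a helpful assistant.",
    temperature=0.7,
    max_tokens=100,
)

# A vision model on a different interface, from the same factory
vlm = LLMFactory.create(Args.vlm_interface, "Describe the image.", 0.2, 256)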
management.py ADDED
@@ -0,0 +1,154 @@
+from llama_index.core.agent.workflow import AgentWorkflow
+from llama_index.core.tools import FunctionTool
+from llama_index.core.workflow import Context
+
+from typing import List
+import os
+
+from llm_factory import LLMFactory
+from solver import Solver, Summarizer
+from args import Args
+
+
+class Assistant:
+    def __init__(self, temperature, max_tokens):
+        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "01_assistant.txt")
+        self.system_prompt = ""
+        with open(system_prompt_path, "r") as file:
+            self.system_prompt = file.read().strip()
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
+        self.agent = AgentWorkflow.setup_agent(llm=llm)
+        self.ctx = Context(self.agent)
+
+    async def query(self, question: str) -> str:
+        """
+        Process a user query and return a response using the agent.
+
+        Args:
+            question: The user's question or input text
+
+        Returns:
+            The agent's response as a string
+        """
+        response = await self.agent.run(question, ctx=self.ctx)
+        response = str(response)
+        return response
+
+    def clear_context(self):
+        """
+        Clears the current context of the agent, resetting any conversation history.
+        This is useful when starting a new conversation or when the context needs to be refreshed.
+        """
+        self.ctx = Context(self.agent)
+
+
+class Manager:
+    def __init__(self, temperature, max_tokens, max_depth):
+        self.max_depth = max_depth
+        self.current_depth = 0
+
+        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "02_manager.txt")
+        self.system_prompt = ""
+        with open(system_prompt_path, "r") as file:
+            self.system_prompt = file.read().strip()
+
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
+        self.agent = AgentWorkflow.from_tools_or_functions(
+            [
+                FunctionTool.from_defaults(
+                    name="require_break_up",
+                    description="Break a complex task into simpler subtasks. Use when a task needs to be divided into manageable parts.",
+                    fn=self.require_break_up
+                ),
+                FunctionTool.from_defaults(
+                    name="require_solution",
+                    description="Request direct solutions for specific tasks. Use when a task is simple enough to be solved directly.",
+                    fn=self.require_solution
+                )
+            ],
+            llm=llm
+        )
+        self.ctx = Context(self.agent)
+        self.solver = Solver(temperature, max_tokens)
+        self.summarizer = Summarizer(temperature, max_tokens)
+
+    async def query(self, question: str, remember=True) -> str:
+        """
+        Process a question using the manager agent and return a response.
+
+        Args:
+            question: The question or task to process
+            remember: Whether to maintain context between queries (default: True)
+
+        Returns:
+            The agent's response as a string
+        """
+        if remember:
+            response = await self.agent.run(question, ctx=self.ctx)
+        else:
+            response = await self.agent.run(question)
+        response = str(response)
+        return response
+
+    def clear_context(self):
+        """
+        Clears the current context of the agent, resetting any conversation history.
+        This is useful when starting a new conversation or when the context needs to be refreshed.
+        """
+        self.ctx = Context(self.agent)
+
+    async def require_break_up(self, tasks: List[str], try_solving=False) -> str:
+        """
+        Break down complex tasks into simpler subtasks recursively up to max_depth.
+
+        Args:
+            tasks: List of tasks to break down
+            try_solving: Whether to attempt solving tasks at max depth (default: False)
+
+        Returns:
+            Summarized report of the task breakdown
+        """
+        print(f"-> require_break_up tool used (input: {tasks})!")
+        if not tasks:
+            return "Error: No tasks provided to break up. Please provide at least one task."
+
+        self.current_depth += 1
+
+        observation = ""
+        if self.current_depth < self.max_depth:
+            for task in tasks:
+                solution = await self.query(task, remember=False)
+                response = f"For task:\n\n{task}\n\nThe following break up has been provided:\n\n{solution}\n\n"
+                observation += response
+        elif try_solving:
+            for task in tasks:
+                # At maximum depth, fall back to the Solver and record its answer
+                solution = await self.solver.query(task)
+                response = f"For task:\n\n{task}\n\nThe Solver provided the solution:\n\n{solution}\n\n"
+                observation += response
+        else:
+            observation = "Maximum depth for `require_break_up` tool has been reached! At this point, you may try to break up the task yourself or try `require_solution`."
+
+        self.current_depth -= 1
+        report = await self.summarizer.query(observation.strip())
+        return report
+
+    async def require_solution(self, tasks: List[str]) -> str:
+        """
+        Request direct solutions for the provided tasks using the Solver.
+
+        Args:
+            tasks: List of tasks to solve
+
+        Returns:
+            Summarized report of solutions for all tasks
+        """
+        print(f"-> require_solution tool used (input: {tasks})!")
+        if not tasks:
+            return "Error: No tasks provided to solve. Please provide at least one task."
+
+        observation = ""
+        for task in tasks:
+            solution = await self.solver.query(task)
+            response = f"For task:\n\n{task}\n\nThe Solver provided the solution:\n\n{solution}\n\n"
+            observation += response
+
+        report = await self.summarizer.query(observation.strip())
+        return report
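For orientation, a minimal sketch of the recursive flow this enables (the task string is illustrative): the Manager's LLM may call require_break_up, which re-queries the Manager itself (with remember=False) until max_depth is reached, then either falls back to the Solver or reports the depth limit; every branch ends with a Summarizer pass.

import asyncio

from management import Manager


async def demo():
    manager = Manager(temperature=0.7, max_tokens=100, max_depth=3)
    report = await manager.query("Plan and execute a small data-cleaning job.")
    print(report)


asyncio.run(demo())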
manager.py DELETED
@@ -1,32 +0,0 @@
-import asyncio
-import os
-from llm_factory import LLMFactory
-
-
-# TODO: Langgraph graph that processes the following:
-# 1. Takes the Query Node (Start Node) and sends it to the Assistant Node (has memory - access to the conversation history stored in the State)
-# 2. The Assistant Node decides whether the query is ready to be delivered (the solution is available in the conversation history, or `MAX_DEPTH` has been reached)
-#    - if yes: formulates a concise final answer and sends it to the Final_Answer Node (End Node)
-#    - if no: formulates a query (could be the first one or a follow-up) and sends it to the Manager Node (also has access to the conversation history)
-# (!) This communication happens back and forth until the query gets solved (or up to a maximum depth defined by a `MAX_DEPTH` variable)
-
-
-class Manager:
-
-    def __init__(self):
-        print("Agent initialized.")
-
-    async def __call__(self, question: str) -> str:
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        self.final_answer = ""
-        response = await self.query(question)
-        print(f"Agent processed the response: {response}")
-        return response
-
-    async def query(self, question: str) -> str:
-        # TODO
-        pass
-
-
-if __name__ == "__main__":
-    print("---__main__---")
solver.py CHANGED
@@ -1,20 +1,70 @@
 from llama_index.core.agent.workflow import AgentWorkflow
 from llama_index.core.tools import FunctionTool
 from llama_index.core.workflow import Context
+
 import asyncio
 import os
+
 from llm_factory import LLMFactory
 from toolbox import Toolbox
+from args import Args
 
 
-class MathExpert:
-
+class Solver:
+    def __init__(self, temperature, max_tokens):
+        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "06_math_expert.txt")
+        self.system_prompt = ""
+        with open(system_prompt_path, "r") as file:
+            self.system_prompt = file.read().strip()
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
+        self.agent = AgentWorkflow.from_tools_or_functions(
+            [
+                Toolbox.math.symbolic_calc,
+                Toolbox.math.unit_converter,
+            ],
+            llm=llm
+        )
+        self.ctx = Context(self.agent)
+
+    def get_system_prompt(self):
+        return self.system_prompt
+
+    async def query(self, question: str) -> str:
+        response = await self.agent.run(question, ctx=self.ctx)
+        response = str(response)
+        return response
+
+    def clear_context(self):
+        """
+        Clears the current context of the agent, resetting any conversation history.
+        This is useful when starting a new conversation or when the context needs to be refreshed.
+        """
+        self.ctx = Context(self.agent)
+
+
+class Summarizer:
+    def __init__(self, temperature, max_tokens):
+        system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "01_assistant.txt")
+        self.system_prompt = ""
+        with open(system_prompt_path, "r") as file:
+            self.system_prompt = file.read().strip()
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
+        self.agent = AgentWorkflow.setup_agent(llm=llm)
+        self.ctx = Context(self.agent)
+
+    async def query(self, question: str) -> str:
+        response = await self.agent.run(question, ctx=self.ctx)
+        response = str(response)
+        return response
+
+
+class MathExpert:
     def __init__(self, temperature, max_tokens):
         system_prompt_path = os.path.join(os.getcwd(), "system_prompts", "06_math_expert.txt")
         self.system_prompt = ""
         with open(system_prompt_path, "r") as file:
             self.system_prompt = file.read().strip()
-        llm = LLMFactory.create(self.system_prompt, temperature, max_tokens)
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
         self.agent = AgentWorkflow.from_tools_or_functions(
             [
                 Toolbox.math.symbolic_calc,
@@ -32,6 +82,13 @@ class MathExpert:
         response = str(response)
         return response
 
+    def clear_context(self):
+        """
+        Clears the current context of the agent, resetting any conversation history.
+        This is useful when starting a new conversation or when the context needs to be refreshed.
+        """
+        self.ctx = Context(self.agent)
+
 
 class Researcher:
     def __init__(self, temperature, max_tokens):
@@ -39,7 +96,7 @@ class Researcher:
         self.system_prompt = ""
         with open(system_prompt_path, "r") as file:
             self.system_prompt = file.read().strip()
-        llm = LLMFactory.create(self.system_prompt, temperature, max_tokens)
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
 
         self.agent = AgentWorkflow.from_tools_or_functions(
             Toolbox.web_search.duck_duck_go_tools,
@@ -62,7 +119,7 @@ class EncryptionExpert:
         self.system_prompt = ""
         with open(system_prompt_path, "r") as file:
             self.system_prompt = file.read().strip()
-        llm = LLMFactory.create(self.system_prompt, temperature, max_tokens)
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
 
         self.agent = AgentWorkflow.from_tools_or_functions(
             [
@@ -95,7 +152,7 @@ class RecursiveSolverAgent:
         pass
 
 
-class Solver:
+class Solver_2:
 
     def __init__(self, temperature, max_tokens):
         print("Agent initialized.")
@@ -103,7 +160,7 @@ class Solver_2:
         self.system_prompt = ""
         with open(system_prompt_path, "r") as file:
             self.system_prompt = file.read().strip()
-        llm = LLMFactory.create(self.system_prompt, temperature, max_tokens)
+        llm = LLMFactory.create(Args.primary_llm_interface, self.system_prompt, temperature, max_tokens)
         self.agent = AgentWorkflow.from_tools_or_functions(
             [
                 FunctionTool.from_defaults(self.delegate_to_math_expert),
toolbox.py CHANGED
@@ -1,9 +1,9 @@
-import pint
-import sympy as sp
-
 from llama_index.core.tools import FunctionTool
 from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
 
+import pint
+import sympy as sp
+
 
 class _Math: