24Arys11 committed on
Commit 98ada6c · 1 Parent(s): 4fb4269

implemented graph.py, agents, guard; reduced image and video node to viewer node; renamed system prompts
agents.py CHANGED
@@ -33,7 +33,7 @@ class Summarizer(IAgent):
     Generates concise summaries of conversations or passages.
     """
     def __init__(self):
-        super().__init__("04_summarizer.txt", PRIMARY_AGENT_PRESET)
+        super().__init__("03_summarizer.txt", PRIMARY_AGENT_PRESET)
 
 
 class Solver(IAgent):
@@ -41,7 +41,7 @@ class Solver(IAgent):
     Central problem-solving node that coordinates with specialized experts based on task requirements
     """
     def __init__(self):
-        super().__init__("03_solver.txt", PRIMARY_AGENT_PRESET)
+        super().__init__("04_solver.txt", PRIMARY_AGENT_PRESET)
 
 
 class Researcher(IAgent):
@@ -77,28 +77,20 @@ class Reasoner(IAgent):
             encryption_toolbox.caesar_cipher_brute_force,
             encryption_toolbox.reverse_string
         ]
-        super().__init__("08_reasoner.txt", PRIMARY_AGENT_PRESET, tools)
+        super().__init__("06_reasoner.txt", PRIMARY_AGENT_PRESET, tools)
 
 
-class OutputGuard(IAgent):
-    """
-    Performs logical reasoning, inference, and step-by-step problem-solving
-    """
-    def __init__(self):
-        super().__init__("11_output_guard.txt", SECONDARY_AGENT_PRESET)
-
-
-class ImageHandler(IAgent):
+class Viewer(IAgent):
     """
     Processes, analyzes, and generates information related to images
     """
     def __init__(self):
-        super().__init__("09_image_handler.txt", VISION_AGENT_PRESET)
+        super().__init__("07_viewer.txt", VISION_AGENT_PRESET)
 
 
-class VideoHandler(IAgent):
+class OutputGuard(IAgent):
     """
-    Processes, analyzes, and generates information related to videos
+    Performs logical reasoning, inference, and step-by-step problem-solving
     """
     def __init__(self):
-        super().__init__("10_video_handler.txt", VISION_AGENT_PRESET)
+        super().__init__("08_output_guard.txt", SECONDARY_AGENT_PRESET)
alfred.py CHANGED
@@ -6,7 +6,10 @@ from graph_builder import GraphBuilder
 
 
 # Maximum number of interactions between Assistant and Manager
-MAX_INTERACTIONS = 5
+MAX_INTERACTIONS = 6
+# Verification happens every few messages to check whether the manager agent
+# is making progress or has gotten stuck (in a repetitive loop or similar pitfalls)
+AUDIT_INTERVAL = 3
 # Maximum depth of recursion for Manager
 MAX_DEPTH = 2
 # For both Assistant and Manager:
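A hedged sketch of how these constants could seed the graph state defined in graph.py; field names follow the State TypedDict, and mapping MAX_DEPTH to max_solving_effort is an assumption, since that wiring is not shown in this commit.

# Illustrative only; the actual wiring lives in graph_builder.py, which is not in this diff.
initial_state = {
    "initial_query": question,             # hypothetical variable holding the user's question
    "messages": [],
    "task_progress": [],
    "audit_interval": AUDIT_INTERVAL,      # auditor reviews every 3rd manager turn
    "manager_queries": 0,
    "solver_queries": 0,
    "max_interactions": MAX_INTERACTIONS,  # manager is routed to final_answer after 6 turns
    "max_solving_effort": MAX_DEPTH,       # assumption: how MAX_DEPTH is consumed is not shown
    "final_response": None,
}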
args.py CHANGED
@@ -14,10 +14,10 @@ class Args:
     LOGGER = Logger.set_logger()
     primary_llm_interface=LLMInterface.OPENAI
     # secondary_llm_interface=LLMInterface.HUGGINGFACE
-    vlm_interface=LLMInterface.HUGGINGFACE
+    vlm_interface=LLMInterface.OPENAI
     primary_model="qwen2.5-qwq-35b-eureka-cubed-abliterated-uncensored"
     secondary_model="qwen2.5-7b-instruct-1m"
-    vision_model="gemma-3-27b-it"
+    vision_model="qwen/qwen2.5-vl-7b"
     api_base="http://127.0.0.1:1234/v1" # LM Studio local endpoint
     api_key=None
     token = "" # Not needed when using OpenAILike API
design.puml CHANGED
@@ -8,36 +8,32 @@ node START TERMINAL_NODE_COLOR[
 START
 ]
 
-node manager NOT_IMPLEMENTED_NODE_COLOR[
+node manager IMPLEMENTED_NODE_COLOR[
 manager
 ]
 
-node final_answer NOT_IMPLEMENTED_NODE_COLOR[
+node final_answer IMPLEMENTED_NODE_COLOR[
 final_answer
 ]
 
-node auditor NOT_IMPLEMENTED_NODE_COLOR[
+node auditor IMPLEMENTED_NODE_COLOR[
 auditor
 ]
 
-node solver NOT_IMPLEMENTED_NODE_COLOR[
+node solver IMPLEMENTED_NODE_COLOR[
 solver
 ]
 
-node researcher NOT_IMPLEMENTED_NODE_COLOR[
+node researcher IMPLEMENTED_NODE_COLOR[
 researcher
 ]
 
-node reasoner NOT_IMPLEMENTED_NODE_COLOR[
+node reasoner IMPLEMENTED_NODE_COLOR[
 reasoner
 ]
 
-node image_handler NOT_IMPLEMENTED_NODE_COLOR[
-image_handler
-]
-
-node video_handler NOT_IMPLEMENTED_NODE_COLOR[
-video_handler
+node viewer NOT_IMPLEMENTED_NODE_COLOR[
+viewer
 ]
 
 node END TERMINAL_NODE_COLOR[
@@ -53,11 +49,9 @@ auditor --> manager
 solver --> manager
 solver --> researcher
 solver --> reasoner
-solver --> image_handler
-solver --> video_handler
+solver --> viewer
 researcher --> solver
 reasoner --> solver
-image_handler --> solver
-video_handler --> solver
+viewer --> solver
 
 @enduml
design.yaml CHANGED
@@ -8,39 +8,34 @@ nodes:
   - name: manager
     connections: [solver, auditor, final_answer]
     description: Orchestrates the workflow by delegating tasks to specialized nodes and integrating their outputs
-    status: NOT_IMPLEMENTED
+    status: IMPLEMENTED
 
   - name: final_answer
     connections: [END]
     description: Formats and delivers the final response to the user
-    status: NOT_IMPLEMENTED
+    status: IMPLEMENTED
 
   - name: auditor
     connections: [manager]
     description: Reviews manager's outputs for accuracy, safety, and quality
-    status: NOT_IMPLEMENTED
+    status: IMPLEMENTED
 
   - name: solver
-    connections: [manager, researcher, reasoner, image_handler, video_handler]
+    connections: [manager, researcher, reasoner, viewer]
     description: Central problem-solving node that coordinates with specialized experts based on task requirements
-    status: NOT_IMPLEMENTED
+    status: IMPLEMENTED
 
   - name: researcher
     connections: [solver]
     description: Retrieves and synthesizes information from various sources to answer knowledge-based questions
-    status: NOT_IMPLEMENTED
+    status: IMPLEMENTED
 
   - name: reasoner
     connections: [solver]
     description: Performs logical reasoning, inference, and step-by-step problem-solving
-    status: NOT_IMPLEMENTED
+    status: IMPLEMENTED
 
-  - name: image_handler
+  - name: viewer
     connections: [solver]
-    description: Processes, analyzes, and generates information related to images
-    status: NOT_IMPLEMENTED
-
-  - name: video_handler
-    connections: [solver]
-    description: Processes, analyzes, and generates information related to videos
+    description: Processes, analyzes, and generates vision related information
     status: NOT_IMPLEMENTED
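Since design.yaml is machine-readable, the connection graph can be sanity-checked against the node list; a small sketch assuming PyYAML and the schema visible above (the START/END handling is an assumption).

# Hedged validation sketch, not part of the commit.
import yaml

with open("design.yaml") as fh:
    design = yaml.safe_load(fh)

known = {node["name"] for node in design["nodes"]} | {"START", "END"}
for node in design["nodes"]:
    for target in node["connections"]:
        assert target in known, f"{node['name']} connects to unknown node '{target}'"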
graph.py CHANGED
@@ -8,17 +8,77 @@ import logging
 from pathlib import Path
 
 from args import Args
+from agents import *
+from itf_agent import IAgent
 
 
 class State(TypedDict):
     """State class for the agent graph."""
     initial_query: str
     # messages: List[Dict[str, Any]]
-    messages: Annotated[list[AnyMessage], add_messages]
-    nr_interactions: int
+    # messages: Annotated[list[BaseMessage], add_messages]
+    messages: List[BaseMessage]  # Manager's context
+    task_progress: List[BaseMessage]  # Solver's context
+    audit_interval: int
+    manager_queries: int
+    solver_queries: int
+    max_interactions: int
+    max_solving_effort: int
     final_response: Optional[str]
 
 
+class Agents:
+    manager = Manager()
+    auditor = Auditor()
+    summarizer = Summarizer()
+    solver = Solver()
+    researcher = Researcher()
+    reasoner = Reasoner()
+    guardian = OutputGuard()
+    viewer = Viewer()
+
+    @classmethod
+    def guard_output(cls, agent: IAgent, messages: List[BaseMessage]) -> BaseMessage:
+        response = agent.query(messages)
+        guarded_response = cls.guardian.query([response])
+        return guarded_response
+
+
+class _Helper:
+    """
+    Collection of helper methods.
+    """
+    @staticmethod
+    def _is_divisible(first: int, second: int) -> bool:
+        """
+        Determines if the first number is divisible by the second number.
+
+        Args:
+            first: The dividend (number to be divided)
+            second: The divisor (number to divide by)
+
+        Returns:
+            bool: True if first is divisible by second without remainder, False otherwise
+        """
+        if second == 0:
+            return False  # Division by zero is undefined
+        return first % second == 0
+
+    @staticmethod
+    def solver_handler(task_progress: List[BaseMessage]) -> Literal["manager", "researcher", "reasoner", "viewer", "unspecified"]:
+        response = str(task_progress[-1].content)
+        if "to: researcher" in response.lower():
+            return "researcher"
+        elif "to: reasoner" in response.lower():
+            return "reasoner"
+        elif "to: viewer" in response.lower():
+            return "viewer"
+        elif "to: manager" in response.lower():
+            return "manager"
+        else:
+            return "unspecified"
+
+
 class Nodes:
     """
     Collection of node functions for the agent graph.
@@ -27,57 +87,63 @@ class Nodes:
         """
         Orchestrates the workflow by delegating tasks to specialized nodes and integrating their outputs
         """
-        # TODO: To implement...
-        pass
+        state["manager_queries"] += 1
+        if not _Helper._is_divisible(state["manager_queries"], state["audit_interval"]):
+            response = Agents.guard_output(Agents.manager, state["messages"])
+            state["messages"].append(response)
+        # else: wait for auditor's feedback !
+
+        return state
 
     def final_answer_node(self, state: State) -> State:
         """
         Formats and delivers the final response to the user
         """
-        # TODO: To implement...
-        pass
+        instruction = BaseMessage("Formulate a definitive final answer in english. Be very concise and use no redundant words !")
+        state["messages"].append(instruction)
+        response = Agents.manager.query(state["messages"])
+        state["final_response"] = str(response.content)
+        return state
 
     def auditor_node(self, state: State) -> State:
         """
-        Reviews manager's outputs for accuracy, safety, and quality
+        Reviews manager's outputs for accuracy, safety, and quality and provides feedback
         """
-        # TODO: To implement...
-        pass
+        response = Agents.guard_output(Agents.auditor, state["messages"])
+        state["messages"].append(response)
+        return state
 
     def solver_node(self, state: State) -> State:
         """
        Central problem-solving node that coordinates with specialized experts based on task requirements
         """
-        # TODO: To implement...
-        pass
+        response = Agents.guard_output(Agents.solver, state["task_progress"])
+        state["task_progress"].append(response)
+        return state
 
     def researcher_node(self, state: State) -> State:
         """
         Retrieves and synthesizes information from various sources to answer knowledge-based questions
         """
-        # TODO: To implement...
-        pass
+        response = Agents.guard_output(Agents.researcher, state["task_progress"])
+        state["task_progress"].append(response)
+        return state
 
     def reasoner_node(self, state: State) -> State:
         """
         Performs logical reasoning, inference, and step-by-step problem-solving
         """
-        # TODO: To implement...
-        pass
+        response = Agents.guard_output(Agents.reasoner, state["task_progress"])
+        state["task_progress"].append(response)
+        return state
 
-    def image_handler_node(self, state: State) -> State:
+    def viewer_node(self, state: State) -> State:
         """
         Processes, analyzes, and generates information related to images
         """
-        # TODO: To implement...
-        pass
-
-    def video_handler_node(self, state: State) -> State:
-        """
-        Processes, analyzes, and generates information related to videos
-        """
-        # TODO: To implement...
-        pass
+        response = Agents.guard_output(Agents.viewer, state["task_progress"])
+        state["task_progress"].append(response)
+        return state
 
 
 class Edges:
@@ -89,13 +155,35 @@ class Edges:
         Conditional edge for manager node.
         Returns one of: "solver", "auditor", "final_answer"
         """
-        # TODO: To implement...
-        pass
+        last_message = state["messages"][-1]
+        answer_ready = "FINAL ANSWER:" in str(last_message.content)
+        max_interractions_reached = state["manager_queries"] >= state["max_interactions"]
+        if answer_ready or max_interractions_reached:
+            return "final_answer"
+
+        if _Helper._is_divisible(state["manager_queries"], state["audit_interval"]):
+            return "auditor"
 
-    def solver_edge(self, state: State) -> Literal["manager", "researcher", "reasoner", "image_handler", "video_handler"]:
+        # Prepare task for Solver
+        state["task_progress"] = [last_message]
+        return "solver"
+
+    def solver_edge(self, state: State) -> Literal["manager", "researcher", "reasoner", "viewer"]:
         """
         Conditional edge for solver node.
-        Returns one of: "manager", "researcher", "encryption_expert", "math_expert", "reasoner", "image_handler", "video_handler"
+        Returns one of: "manager", "researcher", "reasoner", "viewer"
         """
-        # TODO: To implement...
-        pass
+        receiver = _Helper.solver_handler(state["task_progress"])
+
+        if receiver == "unspecified":
+            instruction = BaseMessage("Formulate an answer for the manager with your findings so far !")
+            state["task_progress"].append(instruction)
+            response = Agents.solver.query(state["task_progress"])
+            state["messages"].append(response)
+            return "manager"
+
+        if receiver == "manager":
+            response = state["task_progress"][-1]
+            state["messages"].append(response)
+
+        return receiver
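graph_builder.py (imported by alfred.py) is not part of this commit; as a rough sketch of how these nodes and conditional edges could be assembled, assuming LangGraph's StateGraph API and the topology from design.yaml:

# Hypothetical wiring sketch; node names follow design.yaml, the LangGraph calls are assumptions.
from langgraph.graph import StateGraph, START, END
from graph import State, Nodes, Edges

nodes, edges = Nodes(), Edges()
builder = StateGraph(State)

for name, fn in [
    ("manager", nodes.manager_node), ("final_answer", nodes.final_answer_node),
    ("auditor", nodes.auditor_node), ("solver", nodes.solver_node),
    ("researcher", nodes.researcher_node), ("reasoner", nodes.reasoner_node),
    ("viewer", nodes.viewer_node),
]:
    builder.add_node(name, fn)

builder.add_edge(START, "manager")
builder.add_conditional_edges("manager", edges.manager_edge)  # solver / auditor / final_answer
builder.add_conditional_edges("solver", edges.solver_edge)    # manager / researcher / reasoner / viewer
builder.add_edge("auditor", "manager")
builder.add_edge("researcher", "solver")
builder.add_edge("reasoner", "solver")
builder.add_edge("viewer", "solver")
builder.add_edge("final_answer", END)

compiled = builder.compile()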
itf_agent.py CHANGED
@@ -60,7 +60,7 @@ class IAgent():
             raise RuntimeError("LOGGER must be defined before querying the agent.")
 
         separator = "=============================="
-        Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{question}\n")
+        Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{messages}\n")
 
         system_prompt = self.get_system_prompt()
         conversation = [SystemMessage(content=system_prompt)] + messages
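The fix above replaces a stale question variable with the messages list that query actually receives. If the raw list repr proves noisy in the log, one hedged alternative (not in the commit) is to format role and content per message:

# Purely illustrative alternative for the same log call; message.type is the
# LangChain role string ("system", "human", "ai").
formatted = "\n".join(f"[{message.type}] {message.content}" for message in messages)
Args.LOGGER.log(logging.INFO, f"\n{separator}\nAgent '{self.name}' has been queried !\nINPUT:\n{formatted}\n")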
system_prompts/{04_summarizer.txt → 03_summarizer.txt} RENAMED
File without changes
system_prompts/{03_solver.txt → 04_solver.txt} RENAMED
File without changes
system_prompts/06_encryption_expert.txt DELETED
@@ -1,64 +0,0 @@
-You are an encryption and decryption specialist assistant. Your goal is to help users encode or decode messages using various encryption techniques.
-
-AVAILABLE TOOLS:
-1. ascii_encode: Convert text to ASCII representation
-2. ascii_decode: Convert ASCII values back to text
-3. base64_encode: Encode text using Base64
-4. base64_decode: Decode Base64 back to text
-5. caesar_cipher_encode: Apply Caesar cipher encryption with a specified shift
-6. caesar_cipher_decode: Apply Caesar cipher decryption with a specified shift
-7. caesar_cipher_brute_force: Tries all possible shifts (1-26) to decode a Caesar cipher
-   - IMPORTANT: For efficiency, use this only on a small substring to identify the shift
-   - Once the shift is determined, use caesar_cipher_decode with the identified shift on the full text
-8. reverse_string: Reverse the characters in a text
-9. unit_converter: Convert between measurement units
-
-Your capabilities include:
-1. Base64 encoding and decoding
-2. Caesar cipher encryption and decryption (with customizable shift values)
-3. String reversal
-
-DECRYPTION STRATEGY GUIDE:
-When asked to decrypt or decipher an unknown message:
-
-PATTERN RECOGNITION & REASONING APPROACH:
-- First, analyze the encrypted text to identify patterns
-- For potential Caesar ciphers:
-  * Look for preserved patterns (punctuation, numbers, spaces)
-  * Identify preserved word structure (short words may be "a", "an", "the", "and", etc.)
-  * Use frequency analysis - in English, 'e', 't', 'a', 'o', 'i', 'n' are most common letters
-- When no shift is specified for Caesar ciphers:
-  * Extract a short, representative sample from the text (ideally containing common words)
-  * Apply caesar_cipher_brute_force to the sample to identify the likely shift
-  * Once identified, use caesar_cipher_decode with that shift on the entire message
-- For encoded messages:
-  * Check for base64 indicators (character set A-Z, a-z, 0-9, +, /, =)
-  * Check for padding characters (=) at the end which often indicate base64
-- For reversed text:
-  * Check if reversing produces readable text using reverse_string
-- For combined encryption:
-  * Try decrypting using one method, then apply another
-
-DEBUGGING AND REASONING PROCESS:
-- Show your work by explaining what you're trying
-- For each Caesar shift attempt, show a sample of the output
-- Compare partial results against known English words
-- Consider if you're seeing partial success (some words readable but others not)
-- If you find readable segments, expand from there
-
-EXAMPLES WITH REASONING:
-
-Example 1: "Ifmmp xpsme"
-Reasoning: Looking at the pattern, it appears to be a short phrase. Using caesar_cipher_brute_force on this sample will show that shift 1 produces "Hello world".
-
-Example 2: "Xlmw mw e wivmicw tlvewi"
-Reasoning: Using caesar_cipher_brute_force on a portion "Xlmw mw" will reveal shift 4 produces "This is", then apply caesar_cipher_decode with shift=4 to the entire message to get "This is a serious phrase".
-
-Example 3: "Bmfy bfx ymj wjxzqy gjybjjs z-hqzo fsi zsnajwxnyfyjf-hwfntaf ns fuwnq 2025?"
-Reasoning:
-- Take a sample "Bmfy bfx" and use caesar_cipher_brute_force
-- Identify shift 5 produces "What was"
-- Apply caesar_cipher_decode with shift=5 to the full message
-
-Never give up after a single attempt. If one approach doesn't work, try another systematically.
-For ANY cipher, show your reasoning and demonstrate multiple decryption attempts.
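The deleted prompt refers to tools in encryption_toolbox that are not shown in this diff; a minimal sketch of the brute-force behaviour it describes (the implementation is an assumption, only the tool names and the "Ifmmp xpsme" example come from the prompt):

# Illustrative sketch of caesar_cipher_brute_force / caesar_cipher_decode as described above.
def caesar_cipher_decode(text: str, shift: int) -> str:
    out = []
    for ch in text:
        if ch.isalpha():
            base = ord("A") if ch.isupper() else ord("a")
            out.append(chr((ord(ch) - base - shift) % 26 + base))
        else:
            out.append(ch)  # punctuation, digits and spaces are preserved
    return "".join(out)

def caesar_cipher_brute_force(sample: str) -> dict[int, str]:
    # Try every shift on a short sample, as the prompt recommends
    return {shift: caesar_cipher_decode(sample, shift) for shift in range(1, 27)}

print(caesar_cipher_brute_force("Ifmmp xpsme")[1])  # -> "Hello world" (shift 1)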
system_prompts/{08_reasoner.txt → 06_reasoner.txt} RENAMED
File without changes
system_prompts/07_math_expert.txt DELETED
@@ -1,91 +0,0 @@
-You are a powerful mathematical problem solver with access to specialized tools and reasoning capabilities.
-YOU ALWAYS PROCEED METHODICALLY THINKING THROUGH THE PROBLEM STEP BY STEP !
-
-AVAILABLE TOOLS:
-1. SYMBOLIC_MATH_CALCULATOR: For all mathematical computations
-   - Example: symbolic_math_calculator("solve(x**2 - 5*x + 6, x)")
-
-2. UNIT_CONVERTER: ONLY for unit conversions between measurement systems
-   - Example: unit_converter(value=100, from_unit="cm", to_unit="inch")
-
-3. REASONER: A trusty advisor with strong reasoning capabilities to help with reasoning and complex logical analysis.
-
-MANDATORY PROTOCOL:
-- Never rely on your calculation abilities. Use tools instead !
-- For ANY mathematical operation you could use `symbolic_math_calculator`
-- For converting between physical units (e.g., meters to feet), use `unit_converter`.
-- Do not state mathematical results unless produced by a tool
-
-INTEGRATED REASONING AND PROBLEM-SOLVING FRAMEWORK:
-1. ANALYZE: Define the problem precisely, identify knowns/unknowns and constraints
-2. STRUCTURE: Organize information into a coherent mathematical framework
-3. PLAN: Outline a logical solution strategy with clear steps
-4. EXECUTE: Implement each step with appropriate tool calls
-5. VERIFY: Confirm results through multiple verification methods
-6. INTERPRET: Explain the mathematical meaning and implications
-
-MATHEMATICAL REASONING APPROACHES:
-- For proof-based problems: Apply deductive reasoning with axioms and theorems
-- For optimization problems: Identify constraints and objective functions
-- For probabilistic problems: Apply probability axioms and Bayesian reasoning
-- For algebraic manipulation: Use equivalence transformations and substitutions
-- For numerical approximation: Assess convergence and error bounds
-
-ERROR HANDLING AND RECOVERY:
-- If a tool call returns an error, immediately try alternative syntax
-- Try at least 3 different variations before considering an approach failed
-- Break complex expressions into simpler components
-- Apply mathematical identities to transform expressions
-- Consider alternative representations (e.g., polar form, logarithmic form)
-
-VERIFICATION METHODS (USE AT LEAST TWO):
-- Substitute solutions back into original equations
-- Calculate using alternative methods
-- Test with specific numerical values
-- Apply mathematical identities to verify equivalence
-- Check dimensional consistency
-
-SYMBOLIC MATH CALCULATOR STRATEGIES:
-
-FOR CHALLENGING INTEGRALS/EQUATIONS:
-- Direct computation: symbolic_math_calculator("integrate(log(sin(x)), (x, 0, pi/2))")
-- Alternative approaches:
-  * Try different functions: "Integral", "solveset", "factor"
-  * Use numerical methods: "N(integrate(...), 10)"
-  * Apply series expansions or transforms
-  * Break into multiple steps
-
-UNIT CONVERTER EXAMPLES:
-- Length: unit_converter(value=100, from_unit="cm", to_unit="inch")
-- Temperature: unit_converter(value=32, from_unit="fahrenheit", to_unit="celsius")
-
-PROGRESS TRACKING FRAMEWORK:
-1. TRACK KNOWLEDGE STATE:
-   - [KNOWN] List given facts, derived results, and established equations
-   - [UNKNOWN] Identify variables/relationships still needed
-   - [GOAL] State the specific variable or relationship currently targeted
-
-2. SOLUTION MILESTONES:
-   - [STEP X/Y] Label steps with clear numbering
-   - After each step: Update known information and next objective
-   - [PROGRESS: XX%] Estimate completion percentage
-
-RESPONSE STRUCTURE:
-1. PROBLEM ANALYSIS:
-   - [KNOWN/UNKNOWN/CONSTRAINTS] Concise lists of each
-
-2. SOLUTION STRATEGY:
-   - Brief stepwise plan with mathematical justification
-
-3. EXECUTION:
-   - [STEP X/Y] Current objective → Tool call → Update knowledge state
-   - Track each variable solved and relationship established
-
-4. VERIFICATION:
-   - At least two distinct verification methods with tool calls
-
-5. CONCLUSION:
-   - [RESULT] Final verified solution with appropriate units
-   - Brief interpretation of mathematical significance
-
-Only present conclusions directly supported by tool outputs. Use sound mathematical logic at each step, and persist through challenges until reaching a solution.
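The symbolic_math_calculator examples in the deleted prompt read like SymPy expressions; for reference, the two quoted calls reproduced directly with SymPy (the tool's real wrapper is not shown in this diff):

# Hedged reference sketch using SymPy; only the expressions come from the prompt.
import sympy

x = sympy.symbols("x")
roots = sympy.solve(x**2 - 5*x + 6, x)                                   # [2, 3]
integral = sympy.integrate(sympy.log(sympy.sin(x)), (x, 0, sympy.pi/2))  # -pi*log(2)/2
print(roots, integral)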
system_prompts/{09_image_handler.txt → 07_viewer.txt} RENAMED
File without changes
system_prompts/{11_output_guard.txt → 08_output_guard.txt} RENAMED
File without changes
system_prompts/10_video_handler.txt DELETED
@@ -1,36 +0,0 @@
-You are a specialized video intelligence system optimized for temporal media analysis, description, and video content guidance.
-
-NOTE ON TOOLS:
-You are integrated with a vision language model (VLM) interface that enables you to analyze and interpret videos. You don't need to call specific tools - your system automatically processes video content that is sent to you.
-
-VIDEO PROCESSING CAPABILITIES:
-- Sequential scene analysis and narrative tracking
-- Action recognition and movement pattern identification
-- Temporal relationship mapping between scenes and elements
-- Audio-visual integration and multimodal content analysis
-- Cinematographic technique identification and assessment
-
-ANALYTICAL FRAMEWORK FOR VIDEO INTERPRETATION:
-1. SEQUENCE: Track narrative progression and scene transitions
-2. IDENTIFY: Catalog visual elements, subjects, actions, and environmental contexts
-3. INTEGRATE: Synthesize audio elements with visual content (dialogue, soundtrack, effects)
-4. ANALYZE: Recognize technical aspects (camera work, editing, lighting, effects)
-5. INTERPRET: Construct meaning from multimodal and temporal elements
-
-DESCRIPTIVE PROTOCOLS:
-- For content summarization: Hierarchical overview from key themes to specific details
-- For technical analysis: Assessment of production elements, techniques, and quality
-- For narrative breakdown: Sequential scene description with causal relationships
-- For accessibility purposes: Integrated description of visual and audio elements
-- For educational contexts: Identification of demonstrative techniques or concepts
-
-VIDEO GUIDANCE CAPABILITIES:
-- Structural planning for video creation (storyboard concepts, scene organization)
-- Technical guidance on shot composition and sequence construction
-- Narrative development for temporal storytelling
-- Editing pattern recommendations and transition strategies
-- Audio-visual integration planning
-
-When analyzing video content, maintain awareness of the interaction between technical elements and narrative impact. Track both explicit content and implicit storytelling techniques that shape viewer experience.
-
-For all video interactions, provide temporally-organized analysis that acknowledges both the sequential nature of video content and the holistic viewing experience. Consider both micro-elements (individual shots) and macro-structures (overall narrative arc).
test.py CHANGED
@@ -128,11 +128,11 @@ class TestAlfredAgent(unittest.TestCase):
         # TODO: Add assertions to verify the state changes
         print(f"State after node execution: {test_state}")
 
-    def test_image_handler_node(self):
+    def test_viewer_node(self):
         """
-        Test the image_handler node functionality.
+        Test the viewer node functionality.
 
-        Processes, analyzes, and generates information related to images
+        Processes, analyzes, and generates vision related information
         """
         # Create an instance of Nodes class
         nodes = Nodes()
@@ -142,26 +142,7 @@
 
         # Test the node function
         print(f"Testing 'image_handler' node...")
-        nodes.image_handler_node(test_state)
-
-        # TODO: Add assertions to verify the state changes
-        print(f"State after node execution: {test_state}")
-
-    def test_video_handler_node(self):
-        """
-        Test the video_handler node functionality.
-
-        Processes, analyzes, and generates information related to videos
-        """
-        # Create an instance of Nodes class
-        nodes = Nodes()
-
-        # Create a test state
-        test_state = {} # TODO: Initialize with appropriate test data
-
-        # Test the node function
-        print(f"Testing 'video_handler' node...")
-        nodes.video_handler_node(test_state)
+        nodes.viewer_node(test_state)
 
         # TODO: Add assertions to verify the state changes
         print(f"State after node execution: {test_state}")