from typing import List

from llama_index.core.tools import FunctionTool

from args import Args
from itf_agent import IAgent
from solver import Solver, Summarizer
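

# Agents defined here: Assistant answers queries directly, while Manager breaks
# complex tasks into subtasks and delegates them to the Solver/Summarizer team.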
class Assistant(IAgent):
    """Agent configured with the 01_assistant.txt prompt on the primary LLM interface."""

    def __init__(self, temperature, max_tokens):
        super().__init__(temperature, max_tokens, "01_assistant.txt", Args.primary_llm_interface)


class Manager(IAgent):
    """Agent that decomposes complex tasks and delegates them to a solving team."""

    def __init__(self, temperature, max_tokens, max_depth):
        super().__init__(temperature, max_tokens, "02_manager.txt", Args.primary_llm_interface)
        self.max_depth = max_depth
        self.current_depth = 0
        # Track the current query so it can be forwarded to the team when needed.
        self.current_query = ""
        self.solver = Solver(temperature, max_tokens)
        self.summarizer = Summarizer(temperature, max_tokens)

    def setup_tools(self) -> List:
        """Build the list of FunctionTools the Manager exposes to its LLM."""
        return [
            FunctionTool.from_defaults(
                name="require_break_up",
                description="Break a complex task into simpler subtasks. Use when a task needs to be divided into manageable parts.",
                fn=self.require_break_up
            ),
            FunctionTool.from_defaults(
                name="require_solution",
                description="Request direct solutions for specific tasks. Use when a task is simple enough to be solved directly.",
                fn=self.require_solution
            ),
            FunctionTool.from_defaults(
                name="forward_query",
                description="Request direct solutions for the current query. Use as a first attempt and to make the team aware of the task's context.",
                fn=self.forward_query
            )
        ]
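
    # query() is overridden so the incoming question is remembered and can be
    # replayed later by the forward_query tool.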
    async def query(self, question: str, has_context=True) -> str:
        self.current_query = question
        return await super().query(question, has_context)

    async def require_break_up(self, tasks: List[str], try_solving=False) -> str:
        """
        Break down complex tasks into simpler subtasks, recursing up to max_depth.

        Args:
            tasks: List of tasks to break down
            try_solving: Whether to attempt solving tasks at max depth (default: False)

        Returns:
            Summarized report of the task breakdown
        """
print(f"-> require_break_up tool used (input: {tasks}) !")
if not tasks:
return "Error: No tasks provided to break up. Please provide at least one task."
        self.current_depth += 1
        observation = ""
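        # Below max_depth, recurse through self.query so the Manager's own LLM
        # decomposes each subtask further.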
        if self.current_depth < self.max_depth:
            for task in tasks:
                solution = await self.query(task, has_context=False)
                response = f"For task:\n\n{task}\n\nThe following break up has been provided:\n\n{solution}\n\n"
                observation += response
        elif try_solving:
            # At max depth: solve each task directly instead of recursing further.
            for task in tasks:
                solution = await self.solver.query(task)
                observation += f"For task:\n\n{task}\n\nThe following solution has been provided:\n\n{solution}\n\n"
        else:
            observation = "Maximum depth for the `require_break_up` tool has been reached. At this point, you may try to break up the task yourself or try `require_solution`."
        self.current_depth -= 1
        # Condense the accumulated observations into a single report for the caller.
        report = await self.summarizer.query(observation.strip())
        return report

    async def require_solution(self, tasks: List[str]) -> str:
        """
        Request direct solutions for the provided tasks using the Solver.

        Args:
            tasks: List of tasks to solve

        Returns:
            Summarized report of solutions for all tasks
        """
print(f"-> require_solution tool used with input: {tasks} !")
if not tasks:
return "Error: No tasks provided to solve. Please provide at least one task."
        observation = ""
        for task in tasks:
            solution = await self.solver.query(task)
            # Tag each solution with its task so the summarizer can tell them apart.
            observation += f"For task:\n\n{task}\n\nThe following solution has been provided:\n\n{solution}\n\n"
        report = await self.summarizer.query(observation.strip())
        return report

    async def forward_query(self) -> str:
        """Solve the current top-level query directly by delegating to require_solution."""
        return await self.require_solution([self.current_query])
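

# Minimal usage sketch (illustrative only; assumes Args, the prompt files, and
# the itf_agent/solver modules are configured as elsewhere in this Space, and
# the constructor values shown are placeholders):
#
#     import asyncio
#
#     async def main():
#         manager = Manager(temperature=0.2, max_tokens=1024, max_depth=2)
#         answer = await manager.query("Outline a plan for building a small ETL pipeline.")
#         print(answer)
#
#     asyncio.run(main())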