#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#
import time
from assets.css.reasoning import styles
from ..response.formatter import assistant_response
from ..reasoning.interface import reasoning_interfaces
from ..reasoning.tool_reasoning import tool_reasoning
from .parser import tools_parser
from .executor import tools_call
from config import (
TOOLS_TEMPERATURE,
MAXIMUM_ITERATIONS,
MAX_RETRY_LIMIT,
ITERATION_METRICS
)
def tools_setup(server, model_name, conversation_messages, tool_definitions, search_engine):
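    """Drive the tool-calling loop for one conversation turn.

    Repeatedly asks the model (through the OpenAI-style server client) for a
    completion with tool_definitions attached, runs any requested tools via
    tools_call with search_engine, and appends the results to
    conversation_messages. Styled reasoning-log snapshots are yielded while the
    loop runs; a final (conversation_messages, log, success_flag) tuple is
    returned when it finishes.
    """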
if "current_iterations" not in ITERATION_METRICS:
ITERATION_METRICS["current_iterations"] = MAXIMUM_ITERATIONS
while ITERATION_METRICS["current_iterations"] <= MAX_RETRY_LIMIT:
for iteration_index in range(ITERATION_METRICS["current_iterations"]):
try:
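                # Back off before the next attempt when a previous attempt has already failed.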
if ITERATION_METRICS.get("retry_count", 0) > 0:
time.sleep(ITERATION_METRICS["retry_delays"][min(ITERATION_METRICS.get("retry_count", 0), len(ITERATION_METRICS["retry_delays"]) - 1)] * ITERATION_METRICS["backoff_multiplier"])
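                # Request the next model turn, letting the model decide whether to call a tool.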
response = server.chat.completions.create(
model=model_name,
messages=conversation_messages,
tools=tool_definitions,
tool_choice="auto",
temperature=TOOLS_TEMPERATURE
).choices[0].message
conversation_messages.append(assistant_response(response))
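                # No tool calls means the model produced its final answer: finalize the reasoning log and return.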
                if not response.tool_calls:
if ITERATION_METRICS.get("logs_generator", ""):
ITERATION_METRICS["logs_generator"] = styles(ITERATION_METRICS.get("logs_generator", "").replace('<br>', '\n').strip(), expanded=False)
return conversation_messages, ITERATION_METRICS.get("logs_generator", ""), True
                for tool_invocation in response.tool_calls:
                    tool_name = tool_invocation.function.name
                    tool_arguments, parsing_error = tools_parser(tool_invocation.function.arguments)
                    if parsing_error:
                        # Argument extraction failed: record the error pattern, stream the error
                        # reasoning, and report the failure back to the model as the tool result.
                        ITERATION_METRICS["error_patterns"][f"{tool_name}_extraction"] = ITERATION_METRICS["error_patterns"].get(f"{tool_name}_extraction", 0) + 1
                        error_reasoning = tool_reasoning(tool_name, None, "error", error=parsing_error)
                        for i in range(len(error_reasoning) + 1):
                            ITERATION_METRICS["logs_generator"] = styles(reasoning_interfaces(error_reasoning, i), expanded=True)
                            yield ITERATION_METRICS["logs_generator"]
                        ITERATION_METRICS["logs_generator"] = styles(error_reasoning, expanded=True)
                        yield ITERATION_METRICS["logs_generator"]
                        conversation_messages.append({
                            "role": "tool",
                            "tool_call_id": tool_invocation.id,
                            "name": tool_name,
                            "content": parsing_error
                        })
                    else:
                        # Stream the "parsing" and "executing" reasoning stages before the tool runs.
                        parsing_reasoning = tool_reasoning(tool_name, tool_arguments, "parsing")
                        for i in range(len(parsing_reasoning) + 1):
                            ITERATION_METRICS["logs_generator"] = styles(reasoning_interfaces(parsing_reasoning, i), expanded=True)
                            yield ITERATION_METRICS["logs_generator"]
                        time.sleep(ITERATION_METRICS["tools_reasoning_parsing"])
                        executing_reasoning = tool_reasoning(tool_name, tool_arguments, "executing")
                        for i in range(len(executing_reasoning) + 1):
                            ITERATION_METRICS["logs_generator"] = styles(reasoning_interfaces(executing_reasoning, i), expanded=True)
                            yield ITERATION_METRICS["logs_generator"]
                        try:
                            # Execute the tool and append its output as a tool message for the next model turn.
                            conversation_messages.append({
                                "role": "tool",
                                "tool_call_id": tool_invocation.id,
                                "name": tool_name,
                                "content": tools_call(
                                    search_engine,
                                    tool_name,
                                    tool_arguments
                                )
                            })
                            completed_reasoning = tool_reasoning(tool_name, tool_arguments, "completed", result=conversation_messages[-1]["content"])
                            for i in range(len(completed_reasoning) + 1):
                                ITERATION_METRICS["logs_generator"] = styles(reasoning_interfaces(completed_reasoning, i), expanded=True)
                                yield ITERATION_METRICS["logs_generator"]
                            ITERATION_METRICS["logs_generator"] = styles(completed_reasoning, expanded=False)
                            yield ITERATION_METRICS["logs_generator"]
                        except Exception as tool_error:
                            # Tool execution failed: record the error pattern, stream the error
                            # reasoning, and surface the error text as the tool result.
                            ITERATION_METRICS["error_patterns"][f"{tool_name}_execution"] = ITERATION_METRICS["error_patterns"].get(f"{tool_name}_execution", 0) + 1
                            failure_reasoning = tool_reasoning(tool_name, tool_arguments, "error", error=str(tool_error))
                            for i in range(len(failure_reasoning) + 1):
                                ITERATION_METRICS["logs_generator"] = styles(reasoning_interfaces(failure_reasoning, i), expanded=True)
                                yield ITERATION_METRICS["logs_generator"]
                            ITERATION_METRICS["logs_generator"] = styles(failure_reasoning, expanded=True)
                            yield ITERATION_METRICS["logs_generator"]
                            conversation_messages.append({
                                "role": "tool",
                                "tool_call_id": tool_invocation.id,
                                "name": tool_name,
                                "content": str(tool_error)
                            })
return conversation_messages, ITERATION_METRICS.get("logs_generator", ""), True
except Exception:
ITERATION_METRICS["failures"] = ITERATION_METRICS.get("failures", 0) + 1
ITERATION_METRICS["retry_count"] = ITERATION_METRICS.get("retry_count", 0) + 1
if ITERATION_METRICS["error_patterns"]:
if max(ITERATION_METRICS["error_patterns"].values()) > 3:
ITERATION_METRICS["current_iterations"] = min(ITERATION_METRICS["current_iterations"] + 2, MAX_RETRY_LIMIT)
else:
ITERATION_METRICS["current_iterations"] = min(ITERATION_METRICS["current_iterations"] + 1, MAX_RETRY_LIMIT)
else:
ITERATION_METRICS["current_iterations"] = min(ITERATION_METRICS["current_iterations"] + 1, MAX_RETRY_LIMIT)
if ITERATION_METRICS["current_iterations"] > ITERATION_METRICS.get("previous_iterations", 0):
for i in range(0, len(f"Retrying with increased iterations: {ITERATION_METRICS['current_iterations']} (attempt {ITERATION_METRICS.get('retry_count', 0) + 1})") + 1):
ITERATION_METRICS["logs_generator"] = styles(reasoning_interfaces(f"Retrying with increased iterations: {ITERATION_METRICS['current_iterations']} (attempt {ITERATION_METRICS.get('retry_count', 0) + 1})", i), expanded=True)
yield ITERATION_METRICS["logs_generator"]
ITERATION_METRICS["previous_iterations"] = ITERATION_METRICS["current_iterations"]
if ITERATION_METRICS["current_iterations"] >= MAX_RETRY_LIMIT:
ITERATION_METRICS["logs_generator"] = styles(f"Maximum retry limit reached after {ITERATION_METRICS.get('attempts', 0)} attempts with {ITERATION_METRICS.get('failures', 0)} failures", expanded=True)
yield ITERATION_METRICS["logs_generator"]
break
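    # Final bookkeeping: compute the tool success rate and finalize the reasoning log before returning.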
ITERATION_METRICS["success_rate"] = (ITERATION_METRICS.get("tool_results_count", 0) / max(ITERATION_METRICS.get("attempts", 1), 1)) * 100
if ITERATION_METRICS.get("logs_generator", ""):
ITERATION_METRICS["logs_generator"] = styles(ITERATION_METRICS.get("logs_generator", "").replace('<br>', '\n').strip(), expanded=False)
return conversation_messages, ITERATION_METRICS.get("logs_generator", ""), ITERATION_METRICS.get("tool_results_count", 0) > 0