import inspect
import re
import json
from llama_cpp import Llama
# ------------- TOOLS / FUNCTIONS --------------
# Some of the structure of this agent has been inspired by:
# https://github.com/Pirner/zettelkasten/blob/main/main_notes/1_0_tool_calling_with_llama.py
# ------------- CONFIG ---------------------
LAST_SYSTEM_MESSAGE = """.
GENERAL BEHAVIOR:
- Answer the user’s question clearly, concisely, and in natural language.
- Always speak as a normal assistant. Do not mention tools, tool calls, or system messages.
TOOL RESULTS:
- The system message may include a <tool_results> block.
- A <tool_results> block lists one or more tool outputs in the form:
<tool_results>
- tool_name(args_dict) -> result_value
</tool_results>
INSTRUCTIONS FOR USING TOOL RESULTS:
- Treat the information inside <tool_results> as *absolute ground truth*.
- Use those results to answer the user’s latest question.
- Summarize the results naturally. Do NOT restate the log format.
- NEVER reproduce or invent <tool_results> blocks.
YOUR OUTPUT:
- Your entire reply must be ONLY natural language directed to the user.
- Under no circumstance output raw tool logs or anything resembling them.
"""
# -------------------------------------------
def function_to_json(func) -> dict:
    """Convert a Python function's signature into an OpenAI-style tool schema."""
    # Map Python type annotations to JSON Schema type names.
type_map = {
str: "string",
int: "integer",
float: "number",
bool: "boolean",
list: "array",
dict: "object",
type(None): "null",
}
try:
signature = inspect.signature(func)
except ValueError as e:
raise ValueError(
f"Failed to get signature for function {func.__name__}: {str(e)}"
)
parameters = {}
for param in signature.parameters.values():
param_type = type_map.get(param.annotation, "string")
parameters[param.name] = {"type": param_type}
    # Parameters without a default value are required.
    required = [
        param.name
        for param in signature.parameters.values()
        if param.default is inspect.Parameter.empty
    ]
return {
"type": "function",
"function": {
"name": func.__name__,
"description": func.__doc__ or "",
"parameters": {
"type": "object",
"properties": parameters,
"required": required,
},
},
}
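# For illustration, a hypothetical tool and the schema function_to_json would
# produce for it (the names below are examples, not part of this agent):
#
#     def get_weather(city: str, days: int = 1) -> str:
#         """Return a short weather summary for a city."""
#         ...
#
#     function_to_json(get_weather)
#     # -> {"type": "function",
#     #     "function": {"name": "get_weather",
#     #                  "description": "Return a short weather summary for a city.",
#     #                  "parameters": {"type": "object",
#     #                                 "properties": {"city": {"type": "string"},
#     #                                                "days": {"type": "integer"}},
#     #                                 "required": ["city"]}}}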
def parse_tool_calls(tool_output: str):
    """Parse 'func(name=value, ...)' calls out of the model's raw tool output."""
    calls = []
for match in re.finditer(r"(\w+)\((.*?)\)", tool_output, re.DOTALL):
func_name, arg_str = match.groups()
func_name = func_name.strip()
kwargs = {}
arg_str = arg_str.strip()
if arg_str:
parts = re.split(r",\s*", arg_str)
for part in parts:
if "=" not in part:
continue
key, val = part.split("=", 1)
key = key.strip()
val = val.strip().strip('"').strip("'")
try:
if "." in val:
parsed_val = float(val)
else:
parsed_val = int(val)
except ValueError:
parsed_val = val
kwargs[key] = parsed_val
calls.append((func_name, kwargs))
return calls
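# For illustration: given the model output '[get_weather(city="Paris", days=3)]'
# (get_weather being a hypothetical tool), parse_tool_calls returns
# [("get_weather", {"city": "Paris", "days": 3})] -- surrounding quotes are
# stripped and bare numerics are coerced to int (or float if they contain ".").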
def add_history(user_message, history, system_message):
    """Rebuild the conversation as a flat message list, system message first.

    `history` is assumed to arrive in a nested "messages" format (as used by
    Gradio), where each entry's content is a list of blocks like [{"text": ...}].
    """
    new_history = [{"role": "system", "content": system_message}]
    if history:
        for el in history:
            if el["role"] == "user":
                user = el["content"][0]["text"]
                new_history.append({"role": "user", "content": user})
            elif el["role"] == "assistant":
                assistant = el["content"][0]["text"]
                new_history.append({"role": "assistant", "content": assistant})
    new_history.append({"role": "user", "content": user_message})
    return new_history
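# For illustration, a two-turn history in that nested format:
#     [{"role": "user", "content": [{"text": "Hi"}]},
#      {"role": "assistant", "content": [{"text": "Hello!"}]}]
# is flattened (with system message "S" and new user message "Next") into:
#     [{"role": "system", "content": "S"},
#      {"role": "user", "content": "Hi"},
#      {"role": "assistant", "content": "Hello!"},
#      {"role": "user", "content": "Next"}]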
def generate_chat(llm, messages, max_tokens=256, temperature=0.2, top_p=0.95):
    """Stream a chat completion, yielding the accumulated answer after each chunk."""
completion_stream = llm.create_chat_completion(
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
stream=True,
)
answer = ""
for chunk in completion_stream:
delta = chunk["choices"][0].get("delta", {})
token = delta.get("content", None)
if token:
answer += token
yield answer
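# Because each yield is the full text so far (the shape Gradio-style streaming
# UIs expect), a plain consumer loop looks like:
#
#     for partial in generate_chat(llm, messages):
#         print(partial)  # prints the growing answer after every chunk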
def generate_non_stream_chat(llm, messages, max_tokens=256, temperature=0.2, top_p=0.95):
    """Run a chat completion without streaming and return only the final text."""
res = llm.create_chat_completion(
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
stream=False,
)
# Return just the final text
return res["choices"][0]["message"]["content"]
def select_tools_with_llm(llm, user_message: str, tools_schema: list) -> tuple:
    """Ask the LLM which tools to call; returns (parsed_calls, raw_model_output)."""
tool_selection_system = f"""You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out. If the given question lacks the parameters required by the function, also point it out. You should only return the function call in tools call sections.
If you decide to invoke any of the function(s), you MUST put it in the format of [func_name1(params_name1=params_value1, params_name2=params_value2...), func_name2(params)]
If you call a function, you SHOULD NOT include any other text in the response.
Here is a list of functions in JSON format that you can invoke:
{json.dumps(tools_schema, indent=2)}
"""
messages = [
{"role": "system", "content": tool_selection_system},
{"role": "user", "content": user_message},
]
raw = generate_non_stream_chat(llm, messages)
return parse_tool_calls(raw), raw
def call_tools(tool_calls, tool_registry):
    """Execute each parsed tool call, capturing results and errors uniformly."""
results = []
for func_name, kwargs in tool_calls:
func = tool_registry.get(func_name)
if func is None:
results.append(
{
"name": func_name,
"args": kwargs,
"result": f"Unknown tool '{func_name}'.",
}
)
continue
try:
res = func(**kwargs)
except Exception as e:
res = f"Error while calling {func_name}: {e}"
results.append({"name": func_name, "args": kwargs, "result": res})
return results
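# For illustration (with a hypothetical registry):
#     call_tools([("add", {"a": 1, "b": 2})], {"add": lambda a, b: a + b})
# returns [{"name": "add", "args": {"a": 1, "b": 2}, "result": 3}]; an unknown
# tool name or an exception inside a tool is reported in "result" rather than
# aborting the remaining calls.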
def respond(user_message, history, system_message, llm, tools=None):
    """Agent loop: pick tools with the LLM, run them, then stream a grounded reply."""
if tools is None:
tools = []
# 1. Build Schema & Registry
tool_registry = {f.__name__: f for f in tools}
tools_schema = [function_to_json(f) for f in tools]
# 2. Let the LLM select tools based on the message
tool_calls, initial_message = select_tools_with_llm(llm, user_message, tools_schema)
# 3. Call tools if needed, otherwise respond
if tool_calls and tools:
tool_results = call_tools(tool_calls, tool_registry)
tool_info_str = "\n<tool_results>\n"
for tr in tool_results:
tool_info_str += f"- {tr['name']}({tr['args']}) -> {tr['result']}\n"
final_system_message = f"{system_message}{LAST_SYSTEM_MESSAGE} {tool_info_str}</tool_results>\n"
messages = add_history(user_message, history, final_system_message)
stream = generate_chat(llm, messages, temperature=0.7, top_p=0.95)
for out in stream:
yield out
    else:
        # `respond` is a generator, so a bare `return initial_message` would be
        # discarded by callers; yield the model's direct answer instead.
        yield initial_message
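# A minimal usage sketch, assuming a local GGUF model; the model path, the
# `ping` tool, and the system prompt below are placeholders rather than part
# of the original agent.
if __name__ == "__main__":
    def ping(host: str) -> str:
        """Pretend to ping a host and report whether it is reachable."""
        return f"{host} is reachable"

    llm = Llama(model_path="model.gguf", n_ctx=4096, verbose=False)
    for partial in respond(
        user_message="Is example.com up?",
        history=[],
        system_message="You are a helpful assistant.",
        llm=llm,
        tools=[ping],
    ):
        print(partial)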