

# NOTE(review): dead code — this entire section is commented out. `toolcallS2n`
# looks like an earlier revision of `toolcallS2` below (accumulates per-index in
# a dict instead of a stack). If ever revived, address first:
#   * on the FIRST chunk for an index, the inserted entry IS `tool_call`, so the
#     unconditional `+= tool_call.function.arguments` concatenates the fragment
#     onto itself — presumably a duplication bug; confirm against the stream;
#   * the bare `except: pass` swallows every error, not only incomplete-JSON
#     parses mid-stream;
#   * `Function(arguments=args_obj, ...)` stores a parsed dict where the OpenAI
#     client types normally carry the raw JSON string — verify before reuse;
#   * the final `yield` hand-builds JSON from possibly partial fragments.
#     def toolcallS2n(self,user:str,delta,final_tool_calls:dict):
#         for tool_call in delta.tool_calls or []:
#             index = tool_call.index
#             if index not in final_tool_calls:
#                 final_tool_calls[index] = tool_call
#             final_tool_calls[index].function.arguments += tool_call.function.arguments
#             try:
#                 args_obj = json.loads(final_tool_calls[index].function.arguments)
#                 self.messages.append(ChatCompletionMessage(content=None, 
# refusal=None, role='assistant', audio=None, function_call=None,
#  tool_calls=[ChatCompletionMessageToolCall(id=final_tool_calls[index].id, function=Function(arguments=args_obj, name=final_tool_calls[index].function.name), type='function')]))
#                 self.addPntToolRtn(
#                     user,
#                     final_tool_calls[index].function.name,
#                     args_obj,
#                     final_tool_calls[index].id
#                 )
#             except:
#                 pass 
#             yield f'{{"tool_name":"{final_tool_calls[index].function.name}","args":{tool_call.function.arguments}}}\n'           
# NOTE(review): dead code — commented out. Streaming tool-call accumulator: a
# delta chunk carrying a tool name starts a new stack entry; nameless chunks
# append their argument fragment to the top entry. Whenever the accumulated
# arguments parse as JSON, an assistant tool_call message is appended to
# self.messages and routed via addPntToolRtn. If revived: the bare
# `except: pass` (meant to skip still-incomplete JSON) also hides real errors,
# and the trailing `yield` hand-builds JSON from possibly partial fragments.
#     def toolcallS2(self,user:str,delta,tool_stack:list[dict]):
#         if getattr(delta, "tool_calls", None):
#             for tool_call in delta.tool_calls:
#                 tool_name = tool_call.function.name
#                 tool_args = tool_call.function.arguments
#                 tool_id = tool_call.id
#                 # print(f'{{"tool_name":"{tool_name}","args":{tool_args},"id":"{tool_id}"}}')
#                 sys.stdout.flush()
#                 if tool_name:
#                     tool_stack.append({
#                         "tool_name": tool_name,
#                         "args": tool_args,
#                         "id": tool_id
#                     })
#                 else:
#                     if not tool_stack:
#                         continue
#                     tool_stack[-1]["args"] += tool_args
#                     try:
#                         args_obj = json.loads(tool_stack[-1]["args"])
#                         self.messages.append(ChatCompletionMessage(content=None, 
# refusal=None, role='assistant', audio=None, function_call=None,
#  tool_calls=[ChatCompletionMessageToolCall(id=tool_stack[-1]["id"], function=Function(arguments=args_obj, name=tool_stack[-1]["tool_name"]), type='function')]))
#                         self.addPntToolRtn(
#                             user,
#                             tool_stack[-1]["tool_name"],
#                             args_obj,
#                             tool_stack[-1]["id"]
#                         )
#                     except:
#                         pass
#                 yield f'{{"tool_name":"{tool_name}","args":{tool_args}}}\n'
# NOTE(review): dead code — commented out. Accumulates streamed assistant text
# into assist_stack[0] (a caller-owned single-slot list) and both prints and
# yields each fragment as a hand-formatted JSON line. If revived: unescaped
# quotes/newlines inside partial_content would make that line invalid JSON —
# use json.dumps instead of an f-string.
#     def assistS2(self,delta,assist_stack:list[str]):
#         partial_content = getattr(delta, "content", None)
#         if partial_content:
#             assist_stack[0]+=partial_content
#             print(f'{{"assistant_reply":"{partial_content}"}}')
#             sys.stdout.flush()
#             yield f'{{"assistant_reply":"{partial_content}"}}\n'
# NOTE(review): dead code — commented out. Streaming driver: consumes the LLM
# stream, delegating each delta to toolcallS2 (tool calls) and assistS2 (text),
# then recurses into itself whenever any tool call was collected. If revived,
# note the mixed yield types: tool/assist fragments are str while the
# STREAMING_END and error payloads are bytes — consumers must handle both.
#     def stepMainS2(self, user: str, task: str):
#         """
#         Main streaming handler; filter out tool messages with
#         filterMessagesForOpenAI() before calling the LLM.
#         """
#         try:
#             filtered_messages = self.filterMessagesForOpenAI()
#             stream = self.LLMservice.funcCallLLMS2(
#                 content="",
#                 tools=AGENT_TOOLS,
#                 messages=filtered_messages
#             )
#             # final_tool_calls:dict = {}
#             tool_stack:list[dict] = []
#             assist_stack:list[str] = [""]
#             for chunk in stream:
#                 if chunk.choices:
#                     delta = chunk.choices[0].delta
#                     yield from self.toolcallS2(user,delta,tool_stack)
#                     yield from self.assistS2(delta,assist_stack)
#             if assist_stack[0]:
#                 print(colored("[Assistant:] \n"+assist_stack[0],'yellow'))
#                 sys.stdout.flush()
#             if tool_stack:
#                 yield from self.stepMainS2(user,task)
#             yield '{"STREAMING_END":true}\n'.encode("utf-8")
#             print('{"STREAMING_END":true}')
#             sys.stdout.flush()

#         except Exception as e:
#             yield f'{{"error":"{traceback.format_exc()}"}}\n'.encode("utf-8")
#             print(f"Error in stepMainYS: {traceback.format_exc()}+\n+{e}")
#             sys.stdout.flush()
#             return
# NOTE(review): dead code — commented out, and at a different comment indent
# than the methods above (it would uncomment to MODULE level despite taking
# `self`). Non-streaming variant: one LLM round, then for each tool call a
# permission gate that busy-polls tree.pendingPermissions every 5 s with a
# 100 s timeout before re-routing the call. If revived:
#   * addPntToolRtn is called here with 2 args but with 4 args in the sections
#     above — the signatures disagree; reconcile before use;
#   * the {"type":"end"} event is only emitted when tool calls occurred;
#   * the polling loop treats "resolved" and "rejected" identically (both
#     re-route) — presumably intentional since router re-checks; confirm.
# def stepMainS1(self,user:str,task:str)->Generator[bytes,None,None]:
#     try:
#         # filter out tool messages before calling
#         reply = self.LLMservice.funcCallLLMS1(
#             "",
#             AGENT_TOOLS,
#             self.messages
#         )
#         if hasattr(reply, "tool_calls") and reply.tool_calls:
#             if reply.content:
#                 print(colored("WARNING, UNSENT ASSISTANT REPLY ",'yellow')+reply.content)
#             stream ={
#                 "type": "toolcalls",
#                 "content": reply.content,
#                 "toolcalls": [{
#                     "id": tool.id,
#                     "function": { 
#                         "name": tool.function.name,
#                         "arguments": tool.function.arguments
#                     }
#                     } for tool in reply.tool_calls]
#             }
#         else:
#             stream = {
#                 "type": "assistant",
#                 "content": reply.content or ""
#             }
#         yield json.dumps(stream).encode("utf-8")
        
#         if reply.tool_calls:
#             for tool_call in reply.tool_calls:
#                 tool_result = self.crudagent.router(user, tool_call.function.name, json.loads(tool_call.function.arguments))
#                 if "PERMISSION_IN_THE_LOOP" in tool_result.get("message",""):
#                     node=self.tree._findNodeById(-int(tool_result.get("success",-1)))
#                     self.tree.pendingPermissions[(user,node.id)]="pending"
#                     yield json.dumps({"type":"permission","content":"Do you agree agent " + tool_call.function.name + " on " + self.tree.pNode(node,style="ja") + "?","node id":node.id}).encode("utf-8")
#                     startTime=time.time()
#                     while True:
#                         time.sleep(5)
#                         print(self.tree.pendingPermissions)
#                         if time.time()-startTime>100:
#                             self.tree.pendingPermissions[(user,node.id)]="rejected"
#                             return#stop the whole execution since the client is presumably dead 
#                         if self.tree.pendingPermissions.get((user,node.id),"")=="resolved" or self.tree.pendingPermissions.get((user,node.id),"")=="rejected":
#                             tool_result=self.crudagent.router(user, tool_call.function.name, json.loads(tool_call.function.arguments))
#                             self.tree.pendingPermissions.pop((user,node.id))
#                             break
#                 self.addPntToolRtn(tool_result,tool_call)
#                 ytoolrtn={
#                     "type":"toolreturn",
#                     "content":tool_result
#                 }
#                 yield json.dumps(ytoolrtn).encode("utf-8")
#             yield from self.stepMainS1(user,task)
#             yield json.dumps({"type":"end"}).encode("utf-8")
#             print("STREAMING_END")
#             sys.stdout.flush()
#         return
#     except Exception as e:
#         error_response = {
#             "type":"error",
#             "content": traceback.format_exc(),
#         }
#         yield json.dumps(error_response).encode("utf-8")
#         print(f"Error in stepMainS1: {traceback.format_exc()}")
#         return
# NOTE(review): dead code — commented out. list subclass whose __str__ renders
# only the LAST message (self[-1]) in colored form. If revived:
#   * the tool-return line interpolates the RAW `tool_content`, not the parsed
#     `tool_content_dict`, so the JSON-fallback parse only ever affects the
#     displayed tool name — presumably unintended; confirm;
#   * `msg.get('role').lower()` raises AttributeError when 'role' is absent
#     (get returns None) — verify every message carries a role.
# class PrettyMessages(list):
#     def __str__(self):
#         msg = self[-1]
#         rtnstr=""
#         if msg.get('role') == 'system':
#             rtnstr=f"[System]\n{msg['content']}\n"
#         elif msg.get('role') == 'tool':
#             tool_content = msg.get('content')
#             if isinstance(tool_content, str):
#                 try:
#                     tool_content_dict = json.loads(tool_content)
#                 except json.JSONDecodeError:
#                     tool_content_dict = {
#                         "tool_name": "N/A",
#                         "message": tool_content
#                     }
#             else:
#                 tool_content_dict = tool_content
#             rtnstr=f"[Tool return] {tool_content_dict.get('tool_name', 'N/A')}:\n{escapeLn(tool_content)}\n"

#         # other roles default to plain json.dumps
#         elif msg.get('role').lower()=='user' or msg.get('role').lower()=='assistant':
#             rtnstr=f"[{msg.get('role')}]\n{escapeLn(msg['content'])}\n"   
#         else:
#             rtnstr=f"{escapeLn(msg)}\n"
#         return colored(rtnstr,"light_blue")

# NOTE(review): dead code — commented out. Console pretty-printer wrapping a
# ChatCompletionMessage; special-cases the `editDocModel` tool to render its
# `edits` list range-by-range, other tools get a plain JSON dump. If revived:
# `json.loads(tool_call.function.arguments)` is unguarded and raises on
# malformed/partial JSON.
# class PrettyCompletion:
#     def __init__(self, cm: ChatCompletionMessage):
#         self.cm = cm  # keep the original ChatCompletionMessage object
#     def __str__(self) -> str:
#         """
#         Readable wrapper around a ChatCompletionMessage for console viewing.
#         If function_call/tool-call fields are present, extract and display
#         them; otherwise show role and content by default.
#         """
#         lines = []
#         # check whether a function_call field exists
#         if hasattr(self.cm, "tool_calls") and self.cm.tool_calls:
#             for tool_call in self.cm.tool_calls:
#                 function_name = tool_call.function.name
#                 arguments_dict = json.loads(tool_call.function.arguments)
#                 if function_name == "editDocModel":
#                     # for editDocModel, specially format the edits inside arguments
#                     lines.append(colored(f"[Tool Calling]\n  Name: {function_name}", "cyan"))
#                     if "edits" in arguments_dict and isinstance(arguments_dict["edits"], list):
#                         for edit_item in arguments_dict["edits"]:
#                             start_line = edit_item.get("start_line", "N/A")
#                             end_line = edit_item.get("end_line", "N/A")
#                             content_str = edit_item.get("content", "")
#                             lines.append(colored(
#                                 f"  line {start_line} - line {end_line}:\n{content_str}\n",
#                                 "cyan"
#                             ))
#                     # append any other fields worth showing (e.g. doc_int) here
#                     for key, val in arguments_dict.items():
#                         if key != "edits":
#                             lines.append(colored(f"  {key}: {val}\n", "cyan"))
#                 else:
#                     # default handling
#                     raw_string = json.dumps(arguments_dict, ensure_ascii=False)
#                     raw_string = raw_string.replace('\\n', '\n')
#                     lines.append(colored(
#                         f"[Tool Calling]\n  Name: {function_name}\n  Arguments:{raw_string}\n",
#                         "cyan"
#                     ))

#         # if there is no function_call, check for a plain message
#         role = getattr(self.cm, "role", "")
#         content = getattr(self.cm, "content", "")
#         if content:
#             lines.append(colored(f"{role.capitalize()}:\n{content}\n", "yellow")) 
#         return "\n".join(lines)
    # NOTE(review): dead code — commented out at class-body indent (would
    # uncomment to a method of the enclosing class). Keeps only
    # system/user/assistant messages, dropping role 'tool'. If revived, the
    # print statement dumps the entire message list to stdout on every call —
    # remove or gate it behind a debug flag.
    # def filterMessagesForOpenAI(self):
    #     """
    #     Filter out messages whose role is 'tool': when the conversation is
    #     next sent to the LLM, OpenAI rejects tool-response messages that
    #     have no matching tool_call.
    #     """
    #     filtered = []
    #     print(json.dumps(self.messages, indent=1, ensure_ascii=False).replace('\\n', '\n'))
    #     for msg in self.messages:
    #         if msg.get('role') in ['system', 'user', 'assistant']:
    #             filtered.append(msg)
    #     return filtered