
'''
ReAct-style tool-calling agent demo driven by a Qwen2-family model:
the tool schema is encoded into the system prompt, the model's tool
call is decoded and executed, and the observation is fed back for a
final answer.
'''
import json

from infer import get_llm_infer


def get_tool():
    """Build the demo tool registry.

    Returns:
        tuple: ``(tool, tools_config)`` where ``tool(tool_id, tool_args)``
        executes a tool by name and returns its (canned) result string, and
        ``tools_config`` is the JSON-serializable tool schema that gets
        encoded into the LLM system prompt.
    """
    tools_config = [
        {
            'function-name': 'google_search',
            'description_for_function': '谷歌搜索是一个通用搜索引擎。',
            'arguments': {
                'name': 'search_query',
                'description': '搜索关键词或短语',
                'required': True,
                'schema': {'type': 'string'},
            },
        },
        {
            # NOTE(review): 'open_sofware' looks like a typo for
            # 'open_software', but it is the published tool id — kept as-is
            # so prompts/decoding stay consistent.
            'function-name': 'open_sofware',  # tool_id
            'description_for_function': 'open_sofware是一个工具，用来打开某种软件。',
            'arguments': {
                'name': '要打开的软件的名称',
            },
        }
    ]

    def tool(tool_id, tool_args):
        """Execute a tool by id and return its canned result string.

        A real implementation would call the actual backend with
        ``tool_args``; here results are mocked.
        """
        res = {
            'google_search': '唐纳德·特朗普（Donald Trump）出生于1946年6月14日',
            "hudie_search": '蝴蝶是一种昆虫，通常以其美丽的翅膀和翅膀上的图案而闻名。'
        }
        # Fix: dispatch on the requested tool id (was hard-coded to
        # 'google_search'). Unknown or empty ids fall back to the search
        # result so previous callers observe unchanged behavior.
        key = (tool_id or '').lower()
        return res.get(key, res['google_search'])

    return tool, tools_config

def get_agent(model_path=r'D:\code\other\LLMs\models\DeepSeek-R1-Distill-Qwen-1.5B'):
    """Build a single-round ReAct agent closure.

    The agent encodes the tool schema into the system prompt, runs the LLM,
    decodes a tool call from the completion, executes the tool, appends the
    observation as a ``tool`` message, and runs the LLM a second time to
    produce the final answer.

    Args:
        model_path: Filesystem path of the local model checkpoint. Defaults
            to the previously hard-coded path for backward compatibility.

    Returns:
        callable: ``agent(input_str) -> str``, the model's final completion.
    """
    tool, tools_config = get_tool()
    llm_infer = get_llm_infer(model_path=model_path)

    def encode_2llmSys(tools_config):
        """Encode the tool schema into a Qwen-style system prompt.

        The tools are embedded as JSON inside ``<tools></tools>`` tags,
        followed by an explicit ReAct-like output-format specification
        (``Function:`` / ``Arguments:`` / ``Observation:`` labels).
        """
        sys_prompt = "You are a helpful assistant."
        sys_prompt += "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:"
        sys_prompt += "\n<tools>"+json.dumps(tools_config,ensure_ascii=False) + "</tools>"
        sys_prompt += """\nAnswer questions using the following format:
        Question: the input question you should always output
        Thought: the plan to answer the questions, if there is tool_response tag, then output nothing in this filed
        Function: the function name to call, if there is tool_response tag,  then output nothing in this filed
        Arguments: the arguments of the function name, if there is tool_response tag,  then output nothing in this filed
        Observation: the result of the function from tool_response tag in input, if there is no tool_response tag,  then output nothing in this filed
        Thought2: the plan to answer the questions based on the Observation, if there is no Observation,  then output nothing in this filed
        Final Answer: the final answer to the original input question"""

        return sys_prompt

    def decode_2tool(text):
        """Extract ``(plugin_name, plugin_args, text)`` from a completion.

        Fix: the system prompt asks the model for ``Function:`` /
        ``Arguments:`` labels, but the decoder previously only searched for
        the classic ReAct ``Action:`` / ``Action Input:`` labels and could
        never match the prompt's own format. Both label pairs are now
        supported; the classic pair is tried first, preserving the original
        behavior on old-format text.

        ``text`` is truncated just before ``Observation:`` so the caller can
        append the real tool result.
        """
        plugin_name, plugin_args = '', ''
        for name_tag, args_tag in (('\nAction:', '\nAction Input:'),
                                   ('\nFunction:', '\nArguments:')):
            i = text.rfind(name_tag)
            j = text.rfind(args_tag)
            k = text.rfind('\nObservation:')
            if 0 <= i < j:  # text contains both a name label and an args label
                if k < j:  # completion stopped before emitting Observation
                    text = text.rstrip() + '\nObservation:'  # add it back
                k = text.rfind('\nObservation:')
                plugin_name = text[i + len(name_tag): j].strip()
                plugin_args = text[j + len(args_tag): k].strip()
                text = text[:k]
                break
        return plugin_name, plugin_args, text

    def agent(input_str):
        """Run one encode → infer → decode → execute → re-infer round."""
        # 1. Encode the tool schema into the system prompt.
        print("1 tools信息 编码进sys_prompt")
        sys_prompt = encode_2llmSys(tools_config)
        messages = [
            {"role": "system", "content": sys_prompt},
            {"role": "user", "content": input_str},
        ]

        # 2. First inference: the model decides which tool to call.
        #    tools=None because the schema is already in the system prompt.
        print("2 推理")
        res = llm_infer(messages=messages, tools=None)[0]
        print(res)

        # 3. Decode the tool call out of the completion.
        print("3 解码")
        plugin_name, plugin_args, text = decode_2tool(res)

        # 4. Execute the tool.
        print("4 执行")
        response_str = tool(plugin_name, plugin_args)

        # 5. Re-encode: append the assistant turn and the tool observation.
        print("5 再编码")
        messages.extend([
            {"role": "assistant",
             "tool_calls": [{"function": {"arguments": {'x': 0}, "name": plugin_name}}],
             'content': res},
            {"role": "tool", "content": response_str},
        ])
        print(messages)

        # 6. Second inference: the model produces the final answer.
        print("6 再推理")
        res = llm_infer(messages=messages, tools=None)[0]
        print(res)

        return res
    return agent

if __name__ == '__main__':
    # Demo entry point: build the agent once, then run a single query.
    react_agent = get_agent()
    query = '特朗普哪一天出生的？'
    # query = '打开pycharm软件？'  # alternative demo query
    res = react_agent(query)