import os
import json
import datetime
from tempfile import TemporaryDirectory
from langchain_community.tools import TavilySearchResults
from langchain_community.agent_toolkits import FileManagementToolkit
from openai import OpenAI

def setup_tools():
    """
    Register and initialize all tools.

    Returns:
    - list: [Tavily search tool, file-write tool]
    """
    # Tavily search-engine tool (Google/Baidu-like web search).
    tavily = TavilySearchResults(max_results=5)
    tavily.description = '这是一个类似谷歌和百度的搜索引擎，支持搜索知识、天气、股票、电影、小说、百科等内容。'

    # Temporary working directory used as the file-management root.
    working_directory = TemporaryDirectory()
    # BUG FIX: keep a live reference on the function object. Previously the
    # TemporaryDirectory was only bound to a local, so it was garbage-collected
    # after return and its finalizer deleted the directory, breaking write_file.
    setup_tools._working_directory = working_directory

    # File-management toolkit restricted to the write_file tool only.
    write_tool = FileManagementToolkit(
        root_dir=str(working_directory.name),
        selected_tools=["write_file"],
    ).get_tools()[0]
    write_tool.description = '这是一个写文件的工具'

    # Return the tool list consumed by Agent.
    return [tavily, write_tool]

def get_prompt_template(tool_descs, tool_names):
    """
    Build the ReAct-style prompt template that drives the LLM.

    Parameters:
    - tool_descs (str): tool description text (not interpolated here; the
      template keeps a {tool_descs} placeholder filled at format time)
    - tool_names (str): tool names joined with 'or' (likewise filled later
      via the {tool_names} placeholder)

    Returns:
    - str: the prompt template with {today}, {tool_descs}, {chat_history},
      {tool_names}, {query} and {agent_scratchpad} placeholders.
    """
    template = """Today is {today}. Please Answer the following questions as best you can. You have access to the following tools:

{tool_descs}

These are chat history before:
{chat_history}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {query}
{agent_scratchpad}
"""
    return template

class Agent:
    """
    ReAct-style agent: loops between an LLM and a set of tools until the
    LLM produces a final answer for the user query.
    """
    def __init__(self, llm, tools):
        """
        Initialize the Agent.

        Parameters:
        - llm (callable): function (prompt, user_stop_words=[...]) -> str
        - tools (list): available tools; each exposes .name, .description, .args
        """
        self.llm = llm
        self.tools = tools
        # BUG FIX: join with ' or ' (was 'or'), otherwise the names are glued
        # together in the prompt, e.g. "tavily_search_resultsorwrite_file".
        self.tool_names = ' or '.join([tool.name for tool in tools])
        self.tool_descs = self._generate_tool_descriptions()
        print(self.tool_descs)
        self.prompt_tpl = get_prompt_template(self.tool_descs, self.tool_names)

    def _generate_tool_descriptions(self):
        """
        Build one 'name: description, args: [...]' line per tool, joined
        with newlines, for interpolation into the prompt.
        """
        tool_descs = []
        for t in self.tools:
            args_desc = []
            for name, info in t.args.items():
                args_desc.append({'name': name, 'description': info.get('description', ''), 'type': info['type']})
            args_desc = json.dumps(args_desc, ensure_ascii=False)
            tool_descs.append(f'{t.name}: {t.description}, args: {args_desc}')
        return '\n'.join(tool_descs)

    def agent_execute(self, query, chat_history=None):
        """
        Run the ReAct loop: ask the LLM for the next step, execute the chosen
        tool, feed the observation back, repeat until a final answer.

        Parameters:
        - query (str): the user question
        - chat_history (list | None): prior turns as [(user, assistant), ...]

        Returns:
        - (bool, str, list): success flag, final answer, updated chat history
        """
        # BUG FIX: avoid a mutable default argument — the shared list leaked
        # history across unrelated calls.
        chat_history = [] if chat_history is None else chat_history
        agent_scratchpad = ''  # running transcript of Thought/Action/Observation steps

        while True:
            # 1) Build the prompt from history + query and ask the LLM for the next step.
            history = '\n'.join([f'Question:{his[0]}\nAnswer:{his[1]}' for his in chat_history])
            today = datetime.datetime.now().strftime('%Y-%m-%d')
            prompt = self.prompt_tpl.format(
                today=today,
                chat_history=history,
                tool_descs=self.tool_descs,
                tool_names=self.tool_names,
                query=query,
                agent_scratchpad=agent_scratchpad
            )
            print(f'\033[32m---等待LLM返回... ...\n{prompt}\n\033[0m', flush=True)
            # Stop at 'Observation:' so the model doesn't hallucinate tool output.
            response = self.llm(prompt, user_stop_words=['Observation:'])
            # BUG FIX: trailing escape was '\033[34m' (set color again) instead
            # of '\033[0m' (reset), leaving the terminal colored.
            print(f'\033[34m---LLM返回---\n{response}\n---\033[0m', flush=True)

            # 2) Locate the last Thought/Final Answer/Action/Action Input/Observation markers.
            thought_i = response.rfind('Thought:')
            final_answer_i = response.rfind('\nFinal Answer:')
            action_i = response.rfind('\nAction:')
            action_input_i = response.rfind('\nAction Input:')
            observation_i = response.rfind('\nObservation:')

            # 3) Final answer reached — record the turn and return.
            if final_answer_i != -1 and thought_i < final_answer_i:
                final_answer = response[final_answer_i + len('\nFinal Answer:'):].strip()
                chat_history.append((query, final_answer))
                return True, final_answer, chat_history

            # 4) Malformed response: markers must appear in Thought < Action < Action Input order.
            if not (thought_i < action_i < action_input_i):
                return False, 'LLM回复格式异常', chat_history

            # The stop word may have cut the response before 'Observation:'; pad it.
            if observation_i == -1:
                observation_i = len(response)
                response = response + '\nObservation: '

            thought = response[thought_i + len('Thought:'):action_i].strip()
            action = response[action_i + len('\nAction:'):action_input_i].strip()
            action_input = response[action_input_i + len('\nAction Input:'):observation_i].strip()

            # 5) Resolve the named tool.
            the_tool = None
            for t in self.tools:
                if t.name == action:
                    the_tool = t
                    break

            if the_tool is None:
                observation = 'the tool does not exist'
                agent_scratchpad = agent_scratchpad + response + 'Observation: ' + observation + '\n'
                continue

            # 6) Invoke the tool; any parse/invoke error becomes the observation
            # so the LLM can self-correct on the next iteration.
            try:
                action_input = json.loads(action_input)
                print("action_input: ", action_input)
                tool_ret = the_tool.invoke(input=json.dumps(action_input, ensure_ascii=False))
            except Exception as e:
                observation = f'the tool has error: {e}'
            else:
                observation = str(tool_ret)

            # Append this step to the scratchpad for the next LLM call.
            agent_scratchpad = agent_scratchpad + response + 'Observation: ' + observation + '\n'

    def agent_execute_with_retry(self, query, chat_history=None, retry_times=3):
        """
        Run agent_execute, retrying on failure up to retry_times times.

        Parameters:
        - query (str): the user question
        - chat_history (list | None): prior turns as [(user, assistant), ...]
        - retry_times (int): maximum number of attempts

        Returns:
        - (bool, str, list): success flag, final answer, updated chat history
        """
        # BUG FIX: avoid a mutable default argument (shared list across calls).
        chat_history = [] if chat_history is None else chat_history
        # BUG FIX: pre-initialize so retry_times <= 0 no longer raises NameError.
        success, result = False, 'agent执行失败'
        for i in range(retry_times):
            success, result, chat_history = self.agent_execute(query, chat_history=chat_history)
            if success:
                return success, result, chat_history
        return success, result, chat_history

def llm(query, history=None, user_stop_words=None):
    """
    Call an OpenAI-compatible chat model (DashScope qwen-plus) and return the
    generated answer.

    Parameters:
    - query (str): the user question
    - history (list | None): prior turns as [(user, assistant), ...]
    - user_stop_words (list | None): stop sequences for generation

    Returns:
    - str: the model's answer, or — best-effort — the stringified exception
      if the call fails. NOTE(review): callers cannot distinguish an error
      string from a real answer.
    """
    # BUG FIX: avoid mutable default arguments (shared lists across calls).
    history = [] if history is None else history
    user_stop_words = [] if user_stop_words is None else user_stop_words
    try:
        # OpenAI-compatible client pointed at DashScope; key from env.
        client = OpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )

        # Build the message list: system role, then alternating history turns.
        messages = [{'role': 'system', 'content': 'You are a helpful assistant.'}]
        for user_msg, assistant_msg in history:
            messages.append({'role': 'user', 'content': user_msg})
            messages.append({'role': 'assistant', 'content': assistant_msg})
        messages.append({'role': 'user', 'content': query})

        # Request a completion; generation halts at any of the stop words.
        completion = client.chat.completions.create(
            model="qwen-plus",
            messages=messages,
            stop=user_stop_words,
        )
        print(completion)
        return completion.choices[0].message.content

    except Exception as e:
        # Best-effort: return the error text so the agent loop does not crash.
        return str(e)

def main():
    """
    Entry point: build the Agent and run an interactive read-answer loop.

    Each turn reads a query from stdin, answers it with up to three retries,
    then trims the chat history to the 10 most recent turns.
    """
    agent = Agent(llm=llm, tools=setup_tools())
    history = []
    while True:
        query = input('query:')
        success, result, history = agent.agent_execute_with_retry(query, chat_history=history)
        if success:
            print(f'回答: {result}')
        else:
            print(f'出错: {result}')
        # Cap the rolling history at the last 10 turns.
        history = history[-10:]

if __name__ == "__main__":
    main()
