import json
import asyncio
import os
from ast import literal_eval
from fastapi import APIRouter, UploadFile, Form
from fastapi.responses import StreamingResponse
from tools.tools_select import tools
from typing import List, AsyncIterable
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain.agents import create_react_agent, AgentExecutor
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate
from configs.prompt import PROMPT_TEMPLATES
from configs.setting import chat_model_name, api_key, base_url, TIME_OUT, TEMP_FILE_STORAGE_DIR
from utils.callback import CustomAsyncIteratorCallbackHandler
from db_server.base import session
from db_server.knowledge_base_repository import list_kb_from_db
from tools.code_interpreter import code_interpreter
from utils.load_docs import get_file_content

# Router for the chat endpoints, mounted under the /chat prefix.
chat_router = APIRouter(prefix="/chat", tags=["Chat 对话"])


def files_rag(files, uuid):
    """Persist uploaded files in a per-session directory and return the
    concatenated contents of every file stored there.

    Args:
        files: Optional iterable of FastAPI UploadFile objects. May be
            None/empty, in which case only previously stored files for the
            session are read back.
        uuid: Session identifier; used as the storage sub-directory name.

    Returns:
        One string with a "document:.<path>" / "content:<content>" entry per
        stored file (empty string when the directory holds no files).

    Raises:
        ValueError: If `uuid` is falsy. Previously a None session id
            surfaced as an opaque TypeError from os.path.join(..., None).
    """
    if not uuid:
        raise ValueError("a session id is required to locate the file storage directory")

    kb_file_storage_path = os.path.join(TEMP_FILE_STORAGE_DIR, uuid)
    # Ensure the per-session storage directory exists.
    os.makedirs(kb_file_storage_path, exist_ok=True)

    # Persist any newly uploaded files so later requests in the same session
    # can re-read them from disk.
    if files:
        for file in files:
            # basename() strips any client-supplied directory components,
            # preventing path traversal outside the session directory.
            file_path = os.path.join(kb_file_storage_path, os.path.basename(file.filename))
            with open(file_path, "wb") as f:
                f.write(file.file.read())

    # Read back every file stored for this session (including files from
    # earlier requests) and concatenate path + content for the prompt.
    result = []
    for filename in os.listdir(kb_file_storage_path):
        file_path = os.path.join(kb_file_storage_path, filename)
        if os.path.isfile(file_path):
            file_content = get_file_content(file_path)
            # NOTE(review): "document:." contains a stray period before the
            # path — kept as-is; confirm downstream prompt parsing expects it.
            result.append(f"document:.{file_path}\ncontent:{file_content}\n")
    return "".join(result)


@chat_router.post("/agent_chat")
async def agent_chat(
        files: List[UploadFile] = None,
        query: str = Form(..., description="用户输入"),
        sys_prompt: str = Form("You are a helpful assistant.", description="系统提示"),
        history_len: int = Form(-1, description="保留历史消息的数量"),
        history: List[str] = Form([], description="历史对话"),
        temperature: float = Form(0.5, description="LLM采样温度"),
        max_tokens: int = Form(1024, description="LLM最大token数配置"),
        session_id: str = Form(None, description="会话标识"),
):
    """Stream a ReAct-agent chat answer as UTF-8-encoded JSON chunks.

    Uploaded files are persisted per session (via files_rag) and their
    contents are injected into the prompt as ``documents``. Each streamed
    chunk is a JSON object ``{"answer": token}``; after the agent finishes,
    one final chunk carries any code-interpreter output (captured code and,
    if a file was produced, a Markdown image link).

    Form parameters:
        files: optional uploads made available to the agent.
        query: the user's question.
        sys_prompt: system-role instruction for the model.
        history_len: number of past exchanges to keep; <= 0 keeps all.
        history: prior messages as stringified dicts with 'role'/'content'
            keys, parsed with ast.literal_eval.
        temperature / max_tokens: LLM sampling controls.
        session_id: identifies the per-session temp-file directory.
    """
    documents = files_rag(files, session_id)
    # Each history item arrives as a stringified dict; literal_eval parses it
    # without executing arbitrary code (unlike eval).
    history = [literal_eval(item) for item in history]
    # Keep only the last `history_len` exchanges; each exchange is assumed to
    # be two messages (one user turn + one assistant turn), hence the factor 2.
    histories = ""
    if history_len > 0:
        history = history[-2 * history_len:]

    # Flatten the retained messages into a plain-text "role:content" transcript
    # that is substituted into the agent prompt.
    for msg in history:
        role = msg.get('role')
        content = msg.get('content')
        histories += f"{role}:{content}\n\n"

    async def agent_chat_iterator() -> AsyncIterable[str]:
        # Custom callback handler that exposes the model's streamed tokens as
        # an async iterator, consumed below while the agent task runs.
        callback = CustomAsyncIteratorCallbackHandler()
        callbacks = [callback]

        # Streaming chat model; generated tokens are delivered via `callback`.
        chat_model = ChatOpenAI(
            model=chat_model_name,
            api_key=api_key,
            base_url=base_url,
            temperature=temperature,
            max_tokens=max_tokens,
            streaming=True,
            callbacks=callbacks
        )

        # System message: caller-supplied behavior rules for the model.
        system_prompt = SystemMessagePromptTemplate.from_template(
            sys_prompt
        )

        # Human message: the ReAct agent template, which carries placeholders
        # for input / history / knowledgebases / documents (see inputs below).
        human_prompt = HumanMessagePromptTemplate.from_template(
            PROMPT_TEMPLATES["agent"]
        )
        # Assemble the full ChatPromptTemplate.
        chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])

        # Build the ReAct agent; generation stops at "\nObserv" so the model
        # cannot fabricate its own "Observation:" tool results.
        agent = create_react_agent(chat_model, tools, chat_prompt, stop_sequence=["\nObserv"])
        agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=tools,
            verbose=True,
        )

        # List every knowledge base so the agent knows what it may search.
        knowledgebases = list_kb_from_db(session)
        # Render them as "name - description" lines for the prompt.
        kbs = "".join([f"{kb['kb_name']} - {kb['kb_info']} \n" for kb in knowledgebases])
        # Reset the code interpreter's captured outputs for this request.
        # NOTE(review): this mutates attributes on what appears to be a shared
        # module-level singleton, so concurrent requests may clobber each
        # other's outputs — confirm request isolation.
        code_interpreter.output_files, code_interpreter.output_codes = "", ""
        # Run the agent concurrently; its tokens surface through `callback`.
        task = asyncio.create_task(
            agent_executor.acall(
                inputs={"input": query, "history": histories, "knowledgebases": kbs, "documents": documents})
        )

        # Relay each streamed token to the client as its own JSON chunk.
        async for token in callback.aiter():
            response_data = {
                "answer": token,
            }
            yield json.dumps(response_data).encode('utf-8')

        # The token stream has ended; bound how long we wait for the agent
        # task itself to finish (raises asyncio.TimeoutError after TIME_OUT).
        await asyncio.wait_for(task, TIME_OUT)
        output_files, output_codes = code_interpreter.get_outputs()

        # Finally, emit any code-interpreter artifacts: the captured code and,
        # when a file was produced, a Markdown image link served locally.
        if output_files:
            yield json.dumps({"answer": f'\n\n{output_codes}\n\n![](http://localhost:6605{output_files})'}).encode(
                'utf-8')
        else:
            yield json.dumps({"answer": f'\n\n{output_codes}'}).encode('utf-8')

    # Stream the generator's chunks to the client as they are produced.
    return StreamingResponse(agent_chat_iterator(), media_type="application/json")
