from langchain.memory import ConversationBufferWindowMemory

from server.agent.custom_agent.ChatGLM3Agent import initialize_glm3_agent
from server.agent.tools_select import tools, tool_names
from server.agent.callbacks import CustomAsyncIteratorCallbackHandler, Status
from langchain.agents import LLMSingleActionAgent, AgentExecutor
from server.agent.custom_template import CustomOutputParser, CustomPromptTemplate
from fastapi import Body
from fastapi.responses import StreamingResponse
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE, HISTORY_LEN, Agent_MODEL, FAQ_MODEL, MODEL_PATH, QA_FILE_PATH, KV_FILE_PATH
from server.utils import wrap_done, get_ChatOpenAI, get_prompt_template
from langchain.chains import LLMChain
from typing import AsyncIterable, Optional
import asyncio
from typing import List
from server.chat.utils import History
import json
import uuid
import redis
import time
import datetime
import numpy
import copy
from server.chat.config import Config
from server.chat.cs_utils_2 import intent_recognition, load_json_file
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from server.agent import model_container
from server.knowledge_base.kb_service.base import get_kb_details

args = Config()

# FAQ few-shot question-answering model (ModelScope pipeline).
FAQ = pipeline(Tasks.faq_question_answering, MODEL_PATH["faq_model"][FAQ_MODEL])

# Redis connection pool shared by every request handled by this module.
pool = redis.ConnectionPool(host=args.redis_host, port=args.redis_port, db=args.redis_db,
                            max_connections=args.redis_max_connections)

# Redis client backed by the pool above.
redis_client = redis.Redis(connection_pool=pool)

# datetime.weekday() index -> Chinese weekday name.
chinese_weekday = {
    0: "星期一",
    1: "星期二",
    2: "星期三",
    3: "星期四",
    4: "星期五",
    5: "星期六",
    6: "星期日"
}
QA_file_path = args.QA_file_path
KV_file_path = args.KV_file_path

# Seed FAQ data. When either file is broken, both are set to None and the
# FAQ stage in agent_chat falls back to intent recognition / the LLM agent.
try:
    QA_datas = load_json_file(QA_FILE_PATH)
    KV_datas = load_json_file(KV_FILE_PATH)
except Exception as e:
    # Was a bare `except:` that also swallowed SystemExit/KeyboardInterrupt
    # and hid the real cause; narrow it and report the error.
    QA_datas = None
    KV_datas = None
    print(f"The FAQ json file is faulty. The FAQ will be skipped. ({e!r})")


def generate_unique_id():
    """Return a fresh random UUID4 as a string, used to tag one request."""
    new_id = uuid.uuid4()
    return f"{new_id}"


class ChatHistory:
    """Per-user conversation history persisted in Redis under the key ``uid``.

    Each stored round has the shape
    ``[{"role": "user", ...}, {"role": "assistant", ...}, timestamp]``.
    """

    def __init__(self, uid):
        self.uid = uid
        # Raw JSON payload fetched from Redis; doubles as a "history was
        # loaded" flag for add_to_history().
        self.history_json = None

    def get_history(self):
        """Return the last HISTORY_LEN rounds flattened into a message list.

        On any Redis or parse failure the stored record is cleared (so a
        corrupt payload cannot wedge the chat) and ``[]`` is returned.
        """
        try:
            # Fetch the serialized history for this uid.
            self.history_json = redis_client.get(self.uid)
        except redis.RedisError:
            return []

        if not self.history_json:
            return []

        try:
            # Previously json.loads() sat OUTSIDE the try, so a corrupt
            # payload raised instead of being cleared; it is guarded now.
            history = json.loads(self.history_json)
            self.old_history = copy.deepcopy(history)
            # Keep only the user/assistant dicts (drop the trailing
            # timestamp), skip rounds whose assistant reply is empty,
            # then flatten the rounds into one message list.
            return numpy.concatenate(
                [i[:2] for i in self.old_history[-HISTORY_LEN:] if i[1].get("content") != '']).tolist()
        except Exception:
            # Malformed record: wipe it so the next call starts clean.
            self.clear_history()
            return []

    def add_to_history(self, message):
        """Append one round (user msg, assistant msg, timestamp) and persist."""
        history = self.old_history if self.history_json else []
        history.append(message)
        redis_client.set(self.uid, json.dumps(history))

    def clear_history(self):
        """Delete this uid's history key plus any ``uid-*`` derived keys."""
        # History itself is stored under the bare uid (see add_to_history);
        # the original only matched "uid-*", which never includes the bare
        # key, so corrupt records survived clearing. Delete both.
        redis_client.delete(self.uid)
        matching_keys = redis_client.keys(f"{self.uid}-*")

        if matching_keys:
            for key in matching_keys:
                redis_client.delete(key)

async def agent_chat(uid: str = Body(..., description="用户id"),
                     messages: List[dict] = Body(..., description="请求参数，必填，prompt为用户输入内容",
                                                 examples=[[{"role": "user", "prompt": "你好"}]]),
                     stream: bool = Body(False, description="流式输出"),
                     model_name: str = Body(LLM_MODELS[0], description="LLM 模型名称。"),
                     temperature: float = Body(TEMPERATURE, description="LLM 采样温度", ge=0.0, le=1.0),
                     max_tokens: Optional[int] = Body(None, description="限制LLM生成Token数量，默认None代表模型最大值"),
                     prompt_name: str = Body("default",
                                             description="使用的prompt模板名称(在configs/prompt_config.py中配置)"),
                     # top_p: float = Body(TOP_P, description="LLM 核采样。勿与temperature同时设置", gt=0.0, lt=1.0),
                     ):
    """Main chat endpoint: FAQ match -> intent recognition -> LLM agent fallback.

    Resolution order for ``messages[0]["prompt"]``:
      1. If ``args.go_FAQ`` is set, score the query against the FAQ model;
         a hit above the threshold answers directly from ``KV_datas``.
      2. Otherwise run ``intent_recognition``; a recognized instruction is
         returned to the client (with a canned answer for some intents).
      3. Otherwise fall through to the LangChain agent with Redis-backed
         conversation history and stream/collect its output.

    Returns an ``EventSourceResponse``/``StreamingResponse`` when ``stream``
    is true, otherwise a dict shaped like an OpenAI chat completion.
    """

    async def return_content(answer, instruction, unique_id, room_id):
        # Single-item async generator so a fixed (non-LLM) reply can be sent
        # through EventSourceResponse in the same envelope as the agent path.
        yield json.dumps({"choices": [
            {
                "message": {
                    "role": "assistant",
                    "response": answer,
                    "instruction": instruction,
                    "unique_id": unique_id,
                    "room_id": room_id
                }
            }
        ]}, ensure_ascii=False)

    query = messages[0]["prompt"]
    unique_id = generate_unique_id()
    current_timestamp = time.time()
    instruction = None
    answer = None
    room_id = None
    now = datetime.datetime.now()
    now_time = now.strftime("%Y-%m-%d %H:%M:%S")
    if args.go_FAQ:
        try:
            FAQ_start_time = time.time()
            # Run the FAQ few-shot model over the single query.
            output = FAQ({"query_set": [query], "support_set": QA_datas})
            # Top match probability.
            FAQ_probability = output['output'][0][0]['score']
            # Top match label.
            FAQ_label = output['output'][0][0]['label']
            FAQ_end_time = time.time()
            FAQ_spend_time = FAQ_end_time - FAQ_start_time
            print(f'FAQ耗时:{FAQ_spend_time}, FAQ概率:{FAQ_probability}, FAQ标签:{FAQ_label}')
            # Decide whether the FAQ score clears the threshold;
            # "error_correction" uses a stricter fixed threshold.
            if FAQ_label == "error_correction":
                original_threshold = 0.9
            else:
                original_threshold = args.FAQ_threshold
            if FAQ_probability >= original_threshold:
                answer = KV_datas[FAQ_label]
            else:
                instruction_start_time = time.time()
                # Intent recognition fallback when the FAQ score is too low.
                results = await intent_recognition(query)
                instruction_end_time = time.time()
                instruction_spend_time = instruction_end_time - instruction_start_time
                print(f"指令耗时：{instruction_spend_time}")
                # intent_recognition may return either a bare instruction or
                # a (instruction, room_id) tuple; a tuple means two values.
                if isinstance(results, tuple):
                    instruction, room_id = results[0], results[1]
                else:
                    instruction = results
                    if instruction == '附近':
                        answer = '小卡现在还没办法了解您身边的环境和您的位置，后续我会努力变得更好的！'
                        instruction = None
        except:
            # NOTE(review): bare except — any FAQ failure (including a None
            # QA_datas from a bad FAQ file) silently falls back to intent
            # recognition; consider narrowing and logging the cause.
            instruction_start_time = time.time()
            results = await intent_recognition(query)
            instruction_end_time = time.time()
            instruction_spend_time = instruction_end_time - instruction_start_time
            print(f"指令耗时：{instruction_spend_time}")
            if isinstance(results, tuple):
                instruction, room_id = results[0], results[1]
            else:
                instruction = results
                if instruction == '附近':
                    answer = '小卡现在还没办法了解您身边的环境和您的位置，后续我会努力变得更好的！'
                    instruction = None
    else:
        # FAQ disabled by config: go straight to intent recognition.
        instruction_start_time = time.time()
        results = await intent_recognition(query)
        instruction_end_time = time.time()
        instruction_spend_time = instruction_end_time - instruction_start_time
        print(f"指令耗时：{instruction_spend_time}")
        if isinstance(results, tuple):
            instruction, room_id = results[0], results[1]
        else:
            instruction = results
            if instruction == '附近':
                answer = '小卡现在还没办法了解您身边的环境和您的位置，后续我会努力变得更好的！'
                instruction = None
    # Branch 1: an intent was recognized (and no canned answer was set yet).
    if instruction is not None and answer is None:
        if instruction == "date":
            # Date intent is answered locally.
            date = datetime.datetime.now()
            # Year / month / day.
            year, month, day = date.year, date.month, date.day
            # Chinese weekday name.
            week_what = chinese_weekday[date.weekday()]
            answer = f"今天是: {year}年 {month}月{day}日 {week_what}"
            instruction = '-1'
        else:
            answer = None  # No textual reply; the client acts on the instruction.
        # "I_" prefix marks an instruction-type response id.
        unique_id = 'I_' + unique_id
        log = "[" + now_time + '], uid:"' + f"{uid}" + '", prompt:"' + query + '", answer:"' + repr(
            answer) + '", instruction:"' + instruction + '"'
        print(log)
        if stream:
            return EventSourceResponse(return_content(answer=answer,
                                                      instruction=instruction,
                                                      unique_id=unique_id,
                                                      room_id=room_id),
                                       media_type="text/event-stream")
        else:
            return {"choices": [
                {
                    "message": {
                        "role": "assistant",
                        "response": answer,
                        "instruction": instruction,
                        "unique_id": unique_id,
                        "room_id": room_id
                    }
                }
            ]}
    # Branch 2: FAQ (or the '附近' intent) produced a direct answer.
    elif instruction is None and answer:
        instruction = "-1"
        # "D_" prefix marks a dialogue-type response id.
        unique_id = 'D_' + unique_id
        chat_history = ChatHistory(f"{uid}")  # History handle for this uid.
        chat_history.add_to_history(
            [{"role": "user", "content": query}, {"role": "assistant", "content": answer}, current_timestamp])
        log = "[" + now_time + '], uid:"' + f"{uid}" + '", prompt:"' + query + '", response:"' + repr(
            answer) + '", instruction:"' + instruction + '"'
        print(log)

        if stream:
            return EventSourceResponse(return_content(answer=answer,
                                                      instruction=instruction,
                                                      unique_id=unique_id,
                                                      room_id=room_id),
                                       media_type="text/event-stream")
        else:
            return {"choices": [
                {
                    "message": {
                        "role": "assistant",
                        "response": answer,
                        "instruction": instruction,
                        "unique_id": unique_id,
                        "room_id": room_id
                    }
                }
            ]}
    # Branch 3: no FAQ hit and no actionable intent — run the LLM agent.
    else:
        instruction = "-1"
        chat_history = ChatHistory(f"{uid}")  # History handle for this uid.
        history = chat_history.get_history()  # Load prior rounds from Redis.
        try:
            history = [History.from_data(h) for h in history]
        except:
            # NOTE(review): clear_history() does not modify the local
            # `history` list, so this retry re-parses the same data and will
            # raise again if it was malformed — confirm intent.
            chat_history.clear_history()
            history = [History.from_data(h) for h in history]

        async def agent_chat_iterator(
                query: str,
                history: Optional[List[History]],
                model_name: str = LLM_MODELS[0],
                prompt_name: str = prompt_name,
        ) -> AsyncIterable[str]:
            """Drive the agent and yield JSON chunks (SSE) or one final JSON."""
            nonlocal max_tokens
            callback = CustomAsyncIteratorCallbackHandler()
            # Non-positive max_tokens means "use the model's maximum".
            if isinstance(max_tokens, int) and max_tokens <= 0:
                max_tokens = None

            model = get_ChatOpenAI(
                model_name=model_name,
                temperature=temperature,
                max_tokens=max_tokens,
                callbacks=[callback],
            )

            # Publish knowledge-base info through the module-level container
            # so agent tools can reach it (shared global state).
            kb_list = {x["kb_name"]: x for x in get_kb_details()}
            model_container.DATABASE = {name: details['kb_info'] for name, details in kb_list.items()}

            if Agent_MODEL:
                # A dedicated agent model is configured; use it for tool calls.
                model_agent = get_ChatOpenAI(
                    model_name=Agent_MODEL,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    callbacks=[callback],
                )
                model_container.MODEL = model_agent
            else:
                model_container.MODEL = model

            prompt_template = get_prompt_template("agent_chat", prompt_name)
            prompt_template_agent = CustomPromptTemplate(
                template=prompt_template,
                tools=tools,
                input_variables=["input", "intermediate_steps", "history"]
            )
            output_parser = CustomOutputParser()
            llm_chain = LLMChain(llm=model, prompt=prompt_template_agent)
            # Convert the loaded history into agent memory
            # (k = HISTORY_LEN rounds * 2 messages each).
            memory = ConversationBufferWindowMemory(k=HISTORY_LEN * 2)
            for message in history:
                # Route each message by its role.
                if message.role == 'user':
                    # User turn.
                    memory.chat_memory.add_user_message(message.content)
                else:
                    # Assistant turn.
                    memory.chat_memory.add_ai_message(message.content)

            if "chatglm3" in model_container.MODEL.model_name:
                agent_executor = initialize_glm3_agent(
                    llm=model,
                    tools=tools,
                    callback_manager=None,
                    # Langchain Prompt is not constructed directly here, it is constructed inside the GLM3 agent.
                    prompt=prompt_template,
                    input_variables=["input", "intermediate_steps", "history"],
                    memory=memory,
                    verbose=True,
                )
            else:
                agent = LLMSingleActionAgent(
                    llm_chain=llm_chain,
                    output_parser=output_parser,
                    stop=["\nObservation:", "Observation"],
                    allowed_tools=tool_names,
                )
                agent_executor = AgentExecutor.from_agent_and_tools(agent=agent,
                                                                    tools=tools,
                                                                    verbose=True,
                                                                    memory=memory,
                                                                    )
            # NOTE(review): if create_task/acall raised here, the bare except
            # would retry forever and `task` could stay unbound; in practice
            # create_task on a coroutine rarely raises — confirm this loop is
            # intentional.
            while True:
                try:
                    task = asyncio.create_task(wrap_done(
                        agent_executor.acall(query, callbacks=[callback], include_run_info=True),
                        callback.done))
                    break
                except:
                    pass

            if stream:
                # SSE path: forward each callback chunk as its own JSON event.
                async for chunk in callback.aiter():
                    tools_use = []
                    # Use server-sent-events to stream the response
                    data = json.loads(chunk)
                    if data["status"] == Status.start or data["status"] == Status.complete:
                        continue
                    elif data["status"] == Status.error:
                        tools_use.append("\n```\n")
                        tools_use.append("工具名称: " + data["tool_name"])
                        tools_use.append("工具状态: " + "调用失败")
                        tools_use.append("错误信息: " + data["error"])
                        tools_use.append("重新开始尝试")
                        tools_use.append("\n```\n")
                        yield json.dumps({"tools": tools_use}, ensure_ascii=False)
                    elif data["status"] == Status.tool_finish:
                        tools_use.append("\n```\n")
                        tools_use.append("工具名称: " + data["tool_name"])
                        tools_use.append("工具状态: " + "调用成功")
                        tools_use.append("工具输入: " + data["input_str"])
                        tools_use.append("工具输出: " + data["output_str"])
                        tools_use.append("\n```\n")
                        yield json.dumps({"tools": tools_use}, ensure_ascii=False)
                    elif data["status"] == Status.agent_finish:
                        yield json.dumps({"final_answer": data["final_answer"]}, ensure_ascii=False)
                    else:
                        yield json.dumps({"answer": data["llm_token"]}, ensure_ascii=False)


            else:
                # Non-SSE path: accumulate everything, then yield one JSON blob.
                answer = ""
                final_answer = ""
                # NOTE(review): the status checks below are independent `if`s
                # (unlike the elif chain above), so an error/tool_finish chunk
                # also reaches the final `else` and reads data["llm_token"] —
                # likely intended to be elif; confirm the chunk payloads.
                async for chunk in callback.aiter():
                    # Use server-sent-events to stream the response
                    data = json.loads(chunk)
                    if data["status"] == Status.start or data["status"] == Status.complete:
                        continue
                    if data["status"] == Status.error:
                        answer += "\n```\n"
                        answer += "工具名称: " + data["tool_name"] + "\n"
                        answer += "工具状态: " + "调用失败" + "\n"
                        answer += "错误信息: " + data["error"] + "\n"
                        answer += "\n```\n"
                    if data["status"] == Status.tool_finish:
                        answer += "\n```\n"
                        answer += "工具名称: " + data["tool_name"] + "\n"
                        answer += "工具状态: " + "调用成功" + "\n"
                        answer += "工具输入: " + data["input_str"] + "\n"
                        answer += "工具输出: " + data["output_str"] + "\n"
                        answer += "\n```\n"
                    if data["status"] == Status.agent_finish:
                        final_answer = data["final_answer"]
                    else:
                        answer += data["llm_token"]
                # Persist this round (query + final answer + timestamp).
                new_content = [{"role": "user", "content": query}, {"role": "assistant", "content": final_answer},
                               current_timestamp]
                chat_history.add_to_history(new_content)
                log = "[" + now_time + '], uid:"' + f"{uid}" + '", prompt:"' + query + '", response:"' + repr(
                    final_answer) + '", instruction:"' + instruction + '"'
                print(log)
                unique_id_ = 'D_' + unique_id
                yield json.dumps({"choices": [
                    {
                        "message": {
                            "role": "assistant",
                            "response": final_answer,
                            "instruction": instruction,
                            "unique_id": unique_id_,
                            "room_id": room_id
                        }
                    }
                ]},
                    ensure_ascii=False)

            # Propagate any exception captured by wrap_done.
            await task

    # return EventSourceResponse(agent_chat_iterator(query=query,
    #                                                history=history,
    #                                                model_name=model_name,
    #                                                prompt_name=prompt_name),
    #                            )
    if stream:
        return EventSourceResponse(agent_chat_iterator(query=query,
                                                       history=history,
                                                       model_name=model_name,
                                                       prompt_name=prompt_name),
                                   media_type="text/event-stream")
    else:
        return StreamingResponse(agent_chat_iterator(query=query,
                                                     history=history,
                                                     model_name=model_name,
                                                     prompt_name=prompt_name),
                                 media_type="text/event-stream")
