import chainlit as cl
import os
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, AIMessageChunk, ToolMessage, FunctionMessage
import time
import random
from langchain_core.messages import RemoveMessage
from prompt_list import prompt_list
from langgraph.prebuilt import create_react_agent
from langgraph.checkpoint.memory import MemorySaver
from tools import tools_list

# Gitee AI API key; falls back to an empty string when the environment
# variable is unset (requests will then fail with an auth error at call time).
GITEE_AI_API_KEY = os.environ.get(
    "GITEE_AI_API_KEY", "")

# Model served through Gitee AI's OpenAI-compatible endpoint.
model_name = "Qwen2.5-72B-Instruct"
base_url = "https://ai.gitee.com/v1"

# Streaming chat model. Sampling parameters (temperature / top_p /
# presence_penalty) are tuned for conversational use; `extra_body` is
# currently empty but kept as a hook for provider-specific request fields.
llm = ChatOpenAI(model=model_name, api_key=GITEE_AI_API_KEY, base_url=base_url, streaming=True, temperature=0.7, presence_penalty=1.05, top_p=0.8,
                 max_tokens=None, extra_body={


                 })


# @cl.set_chat_profiles
# async def chat_profile():
#     print([item['title'] for item in prompt_list])
#     return [cl.ChatProfile(
#         name=item['title'],
#         markdown_description=item['title'],
#     ) for item in prompt_list]

# In-memory checkpointer shared by all agent invocations; LangGraph keys the
# conversation state by thread_id (the Chainlit session id, see on_message).
memory = MemorySaver()

# Default system prompt: first entry of the configured prompt list.
system_message = prompt_list[0]['prompt']

# Instructions appended to every system prompt forcing the model to wrap LaTeX
# formulas in `$$ ... $$` so Chainlit's KaTeX renderer displays them.
# NOTE: this is a runtime string sent to the model — content left untouched.
fix_latex_message = """ 
- 若有明确数学公式，用 **LaTeX** 展示；  
必须使用双 '$' 符号将 LaTeX/KaTeX 公式封装在 KaTeX 语法中
错误语法 示例：
- [ y = ax^2 + bx + c ]
- （ y = ax^2 + bx + c ）
- \（ y = ax^2 + bx + c \）
正确语法示例：
- $$ y = ax^2 + bx + c $$。
"""


@cl.on_chat_start
async def main():
    """Ask the user to pick a role at session start and remember the choice.

    Presents one action button per entry in ``prompt_list``; the selected
    title is stored under the ``sys_role`` session key for ``on_message``.
    """
    actions = [
        cl.Action(
            name=entry['title'],
            value=entry['title'],
            label=entry["title"],
            description=entry['prompt'][:50] + '...',
        )
        for entry in prompt_list
    ]
    selection = await cl.AskActionMessage(
        content="请选择你想使用的工具：",
        actions=actions,
        timeout=180,
    ).send()

    chosen = selection.get("value") if selection else None
    if chosen:
        cl.user_session.set("sys_role", chosen)
        await cl.Message(
            content=f"已选择：【{chosen}】，请开始聊天",
        ).send()


@cl.on_message
async def on_message(message: cl.Message):
    """Handle one user turn: run the ReAct agent and stream the reply.

    Flow:
      1. Reject over-long input (> 6000 characters).
      2. Resolve the system prompt from the role chosen at chat start.
      3. Stream the agent's answer token-by-token into a Chainlit message,
         surfacing tool calls and tool results inside a ``cl.Step``.
      4. After the turn, remove tool messages and stale history from the
         LangGraph checkpoint so the thread state stays bounded.
    """
    sys_role = cl.user_session.get("sys_role")
    if len(message.content) > 6000:
        await cl.Message(content="输入长度不能超过 6000，请重新输入").send()
        return

    # Lightweight per-session transcript (bounded below).
    message_history = cl.user_session.get("message_history") or []
    message_history.append({"role": "user", "content": message.content})

    # BUG FIX: `recursion_limit` is a top-level RunnableConfig key in
    # LangGraph; placing it inside `configurable` had no effect.
    config = {
        "configurable": {"thread_id": cl.user_session.get("id")},
        "recursion_limit": 14,
    }

    # Resolve the system prompt for the selected role; fall back to the first.
    matching = [item for item in prompt_list if item["title"] == sys_role]
    system_prompt = matching[0]['prompt'] if matching else prompt_list[0]['prompt']

    first = True
    ai_res_msg = ''
    tools_msg_ids = []
    print(sys_role)

    MAX_ROUNDS = 4
    # Keep the first and last MAX_ROUNDS//2 entries when history grows.
    if len(message_history) > MAX_ROUNDS:
        message_history = message_history[:MAX_ROUNDS //
                                          2] + message_history[-MAX_ROUNDS // 2:]
    # BUG FIX: persist the (trimmed) history — it was never written back to
    # the session, so it silently restarted empty on every turn.
    cl.user_session.set("message_history", message_history)

    agent_executor = create_react_agent(
        llm, checkpointer=memory, tools=tools_list, debug=False)
    cl_msg = cl.Message(content="")
    time_start = time.time()
    print("input:", message.content, end='\n')

    async with cl.Step(name=sys_role) as step:
        async for ai_msg, metadata in agent_executor.astream(
            # BUG FIX: wrap the system prompt in SystemMessage. A bare string
            # in a messages list is coerced to a *human* message by LangChain,
            # so the system instructions were being sent with the wrong role.
            {"messages": [SystemMessage(content=system_prompt + fix_latex_message),
                          HumanMessage(content=message.content)]},
            config,
            stream_mode="messages",
        ):
            # Stream assistant tokens to the UI as they arrive.
            if ai_msg.content and isinstance(ai_msg, AIMessage):
                await cl_msg.stream_token(ai_msg.content)
                if len(ai_res_msg) <= 1:
                    # Time-to-first-token, for latency monitoring.
                    print("首字耗时:", time.time() - time_start)
                ai_res_msg += ai_msg.content

            if isinstance(ai_msg, AIMessageChunk):
                # Accumulate chunks so tool_calls are fully assembled before
                # being shown in the step input.
                if first:
                    gathered = ai_msg
                    first = False
                else:
                    gathered = gathered + ai_msg
                if ai_msg.tool_call_chunks:
                    step.input = gathered.tool_calls
                    print("调用函数:", gathered.tool_calls)

            if isinstance(ai_msg, ToolMessage):
                if ai_msg.id:
                    tools_msg_ids.append(ai_msg.id)
                step.output = ai_msg.content

        print(ai_res_msg)
        # Prune checkpointed state: drop tool messages, then trim old turns
        # (keeping the first two messages — system prompt + first user turn).
        for tool_msg_id in tools_msg_ids:
            if tool_msg_id:
                agent_executor.update_state(
                    config, {"messages": RemoveMessage(id=tool_msg_id)})
                memory_messages = agent_executor.get_state(
                    config).values["messages"]
                if len(memory_messages) > MAX_ROUNDS:
                    print(type(memory_messages))
                    discard_msg_list = memory_messages[2: -MAX_ROUNDS // 2]
                    for discard_msg in discard_msg_list:
                        print("删除历史消息")
                        agent_executor.update_state(
                            config, {"messages": RemoveMessage(id=discard_msg.id)})
        await cl_msg.send()
