import init_env_impl

import asyncio

from langchain.chat_models import init_chat_model
from langchain_core.messages import HumanMessage, SystemMessage

# Shared chat model instance (DeepSeek) used by every test function below.
model = init_chat_model("deepseek-chat")

def test_chat_model():
    """Exercise the supported invocation forms of the chat model, then stream."""
    convo = [
        SystemMessage("你是一个笑话大师"),
        HumanMessage("讲一个关于小明的笑话")
    ]

    # Four equivalent input shapes: message list, bare string,
    # role-dict list, and a single-element HumanMessage list.
    payloads = (
        convo,
        "hello",
        [{"role":"user", "content":"你好"}],
        [HumanMessage("what is your name")],
    )
    for payload in payloads:
        print(model.invoke(payload).content, "\n")

    # Streaming yields the reply incrementally, token by token.
    for token in model.stream(convo):
        print(token.content, end="")


from langchain_core.prompts import MessagesPlaceholder,ChatPromptTemplate
def test_prompt_template():
    """Build a chat prompt from a template, inspect it, and send it to the model.

    Fixes vs. the original:
      * the system prompt's typo "translate then following" -> "translate the
        following" (the broken wording was the prompt actually sent).
      * `prompt.to_messages()` returns a new list and does not mutate
        `prompt`; the original discarded the result and printed the same
        object twice. The message list is now printed.
    """
    system_template = "translate the following from english into {language}"
    prompt_template = ChatPromptTemplate.from_messages(
        [("system", system_template), ("user", "{text}")]
    )

    prompt = prompt_template.invoke({"language": "chinese", "text": "what is your name?"})
    print(prompt)
    # Show the plain message-list view of the prompt value.
    print(prompt.to_messages())

    res = model.invoke(prompt).content
    print(res)


# How the model uses conversation context to answer (it should correctly answer "Bob")
from langchain_core.messages import AIMessage
def test_ai_message():
    """Show that a prior AI turn supplies context: the model should answer "Bob"."""
    history = [
        HumanMessage(content="hi i'm bob"),
        # The second entry is the assistant's reply, establishing the name in context.
        AIMessage(content="Hello Bob! How can I assist you today?"),
        HumanMessage(content="What is my name?")
    ]
    answer = model.invoke(history).content
    print(answer)


# The code below builds a simple LangGraph workflow for chat messages.
# The workflow has a single node ("model"), reached directly from START.
# MemorySaver persists conversation state, so context is remembered across calls.
# Two-round demo: the user introduces themselves, then asks for their own name.
# Different conversation threads are distinguished by thread_id.
# pretty_print() renders message content in a readable format.
from langgraph.checkpoint.memory import MemorySaver  # in-memory checkpointer for conversation state
from langgraph.graph import START, MessagesState, StateGraph  # graph classes and constants

# Module-level state graph with MessagesState as its schema.
# NOTE(review): this shared instance is mutated by the test functions below.
workflow = StateGraph(state_schema=MessagesState)

# Graph node that processes the current message state.
def test_memory(state: MessagesState):
    """Run the model on the accumulated messages and return the reply as new state."""
    reply = model.invoke(state["messages"])
    return {"messages": reply}

# Demonstrate memory-backed conversation via a compiled workflow.
def test_call_workflow():
    """Demonstrate MemorySaver-backed conversation memory keyed by thread_id.

    Bug fix vs. the original: it added the "model" node and edge to the shared
    module-level `workflow` on every call, so calling it a second time (or
    together with test_msg_place_holder) raised on the duplicate node. A fresh
    local StateGraph is built instead.
    """
    graph = StateGraph(state_schema=MessagesState)
    graph.add_edge(START, "model")
    graph.add_node("model", test_memory)

    # In-memory checkpointer: conversation state survives across invocations.
    memory = MemorySaver()
    app = graph.compile(checkpointer=memory)

    # thread_id identifies which conversation's state is saved and restored.
    config = {"configurable": {"thread_id": "abc123"}}

    # Round 1: the user introduces themselves.
    query = "Hi! I'm Bob."
    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()  # state contains the full message history

    # Round 2: thanks to the checkpoint, the model should remember "Bob".
    query = "What's my name?"
    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    # A different thread_id is a separate conversation — the earlier
    # context is NOT available, so the model cannot know the name.
    config = {"configurable": {"thread_id": "abc234"}}
    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()

    # Switching back to the original thread restores the earlier context.
    config = {"configurable": {"thread_id": "abc123"}}
    input_messages = [HumanMessage(query)]
    output = app.invoke({"messages": input_messages}, config)
    output["messages"][-1].pretty_print()



"""----------------使用异步版本----------------"""
async def test_memory_async(state: MessagesState):
    """Async graph node: await the model and return the reply as new state.

    Bug fix vs. the original: it called the synchronous model.invoke() inside
    an async function, which blocks the event loop; the async ainvoke() is
    awaited instead.
    """
    response = await model.ainvoke(state["messages"])
    return {"messages": response}

async def call_async_test():
    """Build and compile the async workflow, returning the runnable app."""
    # Define the graph as before:
    graph = StateGraph(state_schema=MessagesState)
    graph.add_edge(START, "model")
    graph.add_node("model", test_memory_async)
    # workflow.set_entry_point(START)   # <-- not allowed: START cannot be used as an entry/finish node here
    graph.set_finish_point("model")     # <-- finish node
    return graph.compile(checkpointer=MemorySaver())

async def call_workflow_func():
    """Run one round of the async workflow via ainvoke()."""
    config = {"configurable": {"thread_id": "abc123"}}
    input_messages = [HumanMessage("Hi! I'm Bob.")]
    app = await call_async_test()
    # Async invocation:
    result = await app.ainvoke({"messages": input_messages}, config)
    result["messages"][-1].pretty_print()


# Graph node that processes a prompt value's messages.
def test_memory2(prompt):
    """Forward the prompt's messages to the model and wrap the reply as state."""
    return {"messages": model.invoke(prompt["messages"])}



def test_msg_place_holder():
    """Combine a MessagesPlaceholder prompt with a memory-backed workflow.

    Bug fixes vs. the original:
      * round 2 set `query = "What is my name?"` but reused the round-1
        `input_messages`, so the new question was never actually sent; a
        fresh HumanMessage is now built for each round.
      * a local StateGraph is built instead of mutating the shared
        module-level `workflow` (re-adding the "model" node would raise on
        a second call or after test_call_workflow).
    """
    prompt_template = ChatPromptTemplate.from_messages(
        [
            (
                "system", "You are a helpful assistant. Answer all questions to the best of your ability in {language}."
            ),
            MessagesPlaceholder(variable_name="messages")
        ]
    )

    graph = StateGraph(state_schema=MessagesState)
    graph.add_edge(START, "model")
    graph.add_node("model", test_memory2)
    memory = MemorySaver()
    app = graph.compile(checkpointer=memory)
    config = {"configurable": {"thread_id": "abc345"}}

    def _run_round(query: str) -> None:
        # Build a fresh prompt for this user turn and invoke the workflow.
        prompt = prompt_template.invoke({
            "messages": [HumanMessage(query)],
            "language": "chinese"
        })
        print("---------> ", prompt)
        output = app.invoke(prompt, config)
        output["messages"][-1].pretty_print()

    _run_round("Hi! I'm Jim.")
    # Thanks to the checkpointer, the model should remember "Jim".
    _run_round("What is my name?")


# TODO:
# import tiktoken
# def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
#     """Return the number of tokens used by a list of messages."""
#     try:
#         encoding = tiktoken.encoding_for_model(model)
#     except KeyError:
#         encoding = tiktoken.get_encoding("cl100k_base")
#     num_tokens = 0
#     for message in messages:
#         num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
#         for key, value in message.items():
#             num_tokens += len(encoding.encode(value))
#             if key == "name":  # if there's a name, the role is omitted
#                 num_tokens += -1  # role is always required and always 1 token
#     num_tokens += 2  # every reply is primed with <im_start>assistant
#     return num_tokens


# # 自定义 token 计数函数
# def custom_token_counter(messages):
#     encoding = tiktoken.get_encoding("cl100k_base")
#     num_tokens = 0
#     for message in messages:
#         num_tokens += len(encoding.encode(message.content))
#     return num_tokens
    

# from langchain_core.messages import trim_messages
# def test_trim_messages():
#     trimmer = trim_messages(
#         max_tokens=65,
#         strategy="last",
#         token_counter=model,
#         include_system=True,
#         allow_partial=False,
#         start_on="human",
#     )

#     messages = [
#         SystemMessage(content="you're a good assistant"),
#         HumanMessage(content="hi! I'm bob"),
#         AIMessage(content="hi!"),
#         HumanMessage(content="I like vanilla ice cream"),
#         AIMessage(content="nice"),
#         HumanMessage(content="whats 2 + 2"),
#         AIMessage(content="4"),
#         HumanMessage(content="thanks"),
#         AIMessage(content="no problem!"),
#         HumanMessage(content="having fun?"),
#         AIMessage(content="yes!"),
#     ]
#     model.get_num_tokens_from_messages = custom_token_counter
#     trimmer.invoke(messages)




if __name__ == "__main__":
    # 创建一个简单的聊天
    # test_chat_model()
    # 使用prompt模板
    # test_prompt_template()
    # 加入ai对话的上下文
    # test_ai_message()
    # 使用工作流，同时使用messagememory记录上下文，不同线程id的上下文不同
    # test_call_workflow()
    # 使用异步调用测试工作流
    # asyncio.run(call_workflow_func())   # 异步调用方式
    # 更加灵活的prompt同时运用在工作流上
    # test_msg_place_holder()
    # test_trim_messages()   #TODO:
    pass

