import os
from dotenv import load_dotenv
from langchain.chat_models import init_chat_model
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, \
    HumanMessagePromptTemplate
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain.memory.chat_message_histories import RedisChatMessageHistory,SQLChatMessageHistory
from langchain.memory import ConversationBufferMemory

# 通过手动定义列表来记录对话历史，实现连续对话
def continuous_chat_with_manually_memory():
    """Interactive console chat loop whose history lives in a plain Python list.

    Each turn appends the user message and the model reply to the list, and the
    full list is fed back to the model so the conversation stays coherent.
    The list is capped at the 50 most recent messages to bound prompt size.
    Type 'exit' or 'quit' to leave the loop.
    """
    llm = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content="你叫永雏塔菲，是一个猫娘女仆，你的作用是和我闲聊，且你的每次回复最后都要加上喵~"),
        MessagesPlaceholder(variable_name="messages"),
    ])
    pipeline = chat_prompt | llm | StrOutputParser()

    history = []
    while True:
        query = input("你：\n")
        if query.lower() in {'exit', 'quit'}:
            print("退出对话")
            break

        # Record the user's turn before invoking the model.
        history.append(HumanMessage(content=query))

        # Single (non-streaming) call with the whole history as context.
        reply = pipeline.invoke({"messages": history})
        print(f"永雏塔菲：\n {reply}")

        # Record the assistant's turn as well.
        history.append(AIMessage(content=reply))

        # Optional: keep only the 50 most recent messages.
        if len(history) > 50:
            del history[:-50]


# 通过手动定义列表来记录对话历史，实现连续对话（流式输出版本）
def continuous_chat_with_manually_memory_streaming_output():
    """Interactive chat loop with manually managed history and streamed replies.

    Tokens are printed to the console as they arrive; once the stream ends the
    assembled reply is appended to the history list, which is capped at the 50
    most recent messages. Type 'exit' or 'quit' to leave the loop.
    """
    llm = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    chat_prompt = ChatPromptTemplate.from_messages([
        SystemMessage(content="你叫永雏塔菲，是一个猫娘女仆，你的作用是和我闲聊，且你的每次回复最后都要加上喵~"),
        MessagesPlaceholder(variable_name="messages"),
    ])
    pipeline = chat_prompt | llm | StrOutputParser()

    history = []
    while True:
        query = input("\n你：")
        if query.lower() in {'exit', 'quit'}:
            print("退出对话")
            break

        # Record the user's turn before invoking the model.
        history.append(HumanMessage(content=query))

        # Stream the answer token-by-token while accumulating the full text.
        pieces = []
        print("永雏塔菲：")
        for piece in pipeline.stream({"messages": history}):
            pieces.append(piece)
            print(piece, end="", flush=True)

        # Record the assembled assistant reply.
        history.append(AIMessage(content="".join(pieces)))

        # Optional: keep only the 50 most recent messages.
        if len(history) > 50:
            del history[:-50]

# 使用Langchain的记忆模块来记录对话历史，该记忆可以存在内存/redis/mysql当中，实现连续对话【注：LangChain官方推荐使用记忆模块时迁移到LangGraph，因此这些记忆API已被标记为弃用】
def continuous_chat_with_langchain_memory_streaming_output():
    """Interactive streaming chat whose history is persisted via LangChain memory.

    Three history backends are constructed for demonstration (in-process,
    Redis, MySQL); the Redis-backed one is the one actually wired into the
    memory used below. Type 'exit' or 'quit' to leave the loop.
    """
    # In-process history (lost when the program exits) — demo only, unused.
    local_memory = ConversationBufferMemory(chat_memory=InMemoryChatMessageHistory(), return_messages=True)
    # Redis-backed history (this one is used below).
    redis_memory = RedisChatMessageHistory(session_id="langchain-learning-123", url="redis://127.0.0.1:6379/1")
    # MySQL-backed history — demo only, unused.
    mysql_memory = SQLChatMessageHistory(
        session_id="user_1",
        connection_string="mysql+pymysql://root:fsc970628@localhost:3306/rag_flow?charset=utf8mb4",
    )

    # Memory-type notes:
    # 1. ConversationBufferMemory  — stores the full conversation verbatim;
    #    simple, but an ever-growing history can blow the model's token limit.
    # 2. ConversationSummaryMemory — stores a running summary instead of the
    #    raw messages; bounds token usage at the cost of losing some detail.
    # 3. ConversationBufferWindowMemory — keeps only the last N turns; recent
    #    context is preserved while older turns are dropped.
    memory = ConversationBufferMemory(chat_memory=redis_memory, return_messages=True)

    model = init_chat_model(model="deepseek-chat", model_provider="deepseek")
    # BUG FIX: the history placeholder must come BEFORE the current user input.
    # The original ordering (input first, history last) made the model see the
    # new question followed by stale history, corrupting the message order.
    prompt = ChatPromptTemplate.from_messages([
        SystemMessagePromptTemplate.from_template("你叫永雏塔菲，是一个猫娘女仆，你的作用是和我闲聊，且你的每次回复最后都要加上喵~"),
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{input}"),
    ])
    chain = prompt | model | StrOutputParser()

    while True:
        # Load accumulated history before each turn; default to an empty list
        # so the very first turn (or a missing key) doesn't pass None.
        chat_history = memory.load_memory_variables({}).get('history', [])

        print(f"\n历史对话内容:\n{chat_history}")

        user_query = input("\n你：")
        if user_query.lower() in {'exit', 'quit'}:
            print("退出对话")
            break

        # Stream the answer while collecting the full text for persistence.
        answer_chunks = []
        print("永雏塔菲：")
        for chunk in chain.stream({"input": user_query, "chat_history": chat_history}):
            answer_chunks.append(chunk)
            print(chunk, end="", flush=True)

        full_answer = "".join(answer_chunks)
        # Persist both sides of this turn into the memory backend.
        memory.save_context({"input": user_query}, {"output": full_answer})


if __name__ == '__main__':
    # Load environment variables from .env, letting it override the shell.
    load_dotenv(override=True)
    DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
    # SECURITY FIX: never echo secrets to stdout/logs — report only presence.
    print(f"DEEPSEEK_API_KEY loaded: {'yes' if DEEPSEEK_API_KEY else 'no'}")

    # continuous_chat_with_manually_memory()
    # continuous_chat_with_manually_memory_streaming_output()
    continuous_chat_with_langchain_memory_streaming_output()







