from operator import itemgetter
import dotenv
from langchain.memory import ConversationBufferMemory  # 换用简单记忆组件
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_openai import ChatOpenAI

dotenv.load_dotenv()

# 1. 创建提示模板
# 1. Prompt template: system instruction, then the stored conversation
#    (filled from memory under the "history" key), then the new user turn.
_message_spec = [
    ("system", "你是聊天机器人，请根据上下文回复用户问题"),
    MessagesPlaceholder("history"),  # bound to memory_key="history" below
    ("human", "{query}"),
]
prompt = ChatPromptTemplate.from_messages(_message_spec)

# 2. 创建 Kimi 模型（必须配置 base_url）
# 2. Kimi chat model via the OpenAI-compatible client.
#    base_url is required — it redirects the client to the Moonshot endpoint.
llm = ChatOpenAI(
    base_url="https://api.moonshot.cn/v1",  # Moonshot (Kimi) API endpoint
    model="kimi-k2-0711-preview",
)

# 3. 初始化记忆组件（简单存储对话，不涉及 Token 计数）
# 3. Conversation memory: stores every turn verbatim (no token counting).
#    return_messages=True yields message objects, matching MessagesPlaceholder.
# NOTE(review): ConversationBufferMemory is deprecated in newer LangChain
# releases in favor of RunnableWithMessageHistory — confirm pinned version.
memory = ConversationBufferMemory(return_messages=True, memory_key="history")

# 4. 构建链应用
# 4. Build the chain: inject the stored history into the input dict, render
#    the prompt, call the model, and parse the reply down to a plain string.
_history_loader = RunnableLambda(memory.load_memory_variables) | itemgetter("history")
chain = (
    RunnablePassthrough.assign(history=_history_loader)
    | prompt
    | llm
    | StrOutputParser()
)

# 5. 对话循环
# 5. REPL loop: read a query, stream the answer token by token, then persist
#    the completed turn to memory so it is visible on the next query.
while True:
    query = input("Human: ")
    if query == "q":
        break  # end the loop normally instead of calling the site helper exit()

    chain_input = {"query": query}
    response = chain.stream(chain_input)

    print("AI: ", flush=True, end="")
    # Collect chunks in a list and join once — avoids repeated str concatenation.
    chunks = []
    for chunk in response:
        chunks.append(chunk)
        print(chunk, flush=True, end="")
    output = "".join(chunks)

    # Save the turn (no token counting needed for ConversationBufferMemory).
    memory.save_context(chain_input, {"output": output})
    print("\nhistory: ", memory.load_memory_variables({}), "\n")
