import { ChatPromptTemplate, MessagesPlaceholder } from "@langchain/core/prompts";
import { useModel } from "../model";
import { HumanMessage, AIMessage } from "@langchain/core/messages";
import { ChatMessageHistory } from "langchain/memory";
import { config } from "dotenv";


//  Demo: manually maintaining the chat history by hand.
/**
 * Demonstrates manually threading a conversation history through a chain:
 * every human/AI message is appended to a ChatMessageHistory by hand and
 * the full message list is injected into the prompt on each call.
 *
 * @returns {Promise<void>} resolves after the second model response is logged.
 */
export const useChainDemo = async () => {
    // Any command meant to run standalone must load env vars first.
    // dotenv's config() is synchronous — no `await` needed.
    config();
    const model = useModel();
    const prompt = ChatPromptTemplate.fromMessages([
        // Fixed: the original prompt mixed second/third person
        // ("you ... provides ... its context", "If the you does not know").
        ["system", `You are a helpful assistant. Answer all questions to the best of your ability.
        You are talkative and provide lots of specific details from your context.
        If you do not know the answer to a question, truthfully say you do not know.`],
        // MessagesPlaceholder creates a slot named "history_message"; the
        // matching key passed to chain.invoke() fills this slot.
        new MessagesPlaceholder("history_message"),
    ]);
    const chain = prompt.pipe(model);

    const history = new ChatMessageHistory();
    await history.addMessage(new HumanMessage("hi, my name is Kai"));
    // First AI reply — append it to the history as well so the model
    // can see its own previous answer on the next turn.
    const res1 = await chain.invoke({
        history_message: await history.getMessages(),
    });
    await history.addMessage(res1);

    // Ask the next human question against the accumulated history.
    await history.addMessage(new HumanMessage("what is my name?"));
    const res2 = await chain.invoke({
        history_message: await history.getMessages(),
    });
    // Inspect the raw AIMessage returned by the chain.
    console.log(res2);
    /**
     * Sample output:
     *
     * AIMessage {
        "id": "chatcmpl-05cf2428-445f-4181-8df9-dff960aa5bac",
        "content": "Your name is Kai! You just told me that a moment ago. Is there anything else you'd like to chat about or any other questions you have?",
        "additional_kwargs": {},
        "response_metadata": {
            "tokenUsage": {
            "promptTokens": 145,
            "completionTokens": 31,
            "totalTokens": 176
            },
            "finish_reason": "stop",
            "model_name": "qwen-max"
        },
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": {
            "output_tokens": 31,
            "input_tokens": 145,
            "total_tokens": 176,
            "input_token_details": {
            "cache_read": 0
            },
            "output_token_details": {}
        }
       }
     */

    // Manual history management is shown here for learning purposes only;
    // in practice, RunnableWithMessageHistory maintains it automatically.
};

useChainDemo();