// 隔壁的 manual 展示了在 chain 中手动管理 history 的 demo，这个 demo 展示在 chain 中自动管理 history 的 demo

import { config } from "dotenv";
import { useModel } from "../model";
import {
  ChatPromptTemplate,
  MessagesPlaceholder,
} from "@langchain/core/prompts";
import {
  RunnablePassthrough,
  RunnableSequence,
  RunnableWithMessageHistory,
} from "@langchain/core/runnables";
import { ChatMessageHistory, getBufferString } from "langchain/memory";
import { StringOutputParser } from "@langchain/core/output_parsers";

/**
 * Demo: automatic chat-history management inside a chain via
 * RunnableWithMessageHistory (contrast with the manual demo next door).
 * Builds a prompt with a history slot, wraps the chain so user/AI turns are
 * recorded and re-injected automatically, then runs two turns to show recall.
 */
export const useChainDemo = async () => {
  // dotenv's config() is synchronous — no await needed.
  config();
  const model = useModel();
  const prompt = ChatPromptTemplate.fromMessages([
    [
      "system",
      `You are a helpful assistant. Answer all questions to the best of your ability.
        You are talkative and provide lots of specific details from your context.
        If you do not know the answer to a question, you truthfully say you do not know.`,
    ],
    // MessagesPlaceholder creates a slot named "history_message"; the wrapper
    // substitutes the stored chat history into this position at invoke time.
    new MessagesPlaceholder("history_message"),
    ["human", "{input}"],
  ]);
  const history = new ChatMessageHistory();

  const chain = prompt.pipe(model);

  const chainWithHistory = new RunnableWithMessageHistory({
    // The wrapped chain — any runnable works here.
    runnable: chain,
    // Given a sessionId, return the matching ChatMessageHistory. This demo has
    // no session management, so every session maps to the one shared history.
    getMessageHistory: (_sessionId) => history,
    // Key under which the user's message arrives, so the wrapper knows which
    // value to record as the human turn.
    inputMessagesKey: "input",
    // Prompt key the stored history is injected into.
    historyMessagesKey: "history_message",
    // outputMessagesKey is omitted: the chain has a single output. With
    // multiple outputs it would name which one is the LLM reply to store.
  });

  // chainWithHistory is always constructed above — no optional chaining needed.
  const res1 = await chainWithHistory.invoke(
    { input: "hi, my name is Kai" },
    { configurable: { sessionId: "1111" } }
  );
  const res2 = await chainWithHistory.invoke(
    { input: "我的名字叫什么？" },
    { configurable: { sessionId: "1111" } }
  );
  console.log("res1: ", res1);
  console.log("res2: ", res2);
  console.log("history msgs: ", await history.getMessages());
};

// useChainDemo();
/**
 * history msgs:  [
  HumanMessage {
    "content": "hi, my name is Kai",
    "additional_kwargs": {},
    "response_metadata": {}
  },
  AIMessage {
    "id": "chatcmpl-2c450811-1305-4f77-83e1-3143e32ada4c",
    "content": "Hello Kai! It's great to meet you. I'm an AI assistant here to help with any questions or topics you'd like to discuss. Is there anything in particular on your mind today? Or would you like to just chat and get to know each other a bit? I'm happy to talk about pretty much any subject - from current events, to hobbies, to science and technology, to pop culture. The floor is yours!",
    "additional_kwargs": {},
    "response_metadata": {
      "tokenUsage": {
        "promptTokens": 73,
        "completionTokens": 87,
        "totalTokens": 160
      },
      "finish_reason": "stop",
      "model_name": "qwen-max"
    },
    "tool_calls": [],
    "invalid_tool_calls": [],
    "usage_metadata": {
      "output_tokens": 87,
      "input_tokens": 73,
      "total_tokens": 160,
      "input_token_details": {
        "cache_read": 0
      },
      "output_token_details": {}
    }
  },
  HumanMessage {
    "content": "我的名字叫什么？",
    "additional_kwargs": {},
    "response_metadata": {}
  },
  AIMessage {
    "id": "chatcmpl-6009a9ee-275b-4295-bdd4-26d3d189d4bf",
    "content": "你的名字是Kai。刚才你告诉我你的名字是Kai。有什么其他的事情你想聊一聊吗？或者你有其他问题需要帮助？",
    "additional_kwargs": {},
    "response_metadata": {
      "tokenUsage": {
        "promptTokens": 175,
        "completionTokens": 32,
        "totalTokens": 207
      },
      "finish_reason": "stop",
      "model_name": "qwen-max"
    },
    "tool_calls": [],
    "invalid_tool_calls": [],
    "usage_metadata": {
      "output_tokens": 32,
      "input_tokens": 175,
      "total_tokens": 207,
      "input_token_details": {
        "cache_read": 0
      },
      "output_token_details": {}
    }
  }
]
 */

// 可是就到此为止吗？ 实际场景中，我们不会每次都带上所有的 history，而是对 llm 的历史记录进行更多操作，例如只传递最近的 k 条历史记录等
// 实现一个自动对当前聊天历史记录进行总结，然后让 llm 根据总结的信息回复用户的 chain
// 入参：summary，new_lines
/**
 * Demo: instead of replaying the full history each turn, keep a rolling
 * summary. Each turn: record the user message, answer using the current
 * summary, then fold the new exchange into an updated summary and clear the
 * per-turn history.
 *
 * Fix: the original built `chatChain` and silently discarded it, so the
 * function had no observable effect. It now returns the assembled chain
 * (input: the user's message string; output: the model's reply string) and is
 * exported like the sibling demo.
 */
export const useChainDemoWithSummary = async () => {
  // dotenv's config() is synchronous — no await needed.
  config();
  const model = useModel();
  // Prompt that folds new conversation lines into the running summary.
  const summaryPrompt = ChatPromptTemplate.fromTemplate(`
        Progressively summarize the lines of conversation provided, adding onto the previous summary returning a new summary
        
        Current summary:
        {summary}
        
        New lines of conversation:
        {new_lines}
        
        New summary:
        `);
  const summaryChain = RunnableSequence.from([
    summaryPrompt,
    model,
    new StringOutputParser(),
  ]);
  // Scratch buffer holding only the current exchange; long-term memory lives
  // in `summary` below.
  const history = new ChatMessageHistory();
  const chatPrompt = ChatPromptTemplate.fromMessages([
    [
      "system",
      `You are a helpful assistant. Answer all questions to the best of your ability.

    Here is the chat history summary:
    {history_summary}
    `,
    ],
    ["human", "{input}"],
  ]);
  let summary = "";

  const chatChain = RunnableSequence.from([
    // The first step outputs an { input: ... } object so the next step's
    // assign() has an object to extend — calling assign on a bare string
    // throws "TypeError: Cannot assign to string".
    {
      input: new RunnablePassthrough({
        // Side effect: record the user's message while passing it through.
        func: (input: any) => history.addUserMessage(input),
      }),
    },
    RunnablePassthrough.assign({
      history_summary: () => summary,
    }),
    chatPrompt,
    model,
    new StringOutputParser(),
    new RunnablePassthrough({
      func: async (input: any) => {
        history.addAIMessage(input);
        const messages = await history.getMessages();
        const new_lines = getBufferString(messages);
        const newSummary = await summaryChain.invoke({
          summary,
          new_lines,
        });
        // Clear the per-turn buffer: it only ever holds the current exchange;
        // the durable record has been folded into `summary` as a string.
        history.clear();
        summary = newSummary;
      },
    }),
  ]);

  // Return the chain so callers can actually invoke it.
  return chatChain;
};

// 其实还可以看看ConversationSummaryMemory和ConversationSummaryBufferMemory的方案，都是比较新的 api