/**
 * 构建prompt，提交模型，接收响应流，处理响应流。
 */
const { ChatZhipuAI } = require("@langchain/community/chat_models/zhipuai");
const { HumanMessage } = require("@langchain/core/messages");
const { GlmModelProvider } = require("./provider/model/glm");
const { RunnableConfig } = require("@langchain/core/runnables");
const { parseAST } = require("./provider/utils");

class ASTService {
  /**
   * Build the prompt used for AST generation.
   *
   * This is a lightweight, RAG-like approach: once a knowledge base is
   * available, vector retrieval can be used to enrich the prompt and greatly
   * improve generation quality and accuracy. For now it only constructs a
   * fixed prompt for low-code component development that sets the model's
   * role and the required output shape.
   *
   * @param {{ message: string }} param0 - `message` is the component
   *   description (in Chinese) to interpolate into the prompt template.
   * @returns {Promise<string>} The prompt with the description interpolated.
   */
  async buildGeneratePrompt({ message }) {
    const codePrompt = `
      You are an expert in generating abstract syntax trees (AST).
      Your task is to generate an AST based on the following description.

      Description of the component in Chinese is: #{message}

      Please ensure the AST is structured properly and represents the component accurately.
      The AST should include the following details:
      1. The component type.
      2. The properties it accepts.
      3. The events it can handle.
      4. Any child components it may include.

      Please return the AST in a JSON format, following this structure:

      {
        "type": "ComponentType",
        "props": {
          "propName1": "propValue1",
          "propName2": "propValue2"
        },
        "events": {
          "eventName": "eventHandler"
        },
        "children": [
          {
            "type": "ChildComponentType",
            "props": {
              "childPropName": "childPropValue"
            }
          }
        ]
      }

      Make sure to be as detailed as possible, and avoid any additional explanations or comments. Just return the AST structure as specified.
    `;

    // Use a replacer FUNCTION so that "$"-patterns in user input (e.g. "$&",
    // "$'") are inserted literally instead of being interpreted as special
    // replacement patterns by String.prototype.replace.
    return codePrompt.replace("#{message}", () => message);
  }

  /**
   * Generate an AST from `message` via the GLM model and stream the result
   * to `response` as Server-Sent Events (SSE).
   *
   * @param {string} message - Component description to generate an AST for.
   * @param {import('http').ServerResponse} response - Response to stream SSE
   *   frames to; it is always ended when streaming finishes or fails.
   * @returns {Promise<void>}
   */
  async astGenerate(message, response) {
    // Instantiate the model provider and a runnable wired to an abort signal
    // so the model call can be cancelled if streaming fails.
    const modelProvider = new GlmModelProvider();
    const aiRunnableAbortController = new AbortController();
    const aiRunnable = await modelProvider.createRunnable({
      signal: aiRunnableAbortController.signal,
    });

    // A fresh session id per request (timestamp-based), so each generation
    // normally starts with no prior history.
    const sessionId = `code_session_${Date.now()}`;
    const aiRunnableConfig = {
      configurable: {
        sessionId,
      },
    };

    const sessionIdHistoriesMap = await GlmModelProvider.sessionIdHistoriesMap;
    const isSessionHistoryExists = !!sessionIdHistoriesMap[sessionId];

    // Build the full generation prompt from the user's description.
    const prompt = await this.buildGeneratePrompt({ message });

    // First call for a session sends the full prompt; if history already
    // exists for this session, only ask the model to continue its output.
    // (The original code also deleted the history entry in the no-history
    // branch, which was a no-op and has been removed.)
    const buildStream = () => {
      if (!isSessionHistoryExists) {
        return aiRunnable.stream(
          {
            input: prompt,
          },
          aiRunnableConfig
        );
      }
      return aiRunnable.stream(
        {
          input: `
                          continue, please do not reply with any text other than the code, and do not use markdown syntax.
                          go continue.
                      `,
        },
        aiRunnableConfig
      );
    };

    try {
      const aiStream = await buildStream();
      if (aiStream) {
        for await (const chunk of aiStream) {
          const text = GlmModelProvider.answerContentToText(chunk.content);
          console.log(text);

          // SSE frame: one JSON-encoded chunk per "data:" line.
          response.write(`data: ${JSON.stringify(text)}\n\n`);
        }
      }
    } catch (err) {
      // Cancel the in-flight model call and log; previously a stream error
      // propagated out without ever ending the response, leaving the SSE
      // connection hanging.
      aiRunnableAbortController.abort();
      console.error("astGenerate: streaming failed:", err);
    } finally {
      // Always terminate the SSE response, on success or failure.
      response.end();
    }
  }
}

// Export a shared singleton instance; all requires of this module reuse it.
module.exports = new ASTService();
