import { TaskNode } from "src/engine";
import { ActionResult } from "src/engine/typesd";
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { ActionStatus } from "src/engine/constant/constant";

export default class OpenaiNode extends TaskNode {
  static nodeTypeName = 'llm.OpenaiNode';

  /**
   * Execution logic for this node: builds a chat prompt from the node's
   * configured messages, formats it with the flow's global data, invokes the
   * configured OpenAI chat model, and returns the generated content.
   * @overridable Custom nodes may override this method.
   * @param params.executionId ID of the flow execution record
   * @param params.actionId ID of this node's execution record
   * @param params.nodeId Node ID
   * @returns Parameters for the next step. When nothing is returned, the node
   * is considered successful and the flow proceeds to the next step.
   */
  public async action(params: {
    executionId: string;
    actionId: string;
    nodeId: string;
  }): Promise<ActionResult> {
    const {
      modelName = 'gpt-3.5-turbo-1106',
      temperature = 0.7,
      messages,
      // SECURITY: the API key was previously hard-coded in source, which leaks
      // the credential to anyone with repo access. Read it from the node's
      // properties, falling back to the environment. The leaked key must be
      // rotated regardless.
      apiKey = process.env.OPENAI_API_KEY,
      // Keep the previous endpoint as the default so existing flows behave
      // identically, but allow overriding via properties or environment.
      baseURL = process.env.OPENAI_BASE_URL ?? 'https://oneapi.starringshop.com/v1',
    } = this.properties;

    const chatModel = new ChatOpenAI({
      openAIApiKey: apiKey,
      modelName,
      temperature,
      callbacks: [
        {
          handleLLMEnd(output) {
            // `llmOutput` is optional on LangChain's LLMResult — some
            // providers/streaming paths omit it. Guard to avoid a TypeError.
            console.log('handleLLMEnd', output.generations, output.llmOutput?.tokenUsage);
          },
        },
      ],
    }, {
      baseURL,
    });

    // Build the prompt from the configured message templates, then fill the
    // template variables from the flow's global data.
    const chatPrompt = ChatPromptTemplate.fromMessages(messages);
    const formattedChatPrompt = await chatPrompt.formatMessages(this.globalData);

    const ret = await chatModel.invoke(formattedChatPrompt);
    return { status: ActionStatus.SUCCESS, detail: { content: ret.content } };
  }
}
