import { ChatOpenAI } from "@langchain/openai";
import dotenv from "dotenv";
dotenv.config();
import { fileLoggingHandler } from "./fileLog.js";
import { consoleLoggingHandler } from "./consoleLog.js";

// Streaming chat-model client. The API key is read from the environment
// (loaded above via dotenv); temperature 0 keeps output deterministic.
const llm = new ChatOpenAI({
  apiKey: process.env.API_KEY,
  model: "gpt-4o-mini",
  streaming: true,
  temperature: 0,
});

// Stream a chat completion for the prompt "你好", routing run-lifecycle
// events (start / new token / end) through the file and console logging
// callback handlers. (Removed a block of commented-out inline handlers
// that duplicated what the imported handlers already do.)
const stream = await llm.stream("你好", {
  callbacks: [fileLoggingHandler, consoleLoggingHandler],
});

// Print each streamed token as it arrives, with no per-chunk newline.
// NOTE(review): assumes chunk.content is a string — for multimodal
// responses it can be an array of content parts; confirm if that matters.
for await (const chunk of stream) {
  process.stdout.write(chunk.content);
}
