/*
 * @Author: 布偶 941532058@qq.com
 * @Date: 2025-10-09 16:41:35
 * @LastEditors: 布偶 941532058@qq.com
 * @LastEditTime: 2025-10-09 16:41:40
 * @FilePath: /langchain-tutorial/src/streaming-response.ts
 * @Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
 */
import { ChatOpenAI } from "@langchain/openai";
import { HumanMessage } from "@langchain/core/messages";
import * as dotenv from "dotenv";

// Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.
dotenv.config();

// Chat model configured to emit tokens incrementally rather than
// returning a single final message.
const model = new ChatOpenAI({
  streaming: true, // enable streaming responses
  modelName: "gpt-3.5-turbo",
  openAIApiKey: process.env.OPENAI_API_KEY,
});

/**
 * Streams a chat completion token-by-token and writes each chunk to
 * stdout as it arrives, so the reply renders progressively.
 *
 * Errors from the stream (network, auth, rate limits) are caught and
 * logged rather than rethrown — this is a demo entry point.
 */
async function streamingExample(): Promise<void> {
  try {
    console.log("AI 正在思考中...");
    console.log("回复：");

    const stream = await model.stream([
      new HumanMessage("请详细介绍 LangChain.js 的主要特性和使用场景。")
    ]);

    // Process the streamed response chunk by chunk.
    for await (const chunk of stream) {
      // chunk.content is MessageContent (string | content-block array),
      // not plain string — writing it directly fails strict type checks
      // and would print garbage for structured content. Normalize first.
      const text =
        typeof chunk.content === "string"
          ? chunk.content
          : JSON.stringify(chunk.content);
      process.stdout.write(text);
    }

    console.log("\n\n--- 回复完成 ---");

  } catch (error) {
    console.error("流式处理失败：", error);
  }
}

// Fire-and-forget entry point; rejections are handled inside the function.
void streamingExample();
