import { ChatOpenAI } from "@langchain/openai";
import { ChatOllama } from "@langchain/ollama";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { RunnableLambda, RunnableSequence } from "@langchain/core/runnables";
import dotenv from "dotenv";
dotenv.config();

// 1. Set up the three chat models used by the fallback chains.

// Options shared by every model: deterministic output, token streaming on.
const sharedModelOptions = { temperature: 0, streaming: true };

// Primary model — tried first.
const primaryLLM = new ChatOpenAI({
  model: "gpt-4o-mini",
  // apiKey: process.env.API_KEY,
  ...sharedModelOptions,
});

// Backup model — used when the primary model fails.
const secondaryLLM = new ChatOpenAI({
  model: "gpt-3.5-turbo",
  // apiKey: process.env.API_KEY,
  ...sharedModelOptions,
});

// Local model — last resort, served by Ollama.
const localLLM = new ChatOllama({
  model: "llama3",
  ...sharedModelOptions,
});

// Prompt template with a single {question} slot. The template text itself
// is Chinese ("answer in Chinese, in at most three sentences: {question}")
// and is part of runtime behavior — do not translate it.
const pt = ChatPromptTemplate.fromTemplate(
  "请用中文、最多三句话回答：{question}"
);

// Parser that reduces the model's chat message output to a plain string.
const parser = new StringOutputParser();

// Middleware factory: returns a runnable that prefixes a chain's string
// output with its origin label (primary / secondary / local chain), so the
// caller can tell which chain produced the answer.
function tagOutput(source) {
  return RunnableLambda.from(async (text) => `[from=${source}] ${text}`);
}

// Assemble the three candidate chains. They all share the same prompt and
// parser and differ only in the model in the middle, so build them with a
// single factory instead of repeating the pipeline three times.
//
// @param {object} llm - chat model placed in the middle of the pipeline
// @param {string} [source] - optional label; when provided, the chain's
//   output is prefixed via tagOutput so the caller can see which chain
//   answered (replaces the old commented-out tagOutput steps)
// @returns {object} runnable sequence: prompt -> llm -> string parser
//   (-> tagOutput when source is given)
function makeChain(llm, source) {
  const steps = [pt, llm, parser];
  if (source !== undefined) {
    steps.push(tagOutput(source));
  }
  return RunnableSequence.from(steps);
}

// e.g. makeChain(primaryLLM, "primary") to enable output tagging.
const primaryChain = makeChain(primaryLLM);
const secondaryChain = makeChain(secondaryLLM);
const localChain = makeChain(localLLM);

// Compose a resilient chain from the three above: try the primary chain
// first, fall back to the backup chain on failure, then to the local model.
const canFallbackChain = primaryChain.withFallbacks([
  secondaryChain,
  localChain,
]);

// Stream the answer token-by-token. Without the try/catch, a failure of all
// three chains would surface as an unhandled top-level-await rejection and
// crash the process with a raw stack trace.
try {
  const stream = await canFallbackChain.stream({
    question: "RAG的核心是什么？",
  });
  for await (const chunk of stream) {
    process.stdout.write(chunk);
  }
  process.stdout.write("\n"); // terminate the streamed line
} catch (err) {
  console.error("All chains (primary, secondary, local) failed:", err);
  process.exitCode = 1;
}
