import { ChatOllama } from "@langchain/ollama";
import { PromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { RunnableMap } from "@langchain/core/runnables";

// Chat model backed by a locally running Ollama instance.
const model = new ChatOllama({
  model: "llama3",
  temperature: 0.7,
});

// Converts the model's chat-message output into a plain string.
const parser = new StringOutputParser();

// Builds a prompt -> model -> string pipeline from a template string.
const makeChain = (template: string) =>
  PromptTemplate.fromTemplate(template).pipe(model).pipe(parser);

// Chain 1: tell a joke (in Chinese) about {topic}.
const chain1 = makeChain("用中文讲一个关于 {topic} 的笑话");
// Chain 2: write a two-line poem (in Chinese) about {topic}.
const chain2 = makeChain("用中文写一首关于 {topic} 的两行诗");

// RunnableMap fans the same input out to both chains concurrently
// and gathers their outputs under the keys below.
const chain = RunnableMap.from({
  joke: chain1,
  poem: chain2,
});

// Result shape: { joke: string, poem: string }.
const res = await chain.invoke({
  topic: "小狗",
});
console.log(res);
