import { StructuredOutputParser } from "@langchain/core/output_parsers";
import { PromptTemplate } from "@langchain/core/prompts";
import { ChatOllama } from "@langchain/ollama";

// Structured-output parser: generates format instructions for the model
// (injected into the prompt below) and later parses the raw LLM text into
// an object with these three string fields.
const parser = StructuredOutputParser.fromNamesAndDescriptions({
  answer: "用户问题的答案",
  // NOTE(review): this description translates to "the answer your answer is
  // based on" — presumably it was meant to describe the *evidence/source*;
  // confirm intent before changing, since this text is sent to the model.
  evidence: "你回答用户问题所依据的答案",
  confidence: "问题答案的可信度评分，格式是百分数",
});

// Prompt template: the parser's format instructions are interpolated into
// {instructions}, the user's question into {question}.
const questionTemplate = "请回答问题：\n{instructions} \n{question}";
const pt = PromptTemplate.fromTemplate(questionTemplate);

// Chat model served by a local Ollama instance.
const ollamaOptions = {
  model: "llama3",
  temperature: 0.7,
};
const model = new ChatOllama(ollamaOptions);

// Pipe prompt into model only; the parser is applied manually after the
// stream completes so the raw output can be shown first.
const chain = pt.pipe(model);

const res = await chain.stream({
  question: "蒙娜丽莎的作者是谁？是什么时候绘制的",
  instructions: parser.getFormatInstructions(),
});

console.log("原始输出：");
let output = ""; // accumulates the streamed text for structured parsing
for await (const chunk of res) {
  // chunk.content is MessageContent (string | MessageContentComplex[]), so
  // it must be narrowed before writing/concatenating — passing the array
  // form directly would fail strict type-checking and print garbage.
  const text =
    typeof chunk.content === "string"
      ? chunk.content
      : JSON.stringify(chunk.content);
  process.stdout.write(text);
  output += text;
}

// Parse the accumulated raw text into { answer, evidence, confidence }.
// Throws if the model's output does not match the format instructions.
const result = await parser.invoke(output);
console.log("\n结构化后的输出：", result);
