// 文件：src/ch09/nodes.ts
import { RunnableLambda, RunnableSequence } from "@langchain/core/runnables";
import { ChatOllama } from '@langchain/ollama';
import { JsonOutputParser } from "@langchain/core/output_parsers";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { RagState } from "./state";
import { hybridRetriever, timeAwareRetriever, topKRetriever, RetrievedDoc } from "./retrievers";

/**
 * Retrieval node: dispatches to the retriever selected by `s.retriever`
 * ("hybrid" | "time" | "user" | anything else → top-k fallback), then
 * normalizes the results into the `hits` shape used by downstream nodes.
 *
 * On failure it does not throw: it returns empty results plus an `error`
 * message so the graph can route to error handling.
 */
export const retrieveNode = async (s: RagState): Promise<Partial<RagState>> => {
  try {
    let retrievedDocs: RetrievedDoc[];

    // Pick the retriever based on the strategy carried in state.
    switch (s.retriever) {
      case "hybrid":
        // Example: hybrid retrieval filtered to documents from "laozhou.txt".
        retrievedDocs = await hybridRetriever(s.question, { source: "laozhou.txt" });
        break;
      case "time":
        // Example: time-aware retrieval over the last 30 days.
        retrievedDocs = await timeAwareRetriever(s.question, 30);
        break;
      case "user":
        // If a userAwareRetriever is added later, plug it in here, e.g.:
        // retrievedDocs = await userAwareRetriever(s.question, { id: s.userId, dept: s.dept });
        // For now, fall back to top-k.
        retrievedDocs = await topKRetriever(s.question, 3);
        break;
      default:
        // Default: plain top-k retrieval, top 3 documents.
        retrievedDocs = await topKRetriever(s.question, 3);
    }

    // Convert RetrievedDoc[] into the `hits` shape consumed by later nodes.
    const hits = retrievedDocs.map((doc, index) => ({
      id: `doc_${index}_${doc.source}`,
      text: doc.content,
      meta: { source: doc.source, chunkIndex: index },
      score: doc.score,
    }));

    return {
      retrievedDocs,
      hits,
      logs: [...s.logs, `检索完成，共获取 ${retrievedDocs.length} 篇文档`],
    };
  } catch (error: unknown) {
    // Strict-mode idiom: catch `unknown` and narrow, instead of `any`.
    const message = error instanceof Error ? error.message : String(error);
    console.error("❌ retrieveNode 执行失败:", error);
    return {
      error: `检索失败: ${message}`,
      hits: [],
      retrievedDocs: [],
      logs: [...s.logs, `检索失败: ${message}`],
    };
  }
};

/**
 * Fusion node: deduplicates retrieved hits by (source, chunkIndex) and
 * produces the `fused` candidate list used for answer generation.
 *
 * The first occurrence of a (source, chunkIndex) pair wins; later
 * duplicates are dropped.
 */
export const fuseNode = async (s: RagState): Promise<Partial<RagState>> => {
  const seen = new Set<string>();
  const fused: RagState["fused"] = [];
  for (const h of s.hits ?? []) {
    // A source/chunk pair uniquely identifies a chunk for dedup purposes.
    const key = `${h.meta?.source}#${h.meta?.chunkIndex}`;
    if (seen.has(key)) continue;
    seen.add(key);
    fused.push({ id: key, text: h.text, meta: h.meta });
  }
  // Append to the accumulated log trail instead of replacing it,
  // consistent with retrieveNode (which spreads s.logs).
  return { fused, logs: [...s.logs, "已融合去重"] };
};

// Prompt for the answer node: the system message pins the assistant to the
// retrieved candidate chunks and demands a strict JSON reply with citations
// and a confidence score; the human message injects the question ({q}) and
// the formatted candidate chunks ({chunks}).
const prompt = ChatPromptTemplate.fromMessages([
  ["system", `你是严谨的企业知识助手，仅依据候选片段回答，引用必须给出来源。输出 JSON：
{"answer": string, "citations": [{"source": string, "chunkId": string}], "confidence": number}`],
  ["human", `问题：{q}\n候选：\n{chunks}\n输出 JSON：`],
]);

/**
 * Answer node: formats the fused chunks into the prompt, invokes the LLM,
 * and parses its JSON reply ({answer, citations, confidence}) into state.
 */
export const answerNode = async (s: RagState): Promise<Partial<RagState>> => {
  const llm = new ChatOllama({
    // Allow overriding endpoint/model via environment; the defaults keep
    // the original hard-coded values, so behavior is unchanged when unset.
    baseUrl: process.env.OLLAMA_BASE_URL ?? 'http://192.168.31.159:11434',
    model: process.env.OLLAMA_MODEL ?? 'llama3.1:latest',
  });
  const seq = RunnableSequence.from([
    // BUG FIX: RunnableLambda's constructor expects a `{ func }` fields
    // object; a bare function must go through the `RunnableLambda.from`
    // static factory. Also renamed the map index to avoid shadowing the
    // outer input variable `i`.
    RunnableLambda.from((input: RagState) => ({
      q: input.question,
      // Cap each chunk at 300 chars to keep the prompt bounded.
      chunks: (input.fused ?? [])
        .map((c: any, idx: number) => `#${idx} [${c.meta?.source}] ${String(c.text).slice(0, 300)}`)
        .join("\n"),
    })),
    prompt,
    llm,
    new JsonOutputParser(),
  ]);
  const ans = await seq.invoke(s);
  // Append to the log trail rather than replacing it (consistent with retrieveNode).
  return { answer: ans, logs: [...s.logs, "已生成回答"] };
};

/**
 * Guard node: validates the generated answer — it must exist and carry at
 * least one citation. On failure it sets an error code ("NO_ANSWER" or
 * "INVALID_CITATIONS") for the graph to route on.
 */
export const guardNode = async (s: RagState): Promise<Partial<RagState>> => {
  if (!s.answer) {
    return { error: "NO_ANSWER", logs: [...s.logs, "回答为空"] };
  }
  const hasCitations = Array.isArray(s.answer.citations) && s.answer.citations.length > 0;
  if (!hasCitations) {
    return { error: "INVALID_CITATIONS", logs: [...s.logs, "缺少引用"] };
  }
  // Append to the accumulated log trail (consistent with retrieveNode).
  return { logs: [...s.logs, "引用校验通过"] };
};

/**
 * Feedback node: placeholder that records user feedback.
 * Wire this up to a database / logging system in production.
 */
export const feedbackNode = async (s: RagState): Promise<Partial<RagState>> => {
  // Persisting the feedback (DB write / log pipeline) would go here.
  // Append to the log trail rather than replacing it (consistent with retrieveNode).
  return { logs: [...s.logs, "已记录反馈"] };
};