const { Ollama } = require('@langchain/ollama');
const { StringOutputParser } = require('@langchain/core/output_parsers');
const { PromptTemplate } = require('@langchain/core/prompts');
const { RunnableSequence } = require('@langchain/core/runnables');
const { SystemMessage, HumanMessage } =  require('@langchain/core/messages');
const VectorStoreService = require('../core/vectorStore');
const ResultWrapper = require('../utils/resultWrapper');
const { FileRagService, WebRagService, GithubRagService} = require('../core/rag');

// Main Ollama text model, shared by plain chat and RAG QA.
// Env overrides: OLLAMA_BASE_URL, OLLAMA_MAIN_MODEL.
const ollama = new Ollama({
  baseUrl: process.env.OLLAMA_BASE_URL || 'http://localhost:11434',
  model: process.env.OLLAMA_MAIN_MODEL || 'deepseek-r1:8b',
  temperature: 0.7, // controls output randomness
});

// Vision-capable Ollama model.
// Improvement: model name is now env-configurable (OLLAMA_VISION_MODEL),
// consistent with how the main model is configured above.
const ollamaVision = new Ollama({
  baseUrl: process.env.OLLAMA_BASE_URL || 'http://localhost:11434',
  model: process.env.OLLAMA_VISION_MODEL || 'llama3.2-vision:latest',
  temperature: 0.7,
});

// Parses whatever the model returns into a plain string.
const outputParser = new StringOutputParser();

class ChatService {
  static COLLECTION_NAME = 'documents';

  /**
   * Answer a question about an image using the vision model.
   *
   * @param {string} question - The user's question about the image.
   * @param {string} imageBase64 - Base64-encoded image payload (without a data-URL prefix).
   * @param {string} mimeType - MIME type of the image, e.g. "image/png".
   * @returns {Promise<object>} ResultWrapper.success with the model's answer,
   *   or ResultWrapper.error with the failure message.
   */
  static async visionChat(question, imageBase64, mimeType) {
    try {
      // Build a multimodal prompt: a system instruction plus one human message
      // carrying both the image (as a data URL) and the text question.
      // Fix: LangChain content blocks use { type: "text", text } — the previous
      // { type: "text", data } shape is not part of that contract, so the
      // question text was being dropped. The image block is normalized to the
      // documented { type: "image_url", image_url } form.
      const multiModalMessage = [
        new SystemMessage({
          content: "你是一个能够理解图像的AI助手，请根据用户的问题和图片内容提供准确的回答。"
        }),
        new HumanMessage({
          content: [
            {
              type: "image_url",
              image_url: `data:${mimeType};base64,${imageBase64}`
            },
            {
              type: "text",
              text: question
            }
          ]
        })
      ];

      // Run model -> string parser on the multimodal messages.
      const response = await ollamaVision
        .pipe(outputParser)
        .invoke(multiModalMessage);

      return ResultWrapper.success(response);
    } catch (error) {
      console.error('视觉聊天服务错误:', error);
      return ResultWrapper.error(error.message);
    }
  }

  /**
   * Plain text chat with the main model.
   *
   * @param {string} humanMessage - The user's message.
   * @param {string} [systemMessage] - Optional system prompt; when omitted only
   *   the human message is sent.
   * @returns {Promise<object>} ResultWrapper with the model reply or the error message.
   */
  static async chat(humanMessage, systemMessage) {
    try {
      const userMessage = new HumanMessage(humanMessage);
      // With a system prompt, send a [system, human] pair; otherwise the bare
      // human message is enough.
      const invokeParams = systemMessage
        ? [new SystemMessage(systemMessage), userMessage]
        : userMessage;
      const response = await ollama
        .pipe(outputParser)
        .invoke(invokeParams);
      return ResultWrapper.success(response);
    } catch (error) {
      // Fix: log before returning, consistent with the other service methods
      // (errors were previously swallowed silently here).
      console.error('聊天服务错误:', error);
      return ResultWrapper.error(error.message);
    }
  }

  /**
   * Retrieval-augmented chat: search the file / web / GitHub collections for
   * context relevant to the question, then ask the main model to answer from
   * that context only.
   *
   * @param {string} question - The user's question.
   * @returns {Promise<object>} ResultWrapper with the grounded answer, a
   *   fallback message when nothing relevant was found, or the error message.
   */
  static async ragChat(question) {
    try {
      // The three collections are independent — query them in parallel
      // instead of awaiting each search sequentially.
      const [fileRelevantDocs, webRelevantDocs, githubRelevantDocs] =
        await Promise.all([
          VectorStoreService.similaritySearch(FileRagService.COLLECTION_NAME, question, 1),
          VectorStoreService.similaritySearch(WebRagService.COLLECTION_NAME, question, 1),
          VectorStoreService.similaritySearch(GithubRagService.COLLECTION_NAME, question, 1),
        ]);

      // Bug fix: the GitHub results were fetched but never merged into the
      // context — they are now included alongside file and web hits.
      const relevantDocs = [
        ...fileRelevantDocs,
        ...webRelevantDocs,
        ...githubRelevantDocs,
      ];
      if (relevantDocs.length === 0) {
        return ResultWrapper.success('抱歉，我没有找到相关的信息来回答这个问题。');
      }

      // Merge the retrieved snippets into a single context string.
      // NOTE(review): assumes similaritySearch returns objects exposing a
      // `content` field (not LangChain's `pageContent`) — confirm against
      // VectorStoreService.
      const context = relevantDocs
        .map(doc => doc.content)
        .join('\n\n');

      // Prompt template instructing the model to answer only from the context.
      const qaPrompt = PromptTemplate.fromTemplate(`使用以下上下文来回答问题。如果你不知道答案，就说你不知道，不要试图编造答案。

  上下文：{context}
  
  问题：{question}
  
  答案：`);

      // QA chain: input mapping -> prompt -> model -> string output.
      const qaChain = RunnableSequence.from([
        {
          context: (input) => input.context,
          question: (input) => input.question,
        },
        qaPrompt,
        ollama,
        outputParser,
      ]);

      const response = await qaChain.invoke({
        context,
        question,
      });

      return ResultWrapper.success(response);
    } catch (error) {
      console.error('QA处理失败:', error);
      return ResultWrapper.error(error.message);
    }
  }
}

// Export the service class (CommonJS) for route handlers / other modules.
module.exports = ChatService;