const { Controller } = require('egg');
const { Readable } = require('stream');
const { Chroma } = require('@langchain/community/vectorstores/chroma');
const { ChatOllama } = require('@langchain/community/chat_models/ollama');
const { OllamaEmbeddings } = require('@langchain/community/embeddings/ollama');
const { HumanMessage } = require('@langchain/core/messages');
const {
  createStuffDocumentsChain,
} = require('langchain/chains/combine_documents');
const {
  ChatPromptTemplate,
  MessagesPlaceholder,
} = require('@langchain/core/prompts');
const OpenAI = require('openai');
const { OpenAIStream, StreamingTextResponse } = require('ai');
const {
  RunnablePassthrough,
  RunnableSequence,
} = require('@langchain/core/runnables');
const { PrismaClient } = require('@prisma/client');

// Base URL of the Ollama server (e.g. http://localhost:11434), read once at module load.
const { HOST } = process.env;

// RAG system prompt. `{context}` is a LangChain prompt-template placeholder
// that is filled with the retrieved documents at runtime — this is a runtime
// string, not a comment; edit with care.
const SYSTEM_TEMPLATE = `Answer the user's questions based on the below context.
Your answer should be in the format of Markdown.
If the context doesn't contain any relevant information to the question, don't make something up and just say "I don't know":
<context>
{context}
</context>
`;
/**
 * Coerce a value to an integer id.
 *
 * - Numbers pass through untouched.
 * - Falsy inputs ('' / null / undefined / 0) are returned as-is, so the
 *   caller's truthiness check still sees them as "absent".
 * - Strings are parsed base-10; unparsable strings yield 0.
 *
 * @param {number|string|null|undefined} str - candidate id
 * @returns {number|string|null|undefined} parsed integer, or the falsy input unchanged
 */
function toInt(str) {
  if (typeof str === 'number') return str;
  if (!str) return str;
  const parsed = Number.parseInt(str, 10);
  return Number.isNaN(parsed) ? 0 : parsed;
}
class ChatController extends Controller {
  /**
   * POST handler: answer the latest user message with RAG.
   *
   * Request body: { knowledgebaseId, model, messages }.
   * Looks up the knowledge base record, retrieves the top-4 relevant
   * documents from its Chroma collection (embedded with the same Ollama
   * model the collection was built with), runs a "stuff documents" chain
   * against the Ollama chat model, and streams the answer back as
   * newline-delimited JSON chunks in an Ollama-style `message` envelope.
   */
  async index() {
    const { ctx } = this;
    const { model, messages } = ctx.request.body;
    const knowledgebaseId = toInt(ctx.request.body.knowledgebaseId);

    if (!knowledgebaseId) {
      // A missing/invalid request parameter is a client error (400),
      // not a "resource not found" (the original returned 404 here).
      ctx.status = 400;
      ctx.body = {
        data: '无数据库参数',
      };
      return;
    }

    console.log('Chat with knowledge base with id: ', knowledgebaseId);

    // One Prisma client per request; always disconnect (even when the
    // lookup throws) so database connections are not leaked.
    const prisma = new PrismaClient();
    let knowledgebase;
    try {
      knowledgebase = await prisma.knowledgeBase.findUnique({
        where: {
          id: knowledgebaseId,
        },
      });
    } finally {
      await prisma.$disconnect();
    }
    console.log(
      `Knowledge base ${knowledgebase?.name} with embedding "${knowledgebase?.embedding}"`
    );
    if (!knowledgebase) {
      ctx.status = 404;
      ctx.body = {
        code: -1,
        data: `Knowledge base with id ${knowledgebaseId} not found`,
      };
      return;
    }

    // Embed the query with the model recorded on the knowledge base so it
    // matches the vectors already stored in the collection.
    const embeddings = new OllamaEmbeddings({
      model: `${knowledgebase.embedding}`,
      baseUrl: HOST,
    });

    // Each knowledge base owns one Chroma collection; return the 4 most
    // relevant documents for the query.
    const retriever = new Chroma(embeddings, {
      collectionName: `collection_${knowledgebase.id}`,
      url: process.env.CHROMADB_URL,
    }).asRetriever(4);

    const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
      [ 'system', SYSTEM_TEMPLATE ],
      new MessagesPlaceholder('messages'),
    ]);

    // Chat model served by Ollama; configure an OpenAI key to use a custom
    // OpenAI model instead.
    const chat = new ChatOllama({
      baseUrl: HOST,
      model,
    });

    // The user's question is the last message of the conversation.
    const query = messages[messages.length - 1].content;
    console.log('User query: ', query, {
      baseUrl: HOST,
      model,
    });

    // NOTE: the original also called `retriever.invoke(query)` here purely
    // for debug logging, which doubled the embedding + vector-search cost
    // per request; the retrieval chain below performs the real retrieval.

    // "Stuff" chain: concatenates the retrieved docs into {context}.
    const documentChain = await createStuffDocumentsChain({
      llm: chat,
      prompt: questionAnsweringPrompt,
    });

    // Pull the question text out of the chain input for the retriever.
    const parseRetrieverInput = params => {
      return params.messages[params.messages.length - 1].content;
    };

    const retrievalChain = RunnablePassthrough.assign({
      context: RunnableSequence.from([ parseRetrieverInput, retriever ]),
    }).assign({
      answer: documentChain,
    });

    const response = await retrievalChain.stream({
      messages: [ new HumanMessage(query) ],
    });

    // Re-shape each streamed chunk into the wire format the client expects:
    // one JSON object per chunk, Ollama-style `{ message: { role, content } }`.
    const readableStream = Readable.from(
      (async function* () {
        for await (const chunk of response) {
          const message = {
            message: {
              role: 'assistant',
              content: chunk?.answer,
            },
          };
          yield `${JSON.stringify(message)}\n\n`;
        }
      })()
    );
    ctx.status = 200;
    ctx.body = readableStream;
  }

  /**
   * POST handler: proxy the conversation to the OpenAI chat-completions
   * API (gpt-3.5-turbo) and stream the reply back as text.
   */
  async gpt() {
    const { ctx } = this;
    const { messages } = ctx.request.body;

    // SECURITY: the API key must come from the environment — the original
    // hard-coded a live secret in source, which must be revoked.
    const openai = new OpenAI({
      apiKey: process.env.OPENAI_API_KEY,
      // The SDK option is `baseURL` (not `baseUrl`, which it ignores), and
      // it takes the API root — the SDK appends `/chat/completions` itself.
      baseURL: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1',
    });

    // Forward only role/content; strip any client-side extras (ids, etc.).
    const pickedMessages = messages.map(message => ({
      role: message.role,
      content: message.content,
    }));
    console.log(pickedMessages);

    const response = await openai.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: pickedMessages,
      stream: true,
    });

    // NOTE(review): StreamingTextResponse is a WHATWG Response object;
    // confirm Egg/Koa streams its body correctly to the client.
    const stream = OpenAIStream(response);
    const readableStream = new StreamingTextResponse(stream);
    ctx.status = 200;
    ctx.body = readableStream;
  }
}

// Egg.js discovers controllers via the module's default CommonJS export.
module.exports = ChatController;
