import { NextRequest, NextResponse } from "next/server";

import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";
import { Document } from "@langchain/core/documents";
import {
    BytesOutputParser,
    StringOutputParser,
} from "@langchain/core/output_parsers";
import {
    RunnableSequence,
    RunnablePassthrough,
} from "@langchain/core/runnables";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";
import "cheerio";
import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
import { pinecone } from "@/lib/utils/pinecone-client";
import { PINECONE_INDEX_NAME, PINECONE_NAME_SPACE } from "@/config/pinecone";
import { PineconeStore } from "@langchain/pinecone";
import { MultiQueryRetriever } from "langchain/retrievers/multi_query";

// Serve this route from the Edge runtime.
export const runtime = "edge";

// Prompt for condensing a follow-up question into a standalone question.
// NOTE(review): the text says "Given the following conversation", but the
// template contains no {chat_history} placeholder, so the conversation is
// never actually injected — confirm whether {chat_history} should be added.
const CONDENSE_QUESTION_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.

Follow Up Input: {question}
Standalone question:`;
const condenseQuestionPrompt = PromptTemplate.fromTemplate(CONDENSE_QUESTION_TEMPLATE);

// Prompt for answering (in Chinese) strictly from the retrieved context.
const ANSWER_TEMPLATE = `Answer the question in Chinese based on only the following context:

<context>
    {context}
</context>

User: {question}`;
const answerPrompt = PromptTemplate.fromTemplate(ANSWER_TEMPLATE);

/**
 * Flattens retrieved documents into one newline-separated text blob,
 * suitable for interpolation into the {context} slot of a prompt.
 */
export function formatDocumentsAsString(documents: Document[]) {
    const contents: string[] = [];
    for (const document of documents) {
        contents.push(document.pageContent);
    }
    return contents.join("\n");
}

// Serializes prior chat turns into a plain-text transcript. User turns are
// labeled "Human" and assistant turns "Assistant"; any other role keeps its
// raw role string as the label.
const formatVercelMessages = (chatHistory: VercelChatMessage[]) => {
    const roleLabels: Record<string, string> = {
        user: "Human",
        assistant: "Assistant",
    };
    return chatHistory
        .map((message) => `${roleLabels[message.role] ?? message.role}: ${message.content}`)
        .join("\n");
};

export async function POST(req: NextRequest) {
    try {
        const body = await req.json();
        const messages = body.messages ?? [];
        const previousMessages = messages.slice(0, -1);
        const currentMessageContent = messages[messages.length - 1].content;

        // 读取网址
        // const loader = new CheerioWebBaseLoader(
        //     "https://js.langchain.com/v0.1/docs/get_started/introduction/"
        // );
        // const rawDocs = await loader.load();
        // const textSplitter = new RecursiveCharacterTextSplitter({
        //     chunkSize: 1000,
        //     chunkOverlap: 200,
        // });
        // const docs = await textSplitter.splitDocuments(rawDocs);
        // const vectorStore = await MemoryVectorStore.fromDocuments(
        //     docs,
        //     new OpenAIEmbeddings()
        // );

        // 自定义文档内容
        // const vectorStore = await MemoryVectorStore.fromDocuments(
        //     [
        //         new Document({ pageContent: "Harrison worked at Kensho" }),
        //         new Document({ pageContent: "Bears like to eat honey." }),
        //     ],
        //     new OpenAIEmbeddings()
        // );

        // 读取向量库
        const vectorStore = await PineconeStore.fromExistingIndex(
            new OpenAIEmbeddings(),
            {
                pineconeIndex: pinecone.Index(PINECONE_INDEX_NAME),
                namespace: PINECONE_NAME_SPACE,
                textKey: 'text'
            }
        );

        const llm = new ChatOpenAI({
            temperature: 0,
            modelName: "gpt-3.5-turbo"
        });

        // const retriever = vectorStore.asRetriever(1);
        const retriever = MultiQueryRetriever.fromLLM({
            llm,
            retriever: vectorStore.asRetriever(1)
        });

        // 独立会话链
        const standaloneQuestionChain = RunnableSequence.from([
            condenseQuestionPrompt,
            llm,
            new StringOutputParser(),
        ]);

        // 文档检索链
        const retrievalChain = retriever.pipe(formatDocumentsAsString);
        const retrievalQaChain = RunnableSequence.from([
            {
                // input: new RunnablePassthrough(),
                // context: retrievalChain,
                context: RunnableSequence.from([
                    (input) => input.question,
                    retrievalChain
                ]),
                question: (input) => input.question,
            },
            answerPrompt,
            llm
        ]);

        // 会话文档检索链
        const conversationalRetrievalQAChain = RunnableSequence.from([
            {
                question: standaloneQuestionChain,
            },
            retrievalQaChain,
            new BytesOutputParser()
        ]);
        const result = await conversationalRetrievalQAChain.stream({
            question: currentMessageContent
        });

        // const combineDocsChain = await createStuffDocumentsChain({
        //     llm,
        //     prompt,
        //     outputParser: new StringOutputParser()
        // });
        // const chain = await createRetrievalChain({
        //     retriever,
        //     combineDocsChain,
        // });
        // const result = await chain.stream({
        //     input: currentMessageContent,
        // });
        // for await (const chunk of result) {
        //     console.log(chunk);
        // }
        return new StreamingTextResponse(result);

        // const result = await chain.invoke({
        //     question: currentMessageContent,
        // });
        // return NextResponse.json(result, { status: 200 });
    } catch (e: any) {
        return NextResponse.json({ error: e.message }, { status: e.status ?? 500 });
    }
}
