import os, sys

# Make the project root and the GPT_SoVITS package importable.
now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append(os.path.join(now_dir, "GPT_SoVITS"))

# SECURITY FIX: real OpenAI / SerpAPI keys were previously hard-coded here.
# Secrets committed to source control must be treated as compromised —
# revoke them and supply fresh ones through the environment instead
# (shell export, .env loader, deployment config):
#     OPENAI_API_KEY, SERPAPI_API_KEY
# The downstream clients (ChatOpenAI / OpenAI / SerpAPI) read these
# variables themselves, so nothing needs to be assigned in code.
for _required_key in ("OPENAI_API_KEY", "SERPAPI_API_KEY"):
    if not os.environ.get(_required_key):
        print(f"WARNING: environment variable {_required_key} is not set")

from operator import itemgetter

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI, OpenAI
from BaichuanChatModel import BaichuanChatModel
from funasr import AutoModel
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.retrievers import EnsembleRetriever
from langchain_community.chat_message_histories import RedisChatMessageHistory
from ConversationSummaryRedisMemory import ConversationSummaryRedisMemory
from langchain_community.document_loaders import PyPDFLoader
# from BaichuanLLM import BaichuanLLM
import time

# Incremental-summarization prompt: the LLM folds each batch of new
# conversation lines into the running summary.  The text between EXAMPLE
# and END OF EXAMPLE is a worked demonstration for the model, not content
# to be summarized.  (Template text is user-facing Chinese and must stay
# exactly as-is.)
SUMMARIZER_TEMPLATE = """
请将以下内容逐步概括所提供的对话内容，并将新的概括添加到之前的概括中，形成新的概括。EXAMPLE与END OF EXAMPLE之间的部分是给你的示例，并不是需要总结的内容

EXAMPLE
Current summary:
Human询问AI对人工智能的看法。AI认为人工智能是一种积极的力量。

New lines of conversation:
Human：为什么你认为人工智能是一种积极的力量？
AI：因为人工智能将帮助人类发挥他们的潜能。

New summary:
Human询问AI对人工智能的看法。AI认为人工智能是一种积极的力量，因为它将帮助人类发挥他们的潜能。
END OF EXAMPLE

Current summary:
{summary}

New lines of conversation:
{new_lines}

New summary:"""

# from_template infers the {summary} / {new_lines} input variables from the
# template string itself — equivalent to listing them explicitly.
SUMMARY_PROMPT = PromptTemplate.from_template(SUMMARIZER_TEMPLATE)


class Conversation:
    """One chat session: an LLM with Redis-backed summary memory, optional
    document retrieval over Chroma vector stores, and FunASR speech input.

    The class-level models are shared by every session; per-instance state
    is the memory, the chat history and the registered retrievers.
    """

    # chat_model = BaichuanChatModel()

    # Streaming chat backend shared across all sessions.
    chat_model = ChatOpenAI(streaming=True, max_tokens=2048)

    # Shared ASR pipeline: paraformer-zh plus VAD and punctuation models.
    ASR_model = AutoModel(model="Chat/paraformer-zh", model_revision="v2.0.4",
                          vad_model="fsmn-vad", vad_model_revision="v2.0.4",
                          punc_model="ct-punc-c", punc_model_revision="v2.0.4",
                          # spk_model="cam++", spk_model_revision="v2.0.2",
                          )

    def __init__(self, template, session_id):
        """Build the runnable chain for one session.

        Args:
            template: message list handed to ChatPromptTemplate.from_messages.
            session_id: key under which this session's history lives in Redis.
        """
        self.session_id = session_id

        # NOTE(review): 6333 is Qdrant's default port; Redis normally listens
        # on 6379 — confirm a Redis server really runs on 6333 here.
        self.redis_memory = RedisChatMessageHistory(session_id, url="redis://localhost:6333")

        self.memory = ConversationSummaryRedisMemory(
            chat_memory=self.redis_memory,
            llm=OpenAI(),
            prompt=SUMMARY_PROMPT,
            max_token_limit=200,
            return_messages=True
        )

        # Rebuild [user, assistant] pairs from the stored history: messages
        # alternate human / AI, so an even index opens a new pair.
        self.history = []
        for i, message in enumerate(self.redis_memory.messages):
            if i % 2 == 0:
                self.history.append([message.content, None])
            else:
                self.history[-1][1] = message.content

        # Summarize anything beyond the token limit before the first turn.
        self.memory.prune()

        self.prompt = ChatPromptTemplate.from_messages(template)

        # input -> {history, retrieve_results} -> prompt -> (debug print) -> LLM
        self.chain = (
                RunnablePassthrough.assign(
                    history=RunnableLambda(self.memory.load_memory_variables) | itemgetter("history"),
                    retrieve_results=itemgetter("input") | RunnableLambda(self.retrieve)
                )
                | self.prompt
                | RunnableLambda(self.print_res)
                | Conversation.chat_model
        )

        # Reload vector stores persisted by earlier runs.
        # FIX: os.listdir() returns bare directory names, so each one must be
        # joined with the parent directory before handing it to Chroma.
        # FIX: EnsembleRetriever expects BaseRetriever objects, so expose each
        # store through .as_retriever(), matching what file() appends.
        self.retrieve_list = []
        chroma_root = "Chat/Chroma"
        if os.path.isdir(chroma_root):
            for db in os.listdir(chroma_root):
                store = Chroma(persist_directory=os.path.join(chroma_root, db),
                               embedding_function=OpenAIEmbeddings())
                self.retrieve_list.append(store.as_retriever())
        self.retriever = None
        # True immediately after a file upload: the next turn acknowledges
        # the upload instead of running retrieval.
        self.just_upload = False

    def get_message_history(self, session_id: str) -> RedisChatMessageHistory:
        """Return a Redis-backed message history for *session_id*."""
        # NOTE(review): same port question as in __init__ (6333 vs 6379).
        return RedisChatMessageHistory(session_id, url="redis://localhost:6333")

    def retrieve(self, question):
        """Return retrieved document context for *question*, or "" if none.

        Retrieval is skipped on the turn right after an upload (just_upload)
        so the acknowledgement reply is not polluted with snippets; the flag
        is reset as a side effect.
        """
        if self.retriever and not self.just_upload:
            docs = self.retriever.invoke(question)
            res = "".join(doc.page_content + "\n" for doc in docs)
            return "。你可以选择性地参考以下内容，根据你的判断也可以不参考：" + res
        self.just_upload = False
        return ""

    def print_res(self, prompt):
        """Debug passthrough: print the fully rendered prompt unchanged."""
        print(prompt)
        return prompt

    def user(self, user_message, history):
        """Append the user's message as a new unanswered pair.

        Returns ("", history) — the empty string clears the input textbox
        (Gradio-style callback shape).
        """
        self.history = history + [[user_message, None]]
        return "", self.history

    def file(self, file):
        """Index an uploaded PDF/Markdown file and rebuild the retriever.

        Returns the (unchanged) chat history for the UI callback.
        """
        self.just_upload = True
        if file.split(".")[-1] == "pdf":
            loader = PyPDFLoader(file)
        else:
            # Anything that is not a PDF is treated as Markdown.
            loader = UnstructuredMarkdownLoader(file)
        docs = loader.load()
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        splits = splitter.split_documents(docs)
        # NOTE(review): no persist_directory is passed, so this store is
        # ephemeral and will NOT be rediscovered by __init__ on restart.
        vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())
        self.retrieve_list.append(
            vectorstore.as_retriever(search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.5})
        )
        # Equal-weight ensemble over every retriever registered so far.
        self.retriever = EnsembleRetriever(
            retrievers=self.retrieve_list, weights=[1 / len(self.retrieve_list) for _ in self.retrieve_list]
        )
        return self.history

    def bot_stream(self, history):
        """Stream the assistant reply token-by-token (OpenAI delta chunks).

        Yields each new content fragment and accumulates the full reply into
        self.history[-1][1].
        """
        if self.just_upload:
            ret = self.chain.stream({"input": history[-1][0] + ",请你复述以下语句“已收到文件，你有什么想问的问题吗”"})
        else:
            # "retrieve_results" here is overwritten by the chain's assign
            # step — presumably kept for template compatibility; verify.
            ret = self.chain.stream({"input": history[-1][0], "retrieve_results": ""})
        self.history[-1][1] = ""
        for token in ret:
            self.history[-1][1] += token.content
            yield token.content

    def baichuan_bot_stream(self, history):
        """Stream a reply from a backend that emits CUMULATIVE content.

        Unlike bot_stream, each chunk carries the whole reply so far, so only
        the suffix beyond what was already seen is yielded.
        """
        if self.just_upload:
            ret = self.chain.stream({"input": history[-1][0] + ",请你复述以下语句“已收到文件，你有什么想问的问题吗”"})
        else:
            ret = self.chain.stream({"input": history[-1][0], "retrieve_results": ""})

        self.history[-1][1] = ""
        print(f"stream begin:{time.time()}")
        for token in ret:
            new_content = token.content[len(self.history[-1][1]):]
            self.history[-1][1] += new_content
            yield new_content

    def bot(self, history):
        """Non-incremental variant: store and return the raw stream object."""
        if self.just_upload:
            ret = self.chain.stream({"input": history[-1][0] + ",请你回复“已收到文件，你有什么想问的问题吗”"})
        else:
            ret = self.chain.stream({"input": history[-1][0]})

        self.history[-1][1] = ret
        return ret

    def listen(self, audio):
        """Transcribe *audio* with the shared ASR model and return the text."""
        user_message = Conversation.ASR_model.generate(input=audio,
                                                       batch_size_s=300,
                                                       hotword='魔搭')
        return user_message[0]["text"]

    def clear(self):
        """Reset the session: memory, UI history and all retrievers."""
        self.memory.clear()
        self.history.clear()
        self.retrieve_list.clear()
        self.retriever = None