import os, sys

now_dir = os.getcwd()
sys.path.append(now_dir)
sys.path.append(os.path.join(now_dir, "GPT_SoVITS"))

# SECURITY(review): these API keys were committed in plain text — anyone with
# repo access can use them. Rotate both credentials and supply them via the
# environment. setdefault keeps the old fallback value but no longer clobbers
# a key the operator has already exported.
os.environ.setdefault("OPENAI_API_KEY", "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")
os.environ.setdefault("SERPAPI_API_KEY", "9200796cba4b2569d70549a440e2ee16c690401256613ec3cdeffc46edb47652")

from Inference.TTS import tts, play_wav
from flask import Flask, request, Response, jsonify, stream_with_context, send_file
from flask_httpauth import HTTPBasicAuth

from operator import itemgetter

from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
from BaichuanChatModel import BaichuanChatModel
from funasr import AutoModel

# Flask application instance; route handlers are presumably registered further
# down the file (outside this view) — confirm before refactoring.
app = Flask(__name__)


class Conversation:
    """One chat session: a LangChain LLM chain with buffered memory, plus
    speech-recognition (FunASR) input and TTS (GPT-SoVITS) output hooks.

    History is kept as ``[[user_message, bot_reply], ...]``; the last reply is
    ``None`` until one of the ``bot*`` methods fills it in.
    """

    # Shared chat model, instantiated once at class-creation time.
    model = BaichuanChatModel()

    # model = ChatOpenAI(streaming=True, max_tokens=2048)

    # paraformer-zh is a multi-functional asr model
    # use vad, punc, spk or not as you need
    ASR_model = AutoModel(model="Chat/paraformer-zh", model_revision="v2.0.4",
                          vad_model="fsmn-vad", vad_model_revision="v2.0.4",
                          punc_model="ct-punc-c", punc_model_revision="v2.0.4",
                          # spk_model="cam++", spk_model_revision="v2.0.2",
                          )

    def __init__(self, template):
        """Build the prompt/LLM chain for this conversation.

        template: the messages passed to ``ChatPromptTemplate.from_messages``;
                  it should include a placeholder for the "history" variable
                  the chain injects below.
        """
        self.memory = ConversationBufferMemory(return_messages=True)
        self.prompt = ChatPromptTemplate.from_messages(template)

        # On every call, load the buffered history and feed it to the prompt
        # alongside the caller-supplied "input".
        self.chain = (
                RunnablePassthrough.assign(
                    history=RunnableLambda(self.memory.load_memory_variables) | itemgetter("history")
                )
                | self.prompt
                | Conversation.model
        )

        # TTS request parameters; 'text' is overwritten before each synthesis.
        self.data = {'cha_name': '阿梓', 'character_emotion': 'default',
                     'text': 'response_text',
                     'text_language': '多语种混合', 'batch_size': 10, 'speed': 1, 'top_k': 6, 'top_p': 0.8,
                     'temperature': 0.8,
                     'stream': 'False', 'save_temp': 'True'}
        self.history = []

    def _remember(self, user_message, bot_reply):
        """Persist one completed exchange into the conversation memory.

        BUGFIX: the chain reads memory via ``load_memory_variables`` but
        ``save_context`` was never called, so "history" stayed empty forever.
        """
        self.memory.save_context({"input": user_message}, {"output": bot_reply})

    def user(self, user_message, history):
        """Append a pending user turn; return ("", history) so a UI textbox clears."""
        self.history = history + [[user_message, None]]
        return "", self.history

    def file(self, file, history):
        """Append an uploaded file's name as a user turn."""
        self.history = history + [[file.name, None]]
        return self.history

    def bot_stream(self, history):
        """Stream a reply token-by-token, yielding the growing history each step."""
        user_message = history[-1][0]
        ret = self.chain.stream({"input": user_message})
        self.history[-1][1] = ""
        for token in ret:
            self.history[-1][1] += token.content
            yield self.history
        # Save the finished exchange so later turns see it as context.
        self._remember(user_message, self.history[-1][1])

    def baichuan_bot_stream(self, history):
        """Stream a reply from Baichuan, whose tokens carry cumulative text."""
        user_message = history[-1][0]
        ret = self.chain.stream({"input": user_message})
        self.history[-1][1] = ""
        for token in ret:
            # Baichuan streams the full text-so-far; keep only the new suffix.
            new_content = token.content[len(self.history[-1][1]):]
            self.history[-1][1] += new_content
            yield self.history
        # Save the finished exchange so later turns see it as context.
        self._remember(user_message, self.history[-1][1])

    def bot(self, history):
        """Generate a complete (non-streaming) reply, record it, and return it."""
        user_message = history[-1][0]
        ret = self.chain.invoke({"input": user_message})
        # BUGFIX: invoke() returns a message object; store its text, matching
        # the streaming paths, so read_aloud's string concatenation works.
        reply = getattr(ret, "content", ret)
        self.history[-1][1] = reply
        self._remember(user_message, reply)
        return reply

    def read_aloud(self, history):
        """Synthesize and play the latest bot reply via TTS (blocking)."""
        # The leading pause marks give the TTS engine a brief lead-in.
        self.data['text'] = "。。" + history[-1][1]
        play_wav(tts(self.data))
        return None

    def listen(self, audio, history):
        """Transcribe `audio` with the ASR model and append it as a user turn."""
        user_message = Conversation.ASR_model.generate(input=audio,
                                                       batch_size_s=300,
                                                       hotword='魔搭')
        print(user_message[0]["text"])
        self.history = history + [[user_message[0]["text"], None]]
        return self.history
