from typing import Callable
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_openai.chat_models import ChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.runnables import RunnablePassthrough, RunnableParallel
from operator import itemgetter
from datetime import datetime


class SalesChatBot:
    """Retrieval-augmented sales chat bot backed by a FAISS vector store.

    Answers questions via a LangChain runnable chain (retrieve context ->
    format prompt -> chat model -> string), keeping a rolling chat history
    that is trimmed by both a message-pair cap and a token budget so the
    prompt stays within the model's context window.
    """

    # History caps — whichever is exceeded first triggers trimming in invoke().
    memory_token_limit: int = 2048  # max total tokens across stored messages
    memory_pair_limit: int = 10     # max user/ai message pairs to keep

    def __init__(self, vector_store_dir: str = 'sales_speech_db', token_counter: Callable[[str], int] = None):
        """Load the persisted FAISS index and set up model, memory, retriever.

        :param vector_store_dir: directory holding the persisted FAISS index.
        :param token_counter: callable mapping text to a token count; defaults
            to the chat model's own tokenizer (``get_num_tokens``).
        """
        self.chain = None  # built lazily on first invoke() (or via initialize())
        # NOTE(review): allow_dangerous_deserialization unpickles the stored
        # index — only point this at vector stores you created yourself.
        self.db = FAISS.load_local(vector_store_dir, OpenAIEmbeddings(), allow_dangerous_deserialization=True)
        self.chat_model = ChatOpenAI(model='gpt-4-turbo')
        self.token_counter = self.chat_model.get_num_tokens if token_counter is None else token_counter
        self.memory = ChatMessageHistory()
        # Only return documents scoring above the similarity threshold.
        self.retriever = self.db.as_retriever(
            search_type='similarity_score_threshold',
            search_kwargs={'score_threshold': 0.6}
        )

    def initialize(self):
        """Build the RAG chain: retrieve context, render prompt, call the model."""
        system_prompt = (
            "Use the given context and history to answer the question, and answer in Chinese. "
            "If the user's question has no subject, please find the latest question with a subject in History as the subject of the current question."
            "If you don't know the answer, just say I don't know, I'll ask the leader or something. "
            "Use three sentence maximum and keep the answer concise. \n"
            "History: {history}\n\n"
            "Context(This is not a historical record, but if you don't know the answer, look here.): {context}"
        )
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", system_prompt),
                ("human", "{input}"),
            ]
        )

        self.chain = (
                # Fan out: pass the raw question through and retrieve context for it.
                RunnableParallel({
                    'input': RunnablePassthrough(),
                    'context': self.retriever
                })
                # Flatten retrieved docs to text and attach the chat history.
                | {
                    "input": itemgetter('input'),
                    "context": lambda res: '\n'.join(doc.page_content for doc in res['context']),
                    "history": lambda res: "\n".join(
                        f"{message.type}: {message.content}" for message in self.memory.messages)
                }
                | prompt
                | self.chat_model
                | StrOutputParser()
        )

    def invoke(self, message):
        """Answer *message*, record the exchange, and trim old history.

        :param message: the user's question (plain text).
        :return: the model's answer as a string.
        """
        # Build the chain on first use so callers need not call initialize().
        if self.chain is None:
            self.initialize()
        ans = self.chain.invoke(message)
        self.memory.add_user_message(f"{message}-current time: {datetime.now().strftime('%m-%d %H:%M:%S')}")
        self.memory.add_ai_message(ans)
        # Trim the oldest user/ai *pair* at a time (not single messages) so the
        # history never starts with a dangling AI reply whose question is gone.
        while len(self.memory.messages) > self.memory_pair_limit * 2 or \
                self.token_counter(''.join(m.content for m in self.memory.messages)) > self.memory_token_limit:
            del self.memory.messages[:2]

        return ans
