import os

import requests
import re
from dotenv import load_dotenv
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import CacheBackedEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate, PromptTemplate
from langchain.storage import LocalFileStore
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain_community.document_loaders import CSVLoader, UnstructuredExcelLoader, Docx2txtLoader, PyPDFLoader, \
    UnstructuredFileLoader
from langchain_community.vectorstores import FAISS
from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter

from langchain_community.chat_models import ChatZhipuAI
from  langchain_community.chat_models import ChatTongyi
from langchain_community.embeddings import DashScopeEmbeddings


class KnowledgeBase:
    """FAISS-backed document knowledge base with conversational retrieval.

    Wraps a chat model (OpenAI, ZhipuAI ``glm-*`` or Tongyi ``qwen-*``),
    a file-cached embedding layer, a FAISS vector store persisted on
    disk, and a Redis-backed per-user conversation memory used by
    :meth:`invoke`.
    """

    def __init__(self, api_key, model="gpt-3.5-turbo-16k", embedings="text-embedding-3-small"):
        """Build the LLM / embedding clients and read runtime settings.

        :param api_key: provider API key for both chat and embeddings.
        :param model: chat model name; its prefix selects the provider.
        :param embedings: embedding model name (spelling kept for
            backward compatibility with existing callers).
        """
        self.api_key = api_key
        # Define the attribute unconditionally so init_embeddings() can
        # report a clear error instead of an AttributeError when the
        # provider (e.g. glm) has no embedding backend configured here.
        self.aIEmbeddings = None
        if model.startswith("glm"):
            self.llm = ChatZhipuAI(api_key=api_key, model=model)
        elif model.startswith("qwen"):
            self.llm = ChatTongyi(dashscope_api_key=api_key, model=model)
            self.aIEmbeddings = DashScopeEmbeddings(dashscope_api_key=api_key, model=embedings)
        else:
            self.llm = ChatOpenAI(openai_api_key=api_key, model=model)
            self.aIEmbeddings = OpenAIEmbeddings(openai_api_key=api_key, model=embedings)
        self.cache_embedder = None
        self.cache_embeddings_folder_path = './cache_embeddings/'
        self.store_path = './vector_store/'
        self.db = None

        load_dotenv()
        # Remote file host prefix and Redis connection string come from .env.
        self.domain = os.getenv('domain')
        self.redis_url = os.getenv('redis_url')

    def download_file(self, url, local_folder_path):
        """Fetch ``self.domain + url`` into *local_folder_path* once.

        The download is skipped when the target file already exists.

        :param url: path component of the remote file; its last segment
            becomes the local file name.
        :param local_folder_path: directory the file is stored under.
        :return: the local file path.
        :raises requests.HTTPError: when the server answers with an error
            status (previously the error body was silently cached to disk).
        """
        local_filename = os.path.join(local_folder_path, url.split('/')[-1])

        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(os.path.dirname(local_filename), exist_ok=True)

        if not os.path.exists(local_filename):
            # A timeout keeps a dead connection from hanging forever.
            response = requests.get(self.domain + url, timeout=60)
            response.raise_for_status()
            with open(local_filename, 'wb') as f:
                f.write(response.content)
        return local_filename

    def init_embeddings(self):
        """Wrap the provider embeddings in a local file-backed cache.

        :raises ValueError: if the selected chat model has no embedding
            backend configured (currently the ``glm`` provider).
        """
        if self.aIEmbeddings is None:
            raise ValueError(
                "No embedding backend is configured for the selected model")
        cache_embeddings = LocalFileStore(self.cache_embeddings_folder_path)
        self.cache_embedder = CacheBackedEmbeddings.from_bytes_store(
            self.aIEmbeddings,
            cache_embeddings,
            namespace="customEmbeddings"
        )

    def process_document(self, file_path):
        """Load *file_path* into LangChain documents by file extension.

        :param file_path: local path of the document to load.
        :return: the loaded documents, or None when the extension is
            unsupported.
        """
        loaders = {
            '.csv': CSVLoader,
            '.pdf': PyPDFLoader,
            '.txt': UnstructuredFileLoader,
            '.docx': Docx2txtLoader,
            '.xlsx': UnstructuredExcelLoader,
        }
        # Normalize case so '.PDF', '.Docx', ... are recognized as well.
        _, file_extension = os.path.splitext(file_path)
        loader_cls = loaders.get(file_extension.lower())
        if loader_cls is None:
            return None
        return loader_cls(file_path=file_path).load()

    # Add a document to / delete a document from the knowledge base.
    def upload(self, file_path, action_type):
        """Index a remote document into the vector store, or delete it.

        :param file_path: remote URL path of the document; it is
            downloaded locally before processing.
        :param action_type: 'delete' removes the document's chunks from
            the store; any other value (re)indexes the document.
        :return: True on success, False when the document could not be
            processed.
        """
        self.init_embeddings()
        file_path = self.download_file(file_path, os.path.join('vector_store', 'knowledge_base'))

        try:
            self.db = FAISS.load_local(folder_path=self.store_path, embeddings=self.cache_embedder,
                                       allow_dangerous_deserialization=True)
        except Exception:
            # First upload: no store exists yet; a new one is created below.
            print("Local FAISS storage not found")

        if action_type == 'delete':
            if self.db:
                # Match stored chunks by source file path, case-insensitively.
                ids = [k for k, v in self.db.docstore._dict.items()
                       if v.metadata.get("source", "").lower() == file_path.lower()]
                if ids:
                    self.db.delete(ids)
                    self.db.save_local(folder_path=self.store_path)
            return True

        data = self.process_document(file_path)
        if not data:
            print("Failed to process document")
            return False

        text_split = CharacterTextSplitter(
            separator="\n\n",
            chunk_size=300,
            chunk_overlap=20
        )
        documents = text_split.split_documents(data)

        new_db = FAISS.from_documents(documents, self.cache_embedder)

        if self.db:
            # Merge the new chunks into the existing persisted store.
            self.db.merge_from(new_db)
            self.db.save_local(folder_path=self.store_path)
        else:
            new_db.save_local(folder_path=self.store_path)
        # Explicit success flag, consistent with the delete branch.
        return True

    # Page through the chunks stored for one document.
    def get_knowledge_base(self, file_path, page, size):
        """Return one page of the chunks stored for *file_path*.

        :param file_path: remote URL path of the document.
        :param page: 1-based page index.
        :param size: number of chunks per page.
        :return: dict with 'total' (chunk count for the file) and 'list'
            (the requested page of ``{'id', 'data'}`` entries).
        """
        self.init_embeddings()
        file_path = self.download_file(file_path, os.path.join('vector_store', 'knowledge_base'))

        try:
            self.db = FAISS.load_local(folder_path=self.store_path, embeddings=self.cache_embedder,
                                       allow_dangerous_deserialization=True)
        except Exception:
            print("Local FAISS storage not found")
            return {'total': 0, 'list': []}

        data = [{'id': k, 'data': v} for k, v in self.db.docstore._dict.items()
                if v.metadata.get("source", "").lower() == file_path.lower()]
        start_index = (page - 1) * size
        end_index = page * size
        return {'total': len(data), 'list': data[start_index:end_index]}

    # Ask the LLM a question grounded in the knowledge base.
    def invoke(self, query, uid, template=None):
        """Answer *query* via retrieval over the knowledge base, keeping
        the per-user conversation history in Redis.

        :param query: the user's question.
        :param uid: session id used as the Redis chat-history key.
        :param template: optional system-prompt template; must contain a
            ``{context}`` placeholder.
        :return: the chain result dict (includes the 'answer' key).
        :raises RuntimeError: when the vector store cannot be loaded.
        """
        try:
            self.init_embeddings()
            self.db = FAISS.load_local(folder_path=self.store_path, embeddings=self.cache_embedder,
                                       allow_dangerous_deserialization=True)
        except Exception as e:
            # Log instead of silently swallowing; the guard below turns a
            # missing store into a clear error rather than an AttributeError.
            print(f"Failed to load vector store: {e}")

        if self.db is None:
            raise RuntimeError(
                "Knowledge base vector store is not available; upload a document first")

        if not template:
            template = """请根据内容和我的问题回答,如果你不懂，就回答"不知道"三个字就行，不要自己添加词语和标点。如果内容是空的，你就按照我的问题回答就行。
            内容：{context}
            """

        messages = [
            SystemMessagePromptTemplate.from_template(template),
            HumanMessagePromptTemplate.from_template("{question}"),
        ]
        chat_prompt = ChatPromptTemplate.from_messages(messages)

        # Conversation history is keyed by the user's session id in Redis.
        message_history = RedisChatMessageHistory(
            url=self.redis_url, session_id=uid
        )
        memory = ConversationBufferMemory(
            memory_key='chat_history',
            chat_memory=message_history,
            return_messages=True
        )

        condense_question_prompt = """你作为一名酒店客服，我作为客户，给定以下对话和后续问题，用原始语言将后续问题改写为独立问题
对话：
{chat_history}
后续问题：{question}
后续独立问题：
"""

        condense_question_prompt = PromptTemplate.from_template(condense_question_prompt)
        chain = ConversationalRetrievalChain.from_llm(
            llm=self.llm,
            retriever=self.db.as_retriever(search_kwargs={'k': 3}),
            verbose=True,
            memory=memory,
            condense_question_prompt=condense_question_prompt,
            combine_docs_chain_kwargs={"prompt": chat_prompt}
        )

        return chain.invoke({'question': query})
