from abc import ABC
from langchain.embeddings import HuggingFaceEmbeddings
from config.config_lab import (sentence_size, EMBEDDING_MODEL,
                               DEVICE_IN, KB_HOST,
                               KB_PORT, KB_HOST_synchronize,
                               KB_PORT_synchronize, KB_NAME_synchronize)
from question.kb_service.kb_utils import get_kb_path, get_doc_path, torch_gc
from langchain.document_loaders import UnstructuredFileLoader, TextLoader, CSVLoader, UnstructuredExcelLoader
from langchain.vectorstores import Milvus
from typing import List
from langchain.docstore.document import Document
from langchain.text_splitter import TextSplitter
from question.kb_service.chinese_recursive_text_splitter import ChineseRecursiveTextSplitter
import os
import operator

from question.kb_service.my_milvus_retriever import CustomMilvusRetriever

# Whether to enable Chinese title enhancement, plus its related configuration.
# Heuristically detect which text chunks are titles and mark them in metadata;
# each chunk is then merged with its nearest ancestor title to enrich the text.
ZH_TITLE_ENHANCE = False
# Maximum length of a single text chunk in the knowledge base (not used by MarkdownHeaderTextSplitter)
CHUNK_SIZE = sentence_size
# Overlap length between adjacent text chunks (not used by MarkdownHeaderTextSplitter)
OVERLAP_SIZE = 50


class MilvusService(ABC):
    """Singleton service managing a Milvus-backed knowledge base.

    Maintains two vector stores built from the same documents:
    a primary store (collection named after the knowledge base) and a
    synchronized replica store on a separate Milvus host/collection.
    """

    _instance = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: only the first construction allocates an instance.
        if not cls._instance:
            cls._instance = super().__new__(cls)
            print("MilvusService instance created ...")
        return cls._instance

    def __init__(self,
                 knowledge_base_name: str,
                 ):
        """Initialize (or re-enter) the singleton.

        :param knowledge_base_name: name of the knowledge base; also used
            as the primary Milvus collection name.
        """
        # Device used for embedding inference (value comes from config).
        self.device = DEVICE_IN
        # Knowledge base name doubles as the primary collection name.
        self.kb_name = knowledge_base_name
        # __init__ runs on EVERY construction of the singleton, so the
        # expensive embedding model is only built once.
        if not hasattr(self, 'embeddings') or self.embeddings is None:
            self.embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL,
                                                    model_kwargs={'device': self.device})
        # Path of the knowledge base directory.
        self.kb_path = get_kb_path(self.kb_name)
        # Path of the folder holding the source documents.
        self.docs_path = get_doc_path(self.kb_name)
        # Guard the vector stores the same way as `embeddings`: previously
        # they were unconditionally reset to None, so re-constructing the
        # singleton silently dropped already-loaded stores.
        if not hasattr(self, 'vector_store'):
            # Primary vector store.
            self.vector_store = None
        if not hasattr(self, 'vector_store_synchronize'):
            # Synchronized (replica) vector store.
            self.vector_store_synchronize = None

    def write_check_file(self, filepath, docs):
        """Append a human-readable dump of *docs* to <filepath>/load_file.txt.

        :param filepath: directory in which the log file lives.
        :param docs: sequence of documents; each is written via str().
        """
        fp = os.path.join(filepath, 'load_file.txt')
        # `with` closes the file; the original's extra fout.close() removed.
        with open(fp, 'a+', encoding='utf-8') as fout:
            fout.write("filepath=%s,len=%s" % (filepath, len(docs)))
            fout.write('\n')
            for i in docs:
                fout.write(str(i))
                fout.write('\n')

    def tree(self, filepath, ignore_dir_names=None, ignore_file_names=None):
        """Recursively collect files under *filepath*.

        :param filepath: a file or directory path.
        :param ignore_dir_names: directory base names to skip.
        :param ignore_file_names: file base names to skip.
        :returns: two parallel lists — full file paths and their base names.
            Returns two empty lists when the path does not exist or the
            argument is not a string.  (The original returned ``None, None``
            here, which crashed callers that unpack via ``zip(*...)`` and
            the recursive ``self.tree(...)[0]`` below.)
        """
        if ignore_dir_names is None:
            ignore_dir_names = []
        if ignore_file_names is None:
            ignore_file_names = []
        ret_list = []
        if isinstance(filepath, str):
            if not os.path.exists(filepath):
                print(f"{filepath} Path does not exist.")
                return [], []
            elif os.path.isfile(filepath) and os.path.basename(filepath) not in ignore_file_names:
                return [filepath], [os.path.basename(filepath)]
            elif os.path.isdir(filepath) and os.path.basename(filepath) not in ignore_dir_names:
                for file in os.listdir(filepath):
                    fullfilepath = os.path.join(filepath, file)
                    if os.path.isfile(fullfilepath) and os.path.basename(fullfilepath) not in ignore_file_names:
                        ret_list.append(fullfilepath)
                    if os.path.isdir(fullfilepath) and os.path.basename(fullfilepath) not in ignore_dir_names:
                        ret_list.extend(self.tree(fullfilepath, ignore_dir_names, ignore_file_names)[0])
        return ret_list, [os.path.basename(p) for p in ret_list]

    def load_file(self, doc_path):
        """Load a single file into a list of langchain Documents.

        The loader is picked by extension; unknown extensions fall back to
        UnstructuredFileLoader.

        :param doc_path: path of the file to load.
        :returns: list of Documents; empty list when the file is missing.
        """
        if not os.path.exists(doc_path):
            print(f"{doc_path} File does not exist.")
            return []
        lower = doc_path.lower()
        if lower.endswith(".md"):
            loader = UnstructuredFileLoader(doc_path, mode="elements")
        elif lower.endswith(".txt"):
            loader = TextLoader(doc_path, autodetect_encoding=True)
        elif lower.endswith(".csv"):
            loader = CSVLoader(doc_path)
        elif lower.endswith(("xlsx", "xls")):
            loader = UnstructuredExcelLoader(doc_path, mode="elements")
        else:
            loader = UnstructuredFileLoader(doc_path, mode="elements")
        return loader.load()

    def load_batch_file(self, docs_path):
        """Load one file or every file under a directory, then chunk them.

        :param docs_path: path of a file or directory.
        :returns: list of split documents (possibly empty).  The original
            returned ``None`` on error paths, which broke
            ``init_vector_store``'s ``len(docs)`` check; an empty list is
            returned instead so callers degrade gracefully.
        """
        loaded_files = []
        failed_files = []
        docs = []
        if isinstance(docs_path, str):
            if not os.path.exists(docs_path):
                print(f"{docs_path} Path does not exist.")
                return []
            elif os.path.isfile(docs_path):
                file = os.path.split(docs_path)[-1]
                try:
                    docs = self.load_file(docs_path)
                    print(f"{file} Successfully loaded.")
                    loaded_files.append(docs_path)
                except Exception as e:
                    print(e)
                    print(f"{file} Failed to load.")
                    return []
            elif os.path.isdir(docs_path):
                # tmp_files holds intermediate artifacts; skip it.
                for fullfilepath, file in zip(*self.tree(docs_path, ignore_dir_names=['tmp_files'])):
                    try:
                        docs += self.load_file(fullfilepath)
                        print(f"{file} Successfully loaded.")
                        loaded_files.append(fullfilepath)
                    except Exception as e:
                        print(e)
                        failed_files.append(file)

                if len(failed_files) > 0:
                    print("The following files failed to load successfully.：")
                    for file in failed_files:
                        print(f"{file}")
        else:
            print(f"{docs_path} Incorrect format")
        docs = self.docs2texts(docs)
        self.write_check_file(self.kb_path, docs)
        return docs

    def docs2texts(
            self,
            docs: List[Document] = None,
            chunk_size: int = CHUNK_SIZE,
            chunk_overlap: int = OVERLAP_SIZE,
            text_splitter: TextSplitter = None,
    ):
        """Split documents into chunks.

        :param docs: documents to split; ``None`` is treated as empty
            (previously ``None`` crashed inside ``split_documents``).
        :param chunk_size: maximum chunk length.
        :param chunk_overlap: overlap between adjacent chunks.
        :param text_splitter: optional splitter; defaults to the Chinese
            recursive splitter.
        :returns: list of split documents.
        """
        if docs is None:
            docs = []
        if text_splitter is None:
            text_splitter = self.get_text_spliter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        return text_splitter.split_documents(docs)

    def get_text_spliter(
            self,
            chunk_size: int = CHUNK_SIZE,
            chunk_overlap: int = OVERLAP_SIZE,
    ):
        """Return the text splitter used for chunking (Chinese recursive)."""
        return ChineseRecursiveTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

    def init_vector_store(self):
        """Build both vector stores from scratch out of ``self.docs_path``.

        Drops any pre-existing collections (``drop_old=True``) and
        re-embeds every document.  Intended to run only once.
        """
        print("Milvus init vector store.")
        # Load and chunk every file in the docs folder.
        docs = self.load_batch_file(self.docs_path)
        if len(docs) > 0:
            print("Generating vector library---{}".format(len(docs)))
            # Primary store.
            self.vector_store = Milvus.from_documents(
                docs,
                self.embeddings,
                connection_args={"host": KB_HOST, "port": KB_PORT},
                collection_name=self.kb_name,
                drop_old=True,
            )
            print("Milvus init vector store success.")
            # Synchronized replica store (separate host/collection).
            self.vector_store_synchronize = Milvus.from_documents(
                docs,
                self.embeddings,
                connection_args={"host": KB_HOST_synchronize, "port": KB_PORT_synchronize},
                collection_name=KB_NAME_synchronize,
                drop_old=True,
            )
            print("From milvus init vector store success.")
            # Free GPU memory held by the embedding pass.
            torch_gc()
        else:
            print("Knowledge base is empty, please regenerate.")

    def insert_doc_to_vector(self, fullfilepath):
        """Parse one file, chunk it, and add it to BOTH vector stores.

        :param fullfilepath: path of the file to ingest.

        NOTE(review): assumes ``load_vector_store``/``init_vector_store``
        already ran; otherwise both stores are still ``None``.
        """
        docs = self.load_file(fullfilepath)
        text_splitter = self.get_text_spliter(chunk_size=CHUNK_SIZE, chunk_overlap=OVERLAP_SIZE)
        docs = text_splitter.split_documents(docs)
        self.vector_store.add_documents(docs)
        self.vector_store_synchronize.add_documents(docs)
        print(f"{fullfilepath} Successfully loaded.")

    def load_vector_store(self):
        """Attach to the existing Milvus collections, or build them.

        If the primary collection already exists, connect to both stores;
        otherwise fall back to a full ``init_vector_store``.
        """
        # Imported lazily so pymilvus is only required on this code path.
        from pymilvus import (
            connections,
            utility
        )
        # Check whether the primary collection already exists.
        connections.connect("default", host=KB_HOST, port=KB_PORT)
        has = utility.has_collection(self.kb_name)
        if has:
            print("Knowledge base is existed.")
            self.vector_store = Milvus(
                self.embeddings,
                connection_args={"host": KB_HOST, "port": KB_PORT},
                collection_name=self.kb_name,
            )
            self.vector_store_synchronize = Milvus(
                self.embeddings,
                connection_args={"host": KB_HOST_synchronize, "port": KB_PORT_synchronize},
                collection_name=KB_NAME_synchronize,
            )
        else:
            self.init_vector_store()
            torch_gc()

    def do_search(self, query, top_k, score_threshold):
        """Similarity search against the primary store.

        :param query: query text to embed and search with.
        :param top_k: maximum number of results.
        :param score_threshold: keep only hits with score strictly below
            this value.  NOTE(review): "lower score = better" holds for
            distance metrics such as L2 — confirm against the collection's
            metric type (for IP/COSINE the comparison would be inverted).
        :returns: list of (Document, score) pairs, at most ``top_k`` long.
        """
        query_embeddings = self.embeddings.embed_query(query)
        docs = self.vector_store.similarity_search_with_score_by_vector(query_embeddings, top_k)
        docs = [doc for doc in docs if doc[1] < score_threshold]
        return docs[:top_k]
