import os
import concurrent.futures
from typing import List, Dict, Any
from langchain.vectorstores import FAISS
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import OllamaEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain.docstore.document import Document
from loguru import logger

class KnowledgeBase:
    """Local vector knowledge base: FAISS index over text files, embedded via Ollama.

    Workflow: ``.txt`` files under ``input_dir`` are loaded concurrently, split
    into overlapping chunks, embedded with the given Ollama model, and the
    resulting FAISS index is persisted at ``knowledge_base_path``.
    ``query`` lazily loads (or builds) the index on first use.
    """

    def __init__(self, input_dir: str, knowledge_base_path: str, embeddings_model="bge-m3", chunk_size=500, chunk_overlap=50):
        """Configure paths, embedding model, and chunking parameters.

        Args:
            input_dir: Directory scanned (non-recursively) for ``.txt`` files.
            knowledge_base_path: Directory where the FAISS index is saved/loaded.
            embeddings_model: Ollama embedding model name (default ``bge-m3``).
            chunk_size: Maximum characters per chunk.
            chunk_overlap: Characters shared between adjacent chunks.
        """
        logger.info("################初始化知识库################")
        self.input_dir = input_dir
        self.knowledge_base_path = knowledge_base_path
        self.embeddings = OllamaEmbeddings(model=embeddings_model)
        # Lazily populated by build_vector_store()/load_vector_store().
        self.vector_store = None
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        logger.info("################知识库初始化成功################")

    def load_single_document(self, file_path: str) -> List[Document]:
        """Load one UTF-8 text file; return [] (and log) on any failure.

        Best-effort by design: one unreadable file must not abort the
        whole concurrent load in load_documents().
        """
        try:
            loader = TextLoader(file_path, encoding="utf-8")
            return loader.load()
        except Exception as e:
            logger.error(f"加载文件 {file_path} 失败: {e}")
            return []

    def load_documents(self) -> List[Document]:
        """Load every ``.txt`` file in ``input_dir`` using a thread pool.

        Returns an empty list when the directory is missing; re-raises
        unexpected errors after logging them.
        """
        logger.info("################知识库加载文档################")
        documents: List[Document] = []
        try:
            if not os.path.exists(self.input_dir):
                logger.warning(f"输入目录不存在: {self.input_dir}")
                return documents

            file_paths = [
                os.path.join(self.input_dir, filename)
                for filename in os.listdir(self.input_dir)
                if filename.endswith(".txt")
            ]

            # Threads suit this I/O-bound work; the GIL is released during file reads.
            with concurrent.futures.ThreadPoolExecutor() as executor:
                for doc_list in executor.map(self.load_single_document, file_paths):
                    documents.extend(doc_list)
        except Exception as e:
            logger.error(f"加载文档时出错: {e}")
            raise
        logger.info("################知识库文档加载完成################")
        return documents

    def process_documents(self, documents: List[Document]) -> List[Document]:
        """Split documents into chunks of ``chunk_size`` with ``chunk_overlap``."""
        logger.info("################处理文档################")
        if not documents:
            return []

        text_splitter = CharacterTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap)
        result = text_splitter.split_documents(documents)
        logger.info("################文档处理完成################")
        return result

    def build_vector_store(self) -> None:
        """Load, chunk, and embed all documents, then persist the FAISS index.

        No-op (with a warning) when no documents are found — in that case
        ``self.vector_store`` remains None. Re-raises embedding/persistence
        errors after logging.
        """
        logger.info("################构建向量知识库################")
        try:
            documents = self.load_documents()
            if not documents:
                logger.warning("没有找到要处理的文档")
                return

            processed_docs = self.process_documents(documents)

            # Create the vector store
            self.vector_store = FAISS.from_documents(processed_docs, self.embeddings)

            # Persist the vector store. Guard against a bare filename:
            # os.path.dirname("") == "" and os.makedirs("") raises FileNotFoundError.
            kb_dir = os.path.dirname(self.knowledge_base_path)
            if kb_dir:
                os.makedirs(kb_dir, exist_ok=True)
            self.vector_store.save_local(self.knowledge_base_path)

            logger.info(f"向量存储已保存到 {self.knowledge_base_path}")
        except Exception as e:
            logger.error(f"构建向量存储时出错: {e}")
            raise
        logger.info("################向量知识库构建完成################")

    def load_vector_store(self) -> None:
        """Load the persisted FAISS index, building it first when absent.

        ``allow_dangerous_deserialization=True`` is required by FAISS's
        pickle-based format — only safe because the index is produced
        locally by build_vector_store(), never from untrusted sources.
        """
        logger.info("################加载向量知识库################")
        try:
            if not os.path.exists(self.knowledge_base_path):
                logger.warning(f"向量存储不存在: {self.knowledge_base_path}，将创建新的存储")
                self.build_vector_store()
                return

            self.vector_store = FAISS.load_local(
                self.knowledge_base_path,
                self.embeddings,
                allow_dangerous_deserialization=True
            )
            logger.info(f"向量存储已加载: {self.knowledge_base_path}")
        except Exception as e:
            logger.error(f"加载向量存储时出错: {e}")
            raise
        logger.info("################向量知识库加载完成################")

    def query(self, query: str, k: int = 3) -> List[Dict[str, Any]]:
        """Return the top-``k`` chunks most similar to ``query``.

        Each hit is ``{"content": ..., "metadata": ...}``; on failure a
        single-element list ``[{"error": ...}]`` is returned instead of
        raising, so callers always receive a list.
        """
        logger.info(f"################向知识库查询：{query} ################")
        if not self.vector_store:
            self.load_vector_store()

        # build_vector_store() may legitimately leave the store unset (no
        # documents found); report that explicitly instead of letting an
        # AttributeError on None surface as an opaque failure below.
        if self.vector_store is None:
            logger.error("查询知识库时出错: 向量存储未初始化")
            return [{"error": "查询知识库失败: 向量存储未初始化"}]

        try:
            docs = self.vector_store.similarity_search(query, k=k)
            result = [{"content": doc.page_content, "metadata": doc.metadata} for doc in docs]
            logger.info(f"################ 知识检索完成 ################")
            return result
        except Exception as e:
            logger.error(f"查询知识库时出错: {e}")
            return [{"error": f"查询知识库失败: {str(e)}"}]