import os
import numpy as np
import torch
from typing import List, Dict, Any
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm

class EmbeddingKnowledgeBase:
    """Embedding-based local knowledge base.

    Loads PDF/DOCX/TXT documents from a directory, splits them into
    overlapping chunks, encodes the chunks with a SentenceTransformer
    model, and serves top-k semantic search via cosine similarity.
    Embeddings can be persisted to / restored from an ``.npz`` file so
    later runs skip recomputation.
    """

    def __init__(self, documents_dir: str, model_name: str = "paraphrase-multilingual-MiniLM-L12-v2"):
        """Initialize the knowledge base.

        Args:
            documents_dir: Directory that holds the source documents.
            model_name: SentenceTransformer model name; the default is a
                multilingual model suitable for Chinese + English corpora.
        """
        self.documents_dir = documents_dir
        self.model_name = model_name
        self.documents: List[Document] = []  # chunked documents (filled by build/load)
        self.document_embeddings = None      # np.ndarray (n_chunks, dim), or None
        self.model = None                    # SentenceTransformer, loaded lazily

    def load_documents(self) -> List[Document]:
        """Load PDF/DOCX/TXT documents from ``self.documents_dir``.

        Returns an empty list (after printing a hint) when the directory
        is missing or empty. Unsupported file types are skipped; per-file
        loader errors are reported and do not abort the scan.
        """
        documents: List[Document] = []

        # Make sure the document directory exists.
        if not os.path.exists(self.documents_dir):
            os.makedirs(self.documents_dir)
            print(f"创建了文档目录: {self.documents_dir}")
            print("请在此目录中放置您的文档后再运行")
            return documents

        # Check whether the directory has any entries at all.
        files = os.listdir(self.documents_dir)
        if not files:
            print(f"文档目录 {self.documents_dir} 为空，请添加一些文档")
            return documents

        for file in files:
            file_path = os.path.join(self.documents_dir, file)

            # Skip sub-directories and other non-regular entries; the
            # loaders expect a readable file path.
            if not os.path.isfile(file_path):
                continue

            # Case-insensitive extension match so e.g. "REPORT.PDF" loads too.
            lower = file.lower()
            try:
                if lower.endswith(".pdf"):
                    docs = PyPDFLoader(file_path).load()
                    label = "PDF"
                elif lower.endswith(".docx"):
                    docs = Docx2txtLoader(file_path).load()
                    label = "DOCX"
                elif lower.endswith(".txt"):
                    docs = TextLoader(file_path, encoding='utf-8').load()
                    label = "TXT"
                else:
                    continue  # unsupported extension

                # Record the originating filename so search hits can cite it.
                for doc in docs:
                    doc.metadata["source"] = file
                documents.extend(docs)
                print(f"已加载{label}: {file}")
            except Exception as e:
                print(f"加载文件 {file} 时出错: {e}")

        return documents

    def split_documents(self, documents: List[Document]) -> List[Document]:
        """Split documents into overlapping chunks suited to embedding.

        The separator list includes Chinese punctuation so sentence
        boundaries in Chinese text are respected. Each chunk receives a
        sequential ``chunk_id`` in its metadata.
        """
        if not documents:
            return []

        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=100,
            separators=["\n\n", "\n", "。", "！", "？", "；", "，", " ", ""]
        )
        splits = text_splitter.split_documents(documents)

        # Tag every chunk with its index for traceability.
        for i, split in enumerate(splits):
            split.metadata["chunk_id"] = i

        return splits

    def load_embedding_model(self) -> bool:
        """Load the embedding model, falling back to alternatives on failure.

        Returns:
            True if a model was loaded (possibly a backup — in that case
            ``self.model_name`` is updated to match), False otherwise.
        """
        print(f"正在加载嵌入模型: {self.model_name}")
        try:
            self.model = SentenceTransformer(self.model_name)
            print("嵌入模型加载成功")
            return True
        except Exception as e:
            print(f"加载嵌入模型时出错: {e}")

            # Try a list of well-known backup models in order.
            backup_models = [
                "all-MiniLM-L6-v2",
                "distiluse-base-multilingual-cased-v1",
                "distiluse-base-multilingual-cased-v2",
                "paraphrase-multilingual-mpnet-base-v2"
            ]
            for backup_model in backup_models:
                try:
                    print(f"尝试加载备用模型: {backup_model}")
                    self.model = SentenceTransformer(backup_model)
                    self.model_name = backup_model
                    print(f"成功加载备用模型: {backup_model}")
                    return True
                except Exception as e2:
                    print(f"加载备用模型 {backup_model} 出错: {e2}")

            print("所有模型加载失败")
            return False

    def compute_embeddings(self, texts: List[str]) -> np.ndarray:
        """Encode ``texts`` into a ``(len(texts), dim)`` embedding matrix.

        Raises:
            ValueError: if the embedding model has not been loaded yet.
        """
        if not self.model:
            raise ValueError("请先加载嵌入模型")

        if not texts:
            # np.vstack([]) raises on an empty sequence; return an empty
            # matrix with the model's embedding width instead.
            dim = self.model.get_sentence_embedding_dimension() or 0
            return np.empty((0, dim))

        # Encode in batches to bound memory use and speed things up.
        batch_size = 32
        all_embeddings = []
        for start in range(0, len(texts), batch_size):
            batch = texts[start:start + batch_size]
            all_embeddings.append(self.model.encode(batch, show_progress_bar=False))

        return np.vstack(all_embeddings)

    def build_knowledge_base(self) -> bool:
        """Full pipeline: load model, load documents, split, embed.

        Returns:
            True on success, False when the model or documents are missing.
        """
        # 1. Load the embedding model.
        if not self.load_embedding_model():
            print("无法构建知识库: 嵌入模型加载失败")
            return False

        # 2. Load the raw documents.
        documents = self.load_documents()
        if not documents:
            print("无法构建知识库: 没有可用文档")
            return False

        print(f"已加载 {len(documents)} 个文档段落")

        # 3. Split into chunks.
        self.documents = self.split_documents(documents)
        print(f"文档已分割为 {len(self.documents)} 个文本块")

        # 4. Compute chunk embeddings.
        print("开始计算文档嵌入向量...")
        texts = [doc.page_content for doc in self.documents]

        print("计算文档嵌入向量中...")
        self.document_embeddings = self.compute_embeddings(texts)
        print(f"完成嵌入向量计算，形状: {self.document_embeddings.shape}")

        return True

    def search(self, query: str, n_results: int = 5, threshold: float = 0.0) -> List[Dict[str, Any]]:
        """Return up to ``n_results`` chunks most similar to ``query``.

        Args:
            query: Natural-language query string.
            n_results: Maximum number of hits to return.
            threshold: Minimum cosine similarity a hit must reach.

        Returns:
            Dicts with ``content``, ``source``, ``page`` and ``similarity``
            keys, ordered by decreasing similarity; empty when the
            knowledge base has not been built/loaded yet.
        """
        if not self.model or self.document_embeddings is None:
            print("请先运行build_knowledge_base()构建知识库")
            return []

        # Embed the query and score it against every chunk.
        query_embedding = self.model.encode([query])[0]
        similarities = cosine_similarity(
            [query_embedding],
            self.document_embeddings
        )[0]

        # Chunk indices by decreasing similarity.
        sorted_indices = np.argsort(similarities)[::-1]

        results = []
        for idx in sorted_indices[:n_results]:
            similarity = similarities[idx]
            if similarity >= threshold:
                doc = self.documents[idx]
                results.append({
                    "content": doc.page_content,
                    "source": doc.metadata.get("source", "未知来源"),
                    "page": doc.metadata.get("page", None),
                    # Cast the numpy scalar to a plain float so results
                    # are JSON-serializable and print cleanly.
                    "similarity": float(similarity)
                })

        return results

    def save_embeddings(self, file_path: str = "embeddings.npz") -> bool:
        """Persist embeddings + chunk metadata to ``file_path`` (npz).

        Returns:
            True on success, False when there is nothing to save.
        """
        if self.document_embeddings is None:
            print("没有嵌入向量可以保存")
            return False

        # Flatten each chunk into a plain dict so it round-trips through
        # a pickled numpy object array.
        metadata = []
        for doc in self.documents:
            metadata.append({
                "content": doc.page_content,
                "source": doc.metadata.get("source", ""),
                "page": doc.metadata.get("page", None),
                "chunk_id": doc.metadata.get("chunk_id", None)
            })

        np.savez(
            file_path,
            embeddings=self.document_embeddings,
            metadata=np.array(metadata, dtype=object)
        )
        print(f"嵌入向量已保存到: {file_path}")
        return True

    def load_embeddings(self, file_path: str = "embeddings.npz") -> bool:
        """Restore embeddings + documents previously written by ``save_embeddings``.

        Also (re)loads the embedding model if needed, so ``search`` works
        right after a successful load.

        Returns:
            True on success, False when the file is missing or unreadable.
        """
        if not os.path.exists(file_path):
            print(f"嵌入文件不存在: {file_path}")
            return False

        try:
            # allow_pickle is required for the object-dtype metadata array.
            data = np.load(file_path, allow_pickle=True)

            self.document_embeddings = data['embeddings']

            # Rebuild Document objects from the stored metadata. Indexing
            # an object ndarray yields the stored dicts directly — the
            # previous ``meta.item()`` call raised AttributeError (dicts
            # have no ``item`` method), so every load silently failed.
            metadata_list = data['metadata']
            self.documents = []
            for meta in metadata_list:
                doc = Document(
                    page_content=meta['content'],
                    metadata={
                        "source": meta['source'],
                        "page": meta['page'],
                        "chunk_id": meta['chunk_id']
                    }
                )
                self.documents.append(doc)

            print(f"成功加载嵌入向量，形状: {self.document_embeddings.shape}")
            print(f"成功加载文档数: {len(self.documents)}")

            # Make sure a model is available for subsequent searches.
            if not self.model:
                self.load_embedding_model()

            return True
        except Exception as e:
            print(f"加载嵌入向量时出错: {e}")
            return False

    def get_document_stats(self) -> Dict[str, int]:
        """Return basic stats: chunk count, distinct sources, total characters."""
        if not self.documents:
            return {
                "document_count": 0,
                "source_count": 0,
                "total_chars": 0
            }

        doc_count = len(self.documents)
        sources = [doc.metadata.get("source", "") for doc in self.documents]
        # Count distinct non-empty source filenames only.
        unique_sources = len({s for s in sources if s})
        total_chars = sum(len(doc.page_content) for doc in self.documents)

        return {
            "document_count": doc_count,
            "source_count": unique_sources,
            "total_chars": total_chars
        }

# Usage example
if __name__ == "__main__":
    # Build a knowledge base over the public-health document directory.
    kb = EmbeddingKnowledgeBase(documents_dir="./public_health_docs")

    # Reuse cached embeddings when available; otherwise build from scratch.
    if os.path.exists("embeddings.npz"):
        print("找到缓存的嵌入向量，直接加载...")
        kb.load_embeddings("embeddings.npz")
    else:
        print("开始构建新的知识库...")
        if kb.build_knowledge_base():
            # Persist the embeddings so later runs can skip recomputation.
            kb.save_embeddings("embeddings.npz")

    # Smoke-test the semantic search.
    query = "新冠肺炎的主要症状"
    results = kb.search(query, n_results=5, threshold=0.3)

    print(f"\n查询: {query}")
    print(f"找到 {len(results)} 个相关结果:\n")

    for i, result in enumerate(results):
        print(f"结果 {i+1} (相似度: {result['similarity']:.4f}):")
        print(f"来源: {result['source']}")
        # PDF page numbers start at 0, so compare against None instead of
        # relying on truthiness — otherwise page 0 would never be printed.
        if result['page'] is not None:
            print(f"页码: {result['page']}")
        print(f"内容: {result['content'][:150]}...\n")