import os
import logging
import asyncio
import threading
import hashlib
import time
from typing import List, Dict, Any, Optional, Set

# Unified logging configuration module
from log_config import setup_logging
# Excel file support
import pandas as pd
import torch

# Category handling
from category import CategoryManager
# HTML page parsing
from html_parser import HTMLProcessor
# Interface definitions
from interfaces.embedding_service import IEmbeddingService

# Avoid gRPC fork warnings
os.environ['GRPC_ENABLE_FORK_SUPPORT'] = '0'
# Silence tokenizers parallel-processing warnings
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
# Suppress PyTorch's encoder_attention_mask deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=FutureWarning, message="`encoder_attention_mask` is deprecated and will be removed in version 4.55.0 for `BertSdpaSelfAttention.forward`.")
warnings.filterwarnings("ignore", category=FutureWarning, message="`encoder_attention_mask` is deprecated and will be removed in version 4.55.0 for `XLMRobertaSdpaSelfAttention.forward`.")
# Suppress pypdf PDF-reading warnings
warnings.filterwarnings("ignore", category=UserWarning, message="Previous trailer cannot be read:.*")
warnings.filterwarnings("ignore", category=UserWarning, message="Object.* found")

# Supported backend framework names
SUPPORTED_BACKENDS = ['HuggingFace', 'FlagModel']

# Model-related imports
try:
    from langchain_huggingface import HuggingFaceEmbeddings
except ImportError:
    # Fall back to the legacy import path
    from langchain_community.embeddings import HuggingFaceEmbeddings
from sentence_transformers import CrossEncoder
from FlagEmbedding import FlagModel, FlagReranker

# LangChain imports
from langchain_community.document_loaders import (
    TextLoader, PyPDFLoader, Docx2txtLoader,
    UnstructuredMarkdownLoader
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
# Milvus imports
from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType, utility

# Configure logging for this module via the shared log_config helper
setup_logging()
logger = logging.getLogger(__name__)



class EmbeddingService(IEmbeddingService):
    """
    Embedding service: embeds and stores text/files/images in Milvus, and
    provides retrieval plus optional reranking over the stored chunks.
    """
    def __init__(self, category_manager: CategoryManager, html_parser: HTMLProcessor,
                 embedding_model_name: str, embedding_model_path: Optional[str] = None,
                 rerank_model_name: Optional[str] = None, rerank_model_path: Optional[str] = None,
                 milvus_connection_args: Optional[Dict[str, Any]] = None,
                 backend_name: str = "HuggingFace",
                 saved_images_dir: str = "saved_images"):
        """
        Initialize the embedding service.

        :param category_manager: provides the category list; one Milvus collection is kept per category
        :param html_parser: HTML page parser held for later use
        :param embedding_model_name: embedding model name
        :param embedding_model_path: optional local path of the embedding model (takes precedence)
        :param rerank_model_name: optional rerank model name
        :param rerank_model_path: optional local path of the rerank model (takes precedence)
        :param milvus_connection_args: Milvus connection parameters; expects "host" and "port" keys
        :param backend_name: backend framework name: HuggingFace or FlagModel
        :param saved_images_dir: directory where image files are saved
        """
        logger.info(f"embedding model path: {embedding_model_path}")
        logger.info(f"rerank model path: {rerank_model_path}")
        # Directory used to store saved images
        self.image_dir = saved_images_dir
        # HTML parser
        self.html_parser = html_parser
        # Category manager
        self.category_manager = category_manager
        # Backend name drives which model classes are instantiated below
        self.backend_name = backend_name
        self.embedding_model = self._init_embedding_model(embedding_model_path, embedding_model_name)
        self.rerank_model = self._init_rerank_model(rerank_model_path, rerank_model_name)
        # Text splitter sized to the model's effective context window
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=512,
            chunk_overlap=100,
            separators=["\n\n", "\n", " ", ""],     # prefer splitting at paragraphs, then lines, words, chars
            length_function=len
        )
        # Connect to Milvus
        self._connect_to_milvus(milvus_connection_args)
        # Initialize or load the per-category collections
        self._init_collection()

    def _init_embedding_model(self, embedding_model_path: Optional[str], embedding_model_name: str):
        """
        Initialize the embedding model for the configured backend.

        :param embedding_model_path: optional local model path; takes precedence when set
        :param embedding_model_name: model name used when no local path is given
        :return: the initialized embedding model instance
        :raises ValueError: if ``self.backend_name`` is not a supported backend
        """
        backend_name = self.backend_name
        # Validate the backend name before doing any heavy work.
        if backend_name not in SUPPORTED_BACKENDS:
            raise ValueError(f"不支持的backend: {backend_name}，可选值: {', '.join(SUPPORTED_BACKENDS)}")

        # Automatically pick the best available device (Apple MPS > CUDA > CPU).
        device = 'mps' if torch.backends.mps.is_available() else 'cuda' if torch.cuda.is_available() else 'cpu'
        logger.info(f"使用设备: {device}")

        # A local path, when provided, takes precedence over the model name;
        # both branches previously duplicated the constructor call for this.
        model_source = embedding_model_path or embedding_model_name

        if backend_name == 'HuggingFace':
            model_kwargs = {
                "device": device,
                # Optimization parameters aimed at Apple Silicon
                "model_kwargs": {
                    "torch_dtype": torch.float16,  # half precision: less memory, faster
                    "device_map": "auto"
                },
                "encode_kwargs": {
                    "normalize_embeddings": True,
                    "batch_size": 32  # batch size tuned for M4 memory
                }
            }
            if embedding_model_path:
                logger.info(f"使用本地 HuggingFace Embedding 模型: {embedding_model_path}")
            else:
                logger.info(f"使用预训练HuggingFace模型: {embedding_model_name}")
            self.embedding_model = HuggingFaceEmbeddings(
                model_name=model_source,
                **model_kwargs
            )
        else:   # FlagModel
            if device == "mps":  # extra tuning parameters for the MPS device
                model_kwargs = {
                    "torch_dtype": torch.float16,   # half precision (more efficient on MPS)
                    "low_cpu_mem_usage": True   # memory-efficient loading
                }
            else:
                model_kwargs = {}

            if embedding_model_path:
                logger.info(f"使用本地 FlagModel Embedding 模型: {embedding_model_path}")
            else:
                logger.info(f"使用预训练FlagModel模型: {embedding_model_name}")
            # NOTE(review): torch_dtype/low_cpu_mem_usage are forwarded to FlagModel as-is,
            # matching the original code — confirm the installed FlagEmbedding accepts them.
            self.embedding_model = FlagModel(
                model_name_or_path=model_source,
                device=device,  # GPU acceleration (e.g. Apple Silicon)
                **model_kwargs
            )
        return self.embedding_model

    def _init_rerank_model(self, rerank_model_path: Optional[str], rerank_model_name: Optional[str]):
        """
        Initialize the rerank model for the configured backend.

        :param rerank_model_path: optional local model path; takes precedence when set
        :param rerank_model_name: model name used when no local path is given
        :return: the rerank model instance, or None when no model is configured
        :raises ValueError: if ``self.backend_name`` is not a supported backend
        """
        backend_name = self.backend_name
        # Validate the backend name; this also makes the old trailing else unreachable,
        # so it has been removed.
        if backend_name not in SUPPORTED_BACKENDS:
            raise ValueError(f"不支持的backend: {backend_name}，可选值: {', '.join(SUPPORTED_BACKENDS)}")

        self.rerank_model = None
        if backend_name == 'HuggingFace':
            if rerank_model_path:
                logger.info(f"使用本地 HuggingFace Rerank 模型: {rerank_model_path}")
                self.rerank_model = CrossEncoder(rerank_model_path)
            elif rerank_model_name:
                logger.info(f"使用预训练HuggingFace重排序模型: {rerank_model_name}")
                self.rerank_model = CrossEncoder(rerank_model_name)
            else:
                logger.info("未配置重排序模型")
        else:   # FlagModel (the only other supported backend)
            if rerank_model_path:
                logger.info(f"使用本地 FlagModel Rerank 模型: {rerank_model_path}")
                self.rerank_model = FlagReranker(model_name_or_path=rerank_model_path, use_fp16=True)
            elif rerank_model_name:
                logger.info(f"使用预训练FlagModel重排序模型: {rerank_model_name}")
                self.rerank_model = FlagReranker(model_name_or_path=rerank_model_name, use_fp16=True)
            else:
                # An unconfigured reranker is a valid setup; log at INFO for
                # consistency with the HuggingFace branch (was ERROR).
                logger.info("未配置重排序模型")

        return self.rerank_model

    def _connect_to_milvus(self, milvus_connection_args: Dict[str, Any]):
        """
        Connect to the Milvus service, retrying a few times on failure.

        :param milvus_connection_args: dict with "host" and "port" keys
        :raises Exception: re-raises the last connection error after all retries fail
        """
        # Keep the connection parameters for later reference.
        self.connection_args = milvus_connection_args
        max_retries = 3
        retry_delay = 2  # seconds

        attempt = 0
        while True:
            attempt += 1
            try:
                connections.connect(
                    alias="default",
                    host=self.connection_args["host"],
                    port=self.connection_args["port"]
                )
            except Exception as e:
                if attempt >= max_retries:
                    # Out of retries: surface the failure to the caller.
                    logger.error(f"连接Milvus服务失败 (已重试 {max_retries} 次): {str(e)}")
                    logger.error("请确认Milvus服务已启动并可在指定地址访问")
                    raise
                logger.warning(f"第 {attempt} 次连接Milvus服务失败: {str(e)}，将在 {retry_delay} 秒后重试")
                time.sleep(retry_delay)
            else:
                logger.info(f"成功连接到Milvus服务: {self.connection_args}")
                return
    
    def _init_collection(self):
        """
        Initialize the Milvus collections, one per category.

        Existing collections are loaded and their embedding dimension is
        checked against the current model's; missing collections are created
        with an IVF_FLAT/L2 index. Populates ``self.collections``
        (category name -> Collection).
        """
        self.collections = {}

        # The category list comes from the category manager.
        categories = self.category_manager.get_all_categories()

        # Embedding dimension of the currently loaded model.
        current_dimension = self._get_embedding_dimension()
        logger.info(f"当前模型的嵌入维度: {current_dimension}")

        # Shared field layout for every category collection.
        fields = [
            FieldSchema(name="id", dtype=DataType.INT64,
                        is_primary=True, auto_id=True),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR,
                        dim=current_dimension),
            FieldSchema(name="text", dtype=DataType.VARCHAR,
                        max_length=65535),
            FieldSchema(name="metadata", dtype=DataType.JSON, nullable=True)
        ]
        schema = CollectionSchema(fields=fields, description="文档嵌入集合")

        # Index parameters for the embedding field.
        index_params = {
            "index_type": "IVF_FLAT",
            "metric_type": "L2",
            "params": {"nlist": 1024}
        }

        logger.info(f"检验各 Collection 的维度 ... ")
        for category in categories:
            category_name = category["name"]
            if utility.has_collection(category_name):
                collection = self._load_existing_collection(category_name, current_dimension)
            else:
                collection = self._create_category_collection(category_name, schema, index_params, current_dimension)
            self.collections[category_name] = collection

    def _load_existing_collection(self, category_name: str, current_dimension: int) -> Collection:
        """Load an existing collection; warn if its embedding dimension differs from the model's."""
        collection = Collection(category_name)

        # Locate the vector field in the stored schema.
        embedding_field = None
        for field in collection.schema.fields:
            if field.name == "embedding" and field.dtype == DataType.FLOAT_VECTOR:
                embedding_field = field
                break

        if embedding_field:
            # NOTE(review): assumes FieldSchema exposes a `dim` attribute —
            # verify against the installed pymilvus version (some expose it via params["dim"]).
            collection_dimension = embedding_field.dim
            logger.info(f"  集合 '{category_name}' 的嵌入维度: {collection_dimension}")
            if collection_dimension != current_dimension:
                # A mismatch cannot be fixed automatically; the operator must drop the collection.
                logger.warning(f"  集合 '{category_name}' 的维度({collection_dimension})与当前模型维度({current_dimension})不匹配")
                logger.warning(f" 请手动删除集合 '{category_name}' 并重启服务以创建新的匹配维度的集合")

        logger.info(f"成功加载Milvus现有集合: {category_name}")
        return collection

    def _create_category_collection(self, category_name: str, schema: CollectionSchema,
                                    index_params: Dict[str, Any], current_dimension: int) -> Collection:
        """Create a new collection for a category and build its embedding index."""
        collection = Collection(
            name=category_name,
            schema=schema,
            using="default",
            enable_dynamic_field=True
        )
        collection.create_index(
            field_name="embedding",
            index_params=index_params
        )
        logger.info(f"成功创建Milvus新集合: {category_name}，维度: {current_dimension}")
        return collection

    def remove_collection(self, collection_name: str) -> Dict[str, Any]:
        """
        Drop the given Milvus collection.

        :param collection_name: name of the collection to drop
        :return: result dict with "success" and "message" keys
        """
        def failure(message: str) -> Dict[str, Any]:
            # Uniform shape for every failure result.
            return {"success": False, "message": message}

        try:
            if not utility.has_collection(collection_name):
                # Nothing to drop.
                logger.warning(f"Milvus集合 {collection_name} 不存在，无法删除")
                return failure(f"Milvus集合 {collection_name} 不存在")
            if collection_name == 'default':
                # The default collection is protected from deletion.
                logger.warning(f"Milvus集合 {collection_name} 不能删除")
                return failure(f"Milvus集合 {collection_name} 不能删除")

            utility.drop_collection(collection_name)
            logger.info(f"成功删除Milvus集合: {collection_name}")
            return {"success": True, "message": f"Milvus集合 {collection_name} 已成功删除"}
        except Exception as e:
            logger.error(f"删除Milvus集合 {collection_name} 失败: {str(e)}")
            return failure(f"删除Milvus集合 {collection_name} 失败: {str(e)}")

    def _get_embedding_dimension(self) -> int:
        """
        Probe the embedding model to determine the vector dimension.

        Falls back to 768 (the common default for BERT-style models) when the
        model raises a "meta tensor" error or when probing fails entirely.

        :return: embedding vector dimension
        """
        # Probe with a meaningful text rather than an empty string.
        test_text = "这是一个测试文本，用于获取嵌入向量维度"
        # Used only to keep the original per-backend warning text.
        backend_label = "FlagModel" if self.backend_name == 'FlagModel' else "HuggingFace"
        try:
            try:
                # The two backends expose different single-text embedding calls;
                # everything after the call is identical, so it is shared here
                # (the original duplicated this whole try/except per backend).
                if self.backend_name == 'FlagModel':
                    test_embedding = self.embedding_model.encode([test_text])[0]
                else:
                    test_embedding = self.embedding_model.embed_query(test_text)
                dimension = len(test_embedding)
                logger.info(f"检测到的嵌入向量维度: {dimension}")
                return dimension
            except Exception as probe_error:
                logger.warning(f"{backend_label}获取维度失败: {str(probe_error)}")
                # "meta tensor" errors come from lazily-materialized weights;
                # fall back to the default dimension in that case.
                if "meta tensor" in str(probe_error).lower():
                    logger.info("检测到meta tensor错误，尝试使用替代方法")
                    return 768
                raise
        except Exception as e:
            logger.error(f"获取嵌入向量维度失败: {str(e)}")
            # Most common embedding models use 768 dimensions.
            return 768

    def embed_query(self, query: str) -> List[float]:
        """Return the embedding vector for a single query string."""
        if self.backend_name != 'FlagModel':
            return self.embedding_model.embed_query(query)
        # FlagModel: use encode() to avoid the BertTokenizerFast performance hint.
        vectors = self.embedding_model.encode([query])
        return vectors[0].tolist()
            
    # IEmbeddingService interface implementation
    def get_embedding(self, text: str) -> List[float]:
        """Return the embedding vector for *text*, or an empty list on failure."""
        try:
            # Delegate to the backend-aware embed_query helper.
            return self.embed_query(text)
        except Exception as e:
            logger.error(f"获取嵌入向量失败: {str(e)}")
            return []

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Return embedding vectors for a batch of document texts."""
        if self.backend_name != 'FlagModel':
            return self.embedding_model.embed_documents(texts)
        # FlagModel: encode() avoids the BertTokenizerFast performance hint.
        return self.embedding_model.encode(texts).tolist()

    def _check_duplicate_chunks(self, chunks: List[Document], collection_name: str) -> List[Document]:
        """
        Check for and drop document chunks that already exist in a collection.

        A chunk is treated as a duplicate when its nearest neighbour in the
        target collection lies within L2 distance 0.1. (Unlike an earlier
        docstring claimed, there is no fallback to a default collection —
        ``collection_name`` is used as given.)

        :param chunks: document chunks to check
        :param collection_name: name of the collection to check against
        :return: de-duplicated chunk list; on any error the original list is returned
        """
        if not chunks:
            return []

        target_collection_name = collection_name

        try:
            # Without an existing collection there is nothing to deduplicate against.
            if not utility.has_collection(target_collection_name):
                logger.info(f"集合 {target_collection_name} 不存在，跳过重复检查")
                return chunks

            # Load the collection so it can be searched.
            collection = Collection(name=target_collection_name, using="default")
            collection.load()

            unique_chunks = []
            for chunk in chunks:
                # Embed via self.embed_query so both backends are handled.
                embedding = self.embed_query(chunk.page_content)

                search_params = {
                    "metric_type": "L2",
                    "params": {"nprobe": 10}
                }

                # Nearest-neighbour lookup; one hit suffices for a duplicate test.
                results = collection.search(
                    data=[embedding],
                    anns_field="embedding",
                    param=search_params,
                    limit=1,
                    output_fields=["text"]
                )

                # No hit at all, or L2 distance above 0.1 → the chunk is new.
                if not results or not results[0] or results[0][0].distance > 0.1:
                    unique_chunks.append(chunk)
                    logger.debug(f"添加新文档块")
                else:
                    logger.debug(f"跳过重复文档块")

            collection.release()
            logger.info(f"文档去重: 去重后文档块数量: {len(unique_chunks)}/{len(chunks)}")
            return unique_chunks
        except Exception as e:
            logger.error(f"检查重复文档块失败: {str(e)}")
            # Best effort: fall back to the original chunks on failure.
            return chunks

    """嵌入服务类，提供文档嵌入和查询功能"""
    @staticmethod
    def load_document(file_path: str) -> List[Document]:
        """
        加载不同格式的文档
        :param file_path: 文件路径
        :return: 文档列表
        """
        file_extension = os.path.splitext(file_path)[1].lower()
        logger.info(f"加载文档: {file_path} 格式: {file_extension}")
        try:
            if file_extension == '.txt':
                loader = TextLoader(file_path, encoding='utf-8')
            elif file_extension == '.pdf':
                loader = PyPDFLoader(file_path)
            elif file_extension == '.docx':
                loader = Docx2txtLoader(file_path)
            elif file_extension == '.md':
                loader = UnstructuredMarkdownLoader(file_path)
            elif file_extension in ['.xlsx', '.xls']:
                documents = []
                # 读取Excel文件的所有工作表
                xl = pd.ExcelFile(file_path)
                for sheet_name in xl.sheet_names:
                    # 读取工作表数据
                    df = xl.parse(sheet_name)
                    # 将数据转换为字符串
                    text = df.to_string(index=False)
                    # 创建Document对象
                    documents.append(Document(
                        page_content=text,
                        metadata={
                            'source': file_path,
                            'sheet_name': sheet_name
                        }
                    ))
                return documents
            else:
                logger.info(f"不支持的文件格式: {file_extension}")
                raise ValueError(f"不支持的文件格式: {file_extension}")
            return loader.load()
        except Exception as e:
            logger.error(f"加载文档失败: {str(e)}")
            return []

    def split_document(self, documents: List[Document]) -> List[Document]:
        """
        Split documents into chunks with the configured text splitter.

        :param documents: documents to split
        :return: the resulting chunk list
        """
        splitter = self.text_splitter
        return splitter.split_documents(documents)

    def embedding_file(self, file_path: str, metadata: dict = None) -> Dict[str, Any]:
        """
        Load, split, de-duplicate and embed a document file into Milvus.

        :param file_path: path of the document
        :param metadata: document metadata; must contain a "category" key naming
                         an existing collection
        :return: result dict describing success/failure and chunk statistics
        """
        if not os.path.exists(file_path):
            return {"success": False, "message": f"文件不存在: {file_path}"}

        try:
            logger.info(f"开始装载并解析文件: {file_path}")
            # Load the document.
            documents = self.load_document(file_path)
            if not documents:
                return {"success": False, "message": "加载文档失败. 请检查文件是否时支持的文件格式(.txt, .pdf, .docx, .md, .xlsx, .xls)."}

            # Split into chunks.
            split_docs = self.split_document(documents)
            if not split_docs:
                return {"success": False, "message": "分割文档失败"}

            logger.info(f"文档切分: 文档 {file_path} 已切分为 {len(split_docs)} 个文档块")

            # Attach source path and caller metadata to every chunk.
            for doc in split_docs:
                doc.metadata["source"] = file_path
                if metadata:
                    doc.metadata.update(metadata)

            # Resolve the target collection from metadata["category"].
            if not metadata or "category" not in metadata:
                return {"success": False, "message": "未指定分类"}
            category = metadata["category"]
            if category not in self.collections:
                # Fixed log text: the original claimed a default-collection
                # fallback ("将使用默认集合") but the code returns a failure.
                logger.warning(f"指定的分类 '{category}' 不存在")
                return {"success": False, "message": f"指定的分类 '{category}' 不存在"}
            collection_name = category

            target_collection = self.collections.get(collection_name)
            logger.info(f"将使用集合 '{target_collection.name}' 进行文档嵌入")

            # Drop chunks that already exist in the collection.
            unique_chunks = self._check_duplicate_chunks(split_docs, collection_name=collection_name)
            unique_count = len(unique_chunks)

            if unique_count == 0:
                return {
                    "success": True,
                    "message": "所有文档块均已存在，无需重复嵌入",
                    "document_path": file_path,
                    "chunks_count": 0,
                    "duplicate_count": len(split_docs)
                }

            # Generate embeddings (backend-aware).
            logger.info("生成嵌入向量...")
            texts = [chunk.page_content for chunk in unique_chunks]
            metadatas = [chunk.metadata for chunk in unique_chunks]
            embeddings = self.embed_documents(texts)

            # Column-oriented insert payload: [embeddings, texts, metadatas].
            data = [
                embeddings,
                texts,
                metadatas
            ]

            logger.info(f"插入 {len(unique_chunks)} 个文档块到集合 '{target_collection.name}'...")
            target_collection.insert(data)
            logger.info("文档块插入成功")

            # Load so the new data becomes searchable, then release resources.
            target_collection.load()
            logger.info(f"集合 '{target_collection.name}' 已加载")
            target_collection.release()

            return {
                "success": True,
                "message": "文档嵌入成功",
                "document_path": file_path,
                "chunks_count": unique_count,
                "total_chunks": len(split_docs),
                "duplicate_count": len(split_docs) - unique_count,
                "collection": target_collection.name
            }
        except Exception as e:
            logger.error(f"嵌入文档失败: {str(e)}")
            return {"success": False, "message": f"嵌入文档失败: {str(e)}"}
                
    def add_document(self, content: str, metadata: Dict = None, category: str = "default") -> bool:
        """
        Add a single document to the vector store.

        :param content: document text
        :param metadata: optional metadata dict; the category is written into it
        :param category: target category/collection name
        :return: True on success, False otherwise
        """
        try:
            meta = metadata if metadata is not None else {}
            meta["category"] = category
            # Delegate to embedding_text, which handles dedup and insertion.
            outcome = self.embedding_text(content, meta)
            return outcome.get("success", False)
        except Exception as e:
            logger.error(f"添加单个文档失败: {str(e)}")
            return False


    def query(self, category: str, query: str, top_k: int = 5, rerank: bool = False) -> Dict[str, Any]:
        """
        Retrieve documents similar to *query* from the given category's collection.

        :param category: category / collection name to search
        :param query: query text
        :param top_k: maximum number of results after reranking
        :param rerank: whether to rescore results with the rerank model
        :return: result dict with "success", "message", "query", "category", "results"
        """
        if not category or not category.strip():
            return {"success": False, "message": "分类名称不能为空"}

        if not query or not query.strip():
            return {"success": False, "message": "查询文本不能为空"}

        try:
            # Resolve the target collection, lazily loading it from Milvus if needed.
            if category not in self.collections:
                if not utility.has_collection(category):
                    return {"success": False, "message": f"分类 '{category}' 不存在"}
                self.collections[category] = Collection(name=category, using="default")

            target_collection = self.collections[category]
            logger.info(f"Milvus将使用集合 {category} 进行查询")

            # Embed the query text.
            query_embedding = self.embed_query(query)
            logger.info(f"成功嵌入查询文本: {query}")

            search_params = {
                "metric_type": "L2",
                "params": {"nprobe": 15}
            }

            # Over-fetch (2 * top_k) so reranking has candidates to choose from.
            target_collection.load()
            results = target_collection.search(
                data=[query_embedding],
                anns_field="embedding",
                param=search_params,
                limit=2 * top_k,
                output_fields=["text", "metadata"]
            )
            logger.info(f"查询结果: 查询到{len(results[0])}个文档块")
            target_collection.release()

            # Convert hits into (Document, distance) pairs.
            retrieved_docs = []
            for hit in results[0]:
                doc = Document(
                    page_content=hit.entity.get("text"),
                    metadata=hit.entity.get("metadata", {})
                )
                retrieved_docs.append((doc, hit.distance))

            if rerank and hasattr(self, 'rerank_model') and self.rerank_model and retrieved_docs:
                logger.info("使用重排序模型优化结果...")
                # Build (query, text) pairs once (the original built them twice).
                pairs = [[query, doc.page_content] for doc, _ in retrieved_docs]

                # Each backend exposes a different scoring entry point.
                if self.backend_name == 'FlagModel':
                    scores = self.rerank_model.compute_score(pairs)
                else:
                    scores = self.rerank_model.predict(pairs)

                # Keep the top_k documents by rerank score (original L2 distances retained).
                reranked_with_scores = sorted(zip(scores, retrieved_docs), key=lambda x: x[0], reverse=True)
                retrieved_docs = [(doc, score) for _, (doc, score) in reranked_with_scores[:top_k]]
                formatted_results = self._format_query_results(retrieved_docs)
                logger.info(f"重排序成功.  记录数量: {len(formatted_results)} 条 ")
            else:
                logger.info("不 使用重排序模型优化结果...")
                # NOTE(review): without reranking this returns up to 2*top_k hits,
                # not top_k — preserved from the original; confirm intended.
                formatted_results = self._format_query_results(retrieved_docs)
                logger.info(f"查询成功: '{query}', 在集合 {category} 中返回 {len(formatted_results)} 个结果")

            return {
                "success": True,
                "message": "查询成功",
                "query": query,
                "category": category,
                "results": formatted_results
            }
        except Exception as e:
            logger.error(f"查询失败: {str(e)}")
            return {"success": False, "message": f"查询失败: {str(e)}"}

    @staticmethod
    def _format_query_results(docs_with_scores) -> List[Dict[str, Any]]:
        """Format (Document, score) pairs into plain result dicts (shared by both query paths)."""
        return [
            {
                "text": doc.page_content,
                "score": score,
                "metadata": doc.metadata
            }
            for doc, score in docs_with_scores
        ]

    def search(self, query: str, top_k: int = 5, category: str = None) -> List[Dict]:
        """
        Search for similar documents, returning only the result list.

        :param query: query text
        :param top_k: maximum number of results
        :param category: category name; defaults to "default" when None
        :return: list of result dicts, or [] on failure
        """
        try:
            target_category = "default" if category is None else category
            # Delegate to the full query() implementation.
            response = self.query(target_category, query, top_k=top_k)
            if not response.get("success", False):
                logger.error(f"搜索失败: {response.get('message', '')}")
                return []
            return response.get("results", [])
        except Exception as e:
            logger.error(f"搜索失败: {str(e)}")
            return []

    def delete_document(self, document_id: int, category: str = "default") -> bool:
        """
        Delete a document from the vector store by primary-key id.

        :param document_id: Milvus auto-generated primary key of the entity
        :param category: category/collection to delete from
        :return: True when an entity was deleted, False otherwise
        """
        try:
            collection_name = category
            if not utility.has_collection(collection_name):
                logger.error(f"分类 {category} 对应的集合不存在")
                return False

            collection = Collection(collection_name)
            collection.load()
            # Delete by primary key, then flush so the deletion is persisted.
            result = collection.delete(expr=f"id == {document_id}")
            collection.flush()
            collection.release()

            deleted = result.delete_count > 0
            if deleted:
                logger.info(f"成功删除文档ID: {document_id} 从集合 {collection_name}")
            else:
                logger.warning(f"未找到文档ID: {document_id} 或删除失败")
            return deleted
        except Exception as e:
            logger.error(f"删除文档失败: {str(e)}")
            return False

    def embedding_text(self, text: str, metadata: dict = None) -> Dict[str, Any]:
        """
        Embed a single text snippet into the collection named by metadata["category"].

        :param text: text to embed; must be non-empty
        :param metadata: metadata dict; must contain a "category" key
        :return: result dict describing success/failure and chunk statistics
        """
        if not text or not text.strip():
            return {"success": False, "message": "文本不能为空"}

        try:
            # Wrap the raw text in a Document so dedup and insert share one code path.
            doc = Document(page_content=text, metadata=metadata or {})

            # The target collection comes from metadata["category"].
            if not metadata or "category" not in metadata:
                raise ValueError("metadata 中必须包含 category 字段")
            target_collection_name = metadata["category"]
            logger.info(f"Milvus将使用集合: {target_collection_name}")

            # Skip the insert when an equivalent text already exists.
            unique_chunks = self._check_duplicate_chunks([doc], collection_name=target_collection_name)
            unique_count = len(unique_chunks)
            if unique_count == 0:
                return {
                    "success": True,
                    "message": "文本已存在，无需重复嵌入",
                    "embedding": None,
                    "chunks_count": 0,
                    "duplicate_count": 1,
                    "collection": target_collection_name
                }

            logger.info("生成文本嵌入向量...")
            texts = [chunk.page_content for chunk in unique_chunks]
            metadatas = [chunk.metadata for chunk in unique_chunks]
            embeddings = self.embed_documents(texts)

            # Column-oriented insert payload: [embeddings, texts, metadatas].
            data = [embeddings, texts, metadatas]

            collection = Collection(name=target_collection_name, using="default")
            logger.info(f"Milvus插入 {len(unique_chunks)} 个文本到集合 {target_collection_name}...")
            collection.insert(data)
            logger.info(f"Milvus文本插入集合 {target_collection_name} 成功")

            # Load so the new data becomes searchable, then release.
            collection.load()
            logger.info(f"Milvus集合 {target_collection_name} 已加载")
            collection.release()

            return {
                "success": True,
                "message": "文本嵌入成功",
                "embedding": embeddings[0] if embeddings else None,
                "chunks_count": unique_count,
                "total_chunks": 1,
                "duplicate_count": 1 - unique_count,
                "collection": target_collection_name
            }
        except Exception as e:
            logger.error(f"Milvus嵌入文本失败: {str(e)}")
            return {"success": False, "message": f"Milvus嵌入文本失败: {str(e)}"}
                
    def update_document(self, document_id: int, content: str, metadata: Dict = None, category: str = "default") -> bool:
        """Update a document in the vector store via delete-then-re-add.

        Milvus has no in-place update, so the old entry is removed first
        and the new content is inserted as a fresh document.
        """
        try:
            if not self.delete_document(document_id, category):
                logger.error(f"更新文档失败: 无法删除旧文档ID {document_id}")
                return False
            # Old entry is gone; insert the replacement content
            return self.add_document(content, metadata, category)
        except Exception as e:
            logger.error(f"更新文档失败: {str(e)}")
            return False

    def embedding_image(self, image_path: str, metadata: dict = None) -> Dict[str, Any]:
        """
        Embed an image's description text and store it in Milvus.

        :param image_path: local file path or http(s) URL of the image
        :param metadata: metadata dict; must contain a non-empty "description"
                         (the text that actually gets embedded) and a
                         "category" naming the target collection
        :return: result dict with success flag, stored paths and counts
        :raises ValueError: when metadata lacks a "category" field
        """
        is_remote = image_path.startswith(('http://', 'https://'))
        if not is_remote:  # a local image must exist on disk
            if not os.path.exists(image_path):
                return {"success": False, "message": f"图片文件不存在: {image_path}"}

        if not metadata or not isinstance(metadata, dict):
            return {"success": False, "message": "metadata必须是一个字典对象"}

        # The description text is what actually gets embedded
        description = metadata.get("description", "")
        if not description or not description.strip():
            return {"success": False, "message": "metadata中必须包含非空的description字段"}

        # Resolve the target collection from metadata["category"]
        if "category" in metadata:
            target_collection_name = metadata["category"]
            logger.info(f"Milvus将使用集合: {target_collection_name}")
        else:
            raise ValueError("metadata 中必须包含 category 字段")

        try:
            if not is_remote:   # local image: copy it into the served images dir
                # e.g. saved_images/default/ or saved_images/personal/
                images_dir = os.path.join(self.image_dir, target_collection_name)
                os.makedirs(images_dir, exist_ok=True)
                # Derive a unique filename from the file's content hash
                file_name = os.path.basename(image_path).lower()     # aaa.jpg
                name, ext = os.path.splitext(file_name)              # ('aaa', '.jpg')
                # BUGFIX: close the file after hashing (previously a leaked open())
                with open(image_path, 'rb') as img_file:
                    file_hash = hashlib.md5(img_file.read()).hexdigest()
                unique_filename = f"{name}_{file_hash}{ext}"
                destination_path = os.path.join(images_dir, unique_filename)
                # Served over the web through the saved_images mount point,
                # e.g. http://xxxx:xx/saved_images/default/xxxxx.png
                image_url = f"/saved_images/{target_collection_name}/{unique_filename}"
                # Copy the image into place
                import shutil
                shutil.copy2(image_path, destination_path)
                logger.info(f"图片已保存到: {destination_path}")
                original_name = os.path.basename(image_path)
            else:   # URL image: nothing to copy, reference the URL directly
                destination_path = ''
                unique_filename = ''
                original_name = ''
                # BUGFIX: image_url was never assigned in this branch, so the
                # return dicts below raised NameError for URL images
                image_url = image_path

            doc = Document(
                page_content=description,
                metadata={
                    "source": destination_path,
                    "image_url": image_url,
                    "name": unique_filename,
                    "type": "image",
                    "original_name": original_name,
                    'collection': target_collection_name,
                    **metadata  # merge all caller-supplied metadata
                }
            )

            # Skip descriptions already present in the target collection
            unique_chunks = self._check_duplicate_chunks([doc], collection_name=target_collection_name)
            unique_count = len(unique_chunks)

            if unique_count == 0:
                return {
                    "success": True,
                    "message": "图片描述已存在，无需重复嵌入",
                    "image_path": destination_path,
                    "image_url": image_url,
                    "chunks_count": 0,
                    "duplicate_count": 1,
                    "collection": target_collection_name
                }

            # Embed the (deduplicated) description text
            logger.info("生成图片描述嵌入向量...")
            texts = [chunk.page_content for chunk in unique_chunks]
            metadatas = [chunk.metadata for chunk in unique_chunks]
            # embed_documents dispatches to the configured backend implementation
            embeddings = self.embed_documents(texts)

            # Column-oriented insert payload: [vectors, texts, metadatas]
            data = [
                embeddings,
                texts,
                metadatas
            ]

            collection = Collection(name=target_collection_name, using="default")
            logger.info(f"Milvus插入 {len(unique_chunks)} 个图片描述到集合 {target_collection_name}...")
            collection.insert(data)
            logger.info(f"Milvus图片描述插入集合 {target_collection_name} 成功")

            # Load the collection so the new data is searchable, then free memory
            collection.load()
            collection.release()

            return {
                "success": True,
                "message": "图片及其描述嵌入成功",
                "image_path": destination_path,
                "image_url": image_url,
                "description": description,
                "chunks_count": unique_count,
                "total_chunks": 1,
                "duplicate_count": 1 - unique_count,
                "collection": target_collection_name
            }
        except Exception as e:
            logger.error(f"Milvus嵌入图片失败: {str(e)}")
            return {"success": False, "message": f"Milvus嵌入图片失败: {str(e)}"}
                
    def clear_collection(self, category: str = "default") -> bool:
        """Remove all data for a category by dropping its collection.

        The built-in 'default' category is protected and never removed.
        """
        try:
            if category == 'default':
                logger.warning(f"Milvus集合 {category} 不能删除")
                return False
            outcome = self.remove_collection(category)
            return outcome.get("success", False)
        except Exception as e:
            logger.error(f"清空集合失败: {str(e)}")
            return False

    def embedding_weburl(self, web_url:str, metadata: dict = None) -> Dict[str, Any]:
        '''Parse a web page URL and embed its extracted text content.

        :param web_url: URL of the page to fetch and parse
        :param metadata: metadata dict; must contain a "category" key naming
                         the target collection (the short-text path delegates
                         that validation to embedding_text)
        :return: result dict with success flag and embedding details
        '''
        if not web_url or not web_url.strip():
            return {"success": False, "message": "web url不能为空"}        
    
        try:
            # Parse the page; only the text content is requested here
            result = self.html_parser.parse(html_path_or_url=web_url, target=["text"])
            if result['success']:
                text_content = result['text_content']
                images = result['images']

                logger.info(f"解析html内容成功. 文本长度: {len(text_content)}, 图片个数: {len(images)}")
                
                # Bail out when the parsed page yields no usable text
                if not text_content or not text_content.strip():
                    logger.error(f"Milvus嵌入web url 失败: 解析后html 内容为空")
                    return {"success": False, "message": "Milvus嵌入web url 失败: 解析后html 内容为空"}
                
                # Milvus VARCHAR fields are capped at 65535 chars; split longer text
                if len(text_content) > 65535:
                    logger.info(f"文本长度超过Milvus字段限制，将进行分割处理")
                    # Split the long text into chunks with the configured splitter
                    chunks = self.text_splitter.split_text(text_content)
                    
                    # One Document per chunk, all sharing the same metadata
                    docs = [Document(page_content=chunk, metadata=metadata or {}) for chunk in chunks]
                    
                    # Resolve the target collection from metadata["category"]
                    if metadata and "category" in metadata:
                        target_collection_name = metadata["category"]
                        logger.info(f"Milvus将使用集合: {target_collection_name}")
                    else:
                        raise ValueError("metadata 中必须包含 category 字段")
                    
                    # Drop chunks whose content already exists in the collection
                    unique_chunks = self._check_duplicate_chunks(docs, collection_name=target_collection_name)
                    unique_count = len(unique_chunks)
                    
                    if unique_count == 0:
                        return {
                            "success": True,
                            "message": "所有文本块均已存在，无需重复嵌入",
                            "embedding": None,
                            "chunks_count": 0,
                            "total_chunks": len(chunks),
                            "duplicate_count": len(chunks),
                            "collection": target_collection_name
                        }
                    
                    # Embed the remaining (deduplicated) chunks
                    logger.info("生成文本块嵌入向量...")
                    texts = [chunk.page_content for chunk in unique_chunks]
                    metadatas = [chunk.metadata for chunk in unique_chunks]
                    embeddings = self.embed_documents(texts)
                    
                    # Column-oriented insert payload: [vectors, texts, metadatas]
                    data = [
                        embeddings,
                        texts,
                        metadatas
                    ]
                    
                    # Insert into the target collection
                    collection = Collection(name=target_collection_name, using="default")
                    logger.info(f"Milvus插入 {len(unique_chunks)} 个文本块到集合 {target_collection_name}...")
                    collection.insert(data)
                    logger.info(f"Milvus文本块插入集合 {target_collection_name} 成功")
                    
                    # Load so the new data is searchable, then free memory
                    collection.load()
                    logger.info(f"Milvus集合 {target_collection_name} 已加载")
                    collection.release()
                    
                    return {
                        "success": True,
                        "message": f"文本分割后嵌入成功",
                        "embedding": embeddings[0] if embeddings else None,
                        "chunks_count": unique_count,
                        "total_chunks": len(chunks),
                        "duplicate_count": len(chunks) - unique_count,
                        "collection": target_collection_name
                    }
                else:
                    # Short enough for a single field: delegate to the plain-text path
                    return self.embedding_text(text_content, metadata)
            else:
                logger.error(f"Milvus嵌入web url 失败: {result['message']}")
                return {"success": False, "message": f"Milvus嵌入web url 失败: {result['message']}"}
        except Exception as e:
            logger.error(f"Milvus嵌入web url 失败: {str(e)}")
            # Translate common failure modes into friendlier messages
            error_message = str(e)
            if "521" in error_message:
                error_message = f"服务器错误 (521): 无法访问该网站，可能需要验证码或该网站限制了爬虫访问"
            elif "Connection refused" in error_message or "timed out" in error_message:
                error_message = f"网络错误: 无法连接到该网站，请检查URL是否正确或网络连接是否正常"
            return {"success": False, "message": f"Milvus嵌入web url 失败: {error_message}"}
    
    def embedding_weburl_batch(self, urls: List[str], metadata: dict = None) -> Dict[str, Any]:
        '''Embed a batch of web page URLs, one at a time, collecting per-URL results.'''
        if not urls:
            return {"success": False, "message": "URL列表不能为空"}

        results = []
        success_count = 0
        failed_count = 0

        try:
            total = len(urls)
            logger.info(f"开始批量处理 {total} 个网页URL")

            for idx, url in enumerate(urls, 1):
                logger.info(f"处理URL {idx}/{total}: {url}")

                # Delegate each URL to the single-URL embedding path
                outcome = self.embedding_weburl(url, metadata)

                # Record the outcome together with the URL it belongs to
                results.append({
                    'url': url,
                    'success': outcome['success'],
                    'message': outcome.get('message', ''),
                    'embedding': outcome.get('embedding', None),
                })

                if outcome['success']:
                    success_count += 1
                else:
                    failed_count += 1
                    logger.warning(f"URL处理失败 {idx}/{total}: {url}, 原因: {outcome['message']}")

            logger.info(f"批量URL处理完成. 成功: {success_count}, 失败: {failed_count}")

            return {
                "success": True,
                "message": f"批量处理完成",
                "results": results,
                "success_count": success_count,
                "failed_count": failed_count,
            }
        except Exception as e:
            logger.error(f"批量处理URL失败: {str(e)}")
            # Unprocessed URLs count as failures in the error summary
            return {
                "success": False,
                "message": f"批量处理URL失败: {str(e)}",
                "results": results,
                "success_count": success_count,
                "failed_count": failed_count + (len(urls) - len(results)),
            }

    def close(self):
        """Disconnect the default Milvus connection, logging any failure."""
        try:
            connections.disconnect(alias="default")
        except Exception as e:
            logger.error(f"关闭Milvus连接失败: {str(e)}")
        else:
            logger.info("已关闭Milvus连接")
    
    def get_embedding(self, text: str) -> List[float]:
        """Return the embedding vector for a single text string.

        Re-raises any underlying embedder error after logging it.
        """
        try:
            vector = self.embed_query(text)
        except Exception as e:
            logger.error(f"获取嵌入向量失败: {str(e)}")
            raise
        return vector
    
    def search(self, query: str, category: Optional[str] = None, top_k: int = 5) -> List[Dict[str, Any]]:
        """Search the vector store and return matches as plain dicts.

        :param query: query text
        :param category: category/collection to search (None means 'default')
        :param top_k: maximum number of results
        :return: list of {"text", "score", "metadata"} dicts; [] on any failure
        """
        try:
            if category is None:
                category = "default"

            # Unknown categories yield no results rather than an error
            if not self.category_manager.is_category_exist(category):
                logger.warning(f"搜索的分类 '{category}' 不存在")
                return []

            # Delegate to the internal query method, without reranking
            outcome = self.query(category, query, top_k=top_k, rerank=False)
            if not outcome.get("success", False):
                return []

            # Project each hit onto the interface's result shape
            hits = []
            for item in outcome.get("similar_docs", []):
                hits.append({
                    "text": item.get("text", ""),
                    "score": item.get("score", 0),
                    "metadata": item.get("metadata", {}),
                })
            return hits
        except Exception as e:
            logger.error(f"搜索失败: {str(e)}")
            return []
    
    def add_document(self, text: str, metadata: Optional[Dict[str, Any]] = None, category: Optional[str] = None) -> bool:
        """Add a single text document to the vector store.

        :param text: document content
        :param metadata: optional metadata; a copy is taken so the caller's
                         dict is never mutated
        :param category: target category, created on demand (defaults to 'default')
        :return: True on success
        """
        try:
            if category is None:
                category = "default"

            # Create the category lazily if it does not exist yet
            if not self.category_manager.is_category_exist(category):
                self.category_manager.add_category(category)

            # BUGFIX: copy the metadata instead of mutating the caller's dict
            metadata = dict(metadata) if metadata else {}
            metadata["category"] = category

            # Delegate the actual embedding/insert to embedding_text
            result = self.embedding_text(text, metadata)
            return result.get("success", False)
        except Exception as e:
            logger.error(f"添加文档失败: {str(e)}")
            return False
    
    def delete_document(self, doc_id: str, category: Optional[str] = None) -> bool:
        """Delete a document by id from the vector store.

        :param doc_id: id of the document to delete
        :param category: optional category to restrict the deletion to; when
                         None, every known category is scanned. (Backward
                         compatible addition — update_document() passes a
                         category, which the old single-argument signature
                         rejected with a TypeError.)
        :return: True if a document was deleted, False otherwise
        """
        try:
            # Restrict to one category when given, otherwise scan them all
            categories = [category] if category else self.list_categories()

            for cat in categories:
                if not utility.has_collection(cat):
                    continue
                collection = Collection(cat)

                # NOTE(review): assumes the primary key is stored as a quoted
                # string value — confirm against the collection schema
                expr = f"id == '{doc_id}'"

                # BUGFIX: Collection.delete returns a MutationResult, not an
                # int — comparing the result object with `> 0` raised TypeError
                result = collection.delete(expr)
                collection.flush()
                collection.release()

                if result.delete_count > 0:
                    logger.info(f"成功从分类 '{cat}' 中删除文档 ID: {doc_id}")
                    return True

            logger.warning(f"未找到文档 ID: {doc_id}")
            return False
        except Exception as e:
            logger.error(f"删除文档失败: {str(e)}")
            return False
    
    def list_categories(self) -> List[str]:
        """Return all known categories; [] when the lookup fails."""
        try:
            # The category manager is the source of truth for categories
            return self.category_manager.get_all_categories()
        except Exception as e:
            logger.error(f"获取分类列表失败: {str(e)}")
            return []
    
    def add_category(self, category: str) -> bool:
        """Register a new category and create its backing Milvus collection.

        :param category: name of the category to add
        :return: the category manager's result (falsy on rejection/failure)
        """
        try:
            # Reject invalid category names up front
            if not self.category_manager.valid_name(category):
                logger.warning(f"分类名 '{category}' 无效")
                return False

            added = self.category_manager.add_category(category)
            if not added:
                return added

            # Mirror the new category as a Milvus collection
            if not connections.has_connection(alias="default"):
                self._connect_to_milvus()
            self._init_collection(category)

            return added
        except Exception as e:
            logger.error(f"添加分类失败: {str(e)}")
            return False
    
    def delete_category(self, category: str) -> bool:
        """Delete a category and its Milvus collection, if present."""
        try:
            # Drop the backing collection first, when one exists
            if utility.has_collection(category):
                utility.drop_collection(category)
                logger.info(f"已从Milvus中删除集合 '{category}'")
            # Then unregister the category itself
            return self.category_manager.remove_category(category)
        except Exception as e:
            logger.error(f"删除分类失败: {str(e)}")
            return False
            
    def get_collection_stats(self, category: str = "default") -> Dict:
        """Return entity count and status for a category's collection.

        :param category: collection/category name
        :return: {"count", "status", ...}; status is one of
                 "exists" / "not_exists" / "error"
        """
        try:
            if not utility.has_collection(category):
                return {"count": 0, "status": "not_exists"}

            collection = Collection(category)
            entity_count = collection.num_entities
            # Free collection memory once the count has been read
            collection.release()

            return {"count": entity_count, "status": "exists", "name": category}
        except Exception as e:
            logger.error(f"获取集合统计信息失败: {str(e)}")
            return {"count": 0, "status": "error", "error": str(e)}
            
    def batch_add_documents(self, documents: List[Dict], category: str = "default") -> Dict:
        """Add many documents at once.

        :param documents: list of {"content": str, "metadata": dict} items
        :param category: target category for all documents
        :return: summary dict with added/skipped counts and failure details
        """
        try:
            if not documents:
                return {"success": True, "added": 0, "skipped": 0, "message": "没有文档需要添加"}

            success_count = 0
            skipped_count = 0
            results = []

            for doc in documents:
                content = doc.get("content", "")
                # BUGFIX: copy the metadata so the caller's dict (the entry
                # stored inside `documents`) is not mutated in place
                metadata = dict(doc.get("metadata", {}) or {})
                metadata["category"] = category

                result = self.embedding_text(content, metadata)

                if result.get("success", False):
                    success_count += 1
                else:
                    skipped_count += 1
                    # Keep the failure reason for the caller's report
                    results.append({"content": content, "error": result.get("message", "添加失败")})

            logger.info(f"批量添加文档完成: 新增 {success_count} 条，跳过 {skipped_count} 条记录")

            return {
                "success": True,
                "added": success_count,
                "skipped": skipped_count,
                "message": f"批量添加文档完成",
                "details": results
            }
        except Exception as e:
            logger.error(f"批量添加文档失败: {str(e)}")
            return {
                "success": False,
                "added": 0,
                "skipped": 0,
                "message": str(e)
            }

# Example usage when run as a script
if __name__ == "__main__":
    # Use a local embedding model
    try:
        # Path to the locally cached embedding model
        embedding_model_path = r"/Users/apple/.cache/huggingface/hub/models--shibing624--text2vec-base-chinese/snapshots/183bb99aa7af74355fb58d16edf8c13ae7c5433e"

        # Create the embedding service instance
        embedding_service = EmbeddingService(
            model_name="huggingface",
            model_path=embedding_model_path,
            collection_name="example_documents"
        )

        # Example document from test_docs
        example_doc_path = "./test_docs/drone.md"

        # Embed the document
        print(f"正在嵌入文档: {example_doc_path}")
        embedding_result = embedding_service.embedding(example_doc_path)
        print(f"嵌入结果: {embedding_result}")

        # Query the embedded document
        query = "圈圈机使用的电池容量一般是多少？"
        print(f"正在查询: {query}")
        # BUGFIX: `rerank_model` was undefined here and raised NameError;
        # disable reranking explicitly in this example
        query_result = embedding_service.query("default", query, top_k=3, rerank=False)
        print(f"查询结果: {query_result}")

        # Close the Milvus connection
        embedding_service.close()
    except ImportError as e:
        print(f"导入错误: {e}")
        print("请安装必要的依赖包，例如: pip install langchain pymilvus sentence-transformers")
    except Exception as e:
        print(f"发生错误: {e}")