#coding:utf-8

"""
    向量嵌入类
    暴露两个主要接口:
    1. embedding: 处理文本并保存嵌入
    2. query: 检索并返回重排序结果
"""
import logging
import warnings
import os
import time
import uuid

from typing import List, Union
from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType, utility
import numpy as np
from sentence_transformers import SentenceTransformer
from sentence_transformers import CrossEncoder



# Configure module-wide logging (INFO level, timestamped format)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('mcp.embedding')

# Suppress library warnings (e.g. transformers/torch deprecation noise)
warnings.filterwarnings('ignore')


class Embedding:
    """Vector store for RAG built on Milvus.

    Public interface:
      - ``embedding()``: split text or a file into chunks, embed them with a
        SentenceTransformer, and store them (with optional image metadata).
      - ``query()``: vector-search the collection and rerank the hits with a
        cross-encoder.
    """

    def __init__(self,
                 embedding_model_name: str = "shibing624/text2vec-base-chinese",
                 embedding_model_path: Union[str, None] = None,
                 rerank_model_name: str = "cross-encoder/mmarco-mMiniLMv2-L12-H384-v1",
                 rerank_model_path: Union[str, None] = None,
                 collection_name: str = "rag_collection"):
        """Load the two models, connect to Milvus, and prepare the collection.

        Args:
            embedding_model_name: Hugging Face id used when no local path exists.
            embedding_model_path: optional local directory of the embedding model.
            rerank_model_name: Hugging Face cross-encoder id used as fallback.
            rerank_model_path: optional local directory of the rerank model.
            collection_name: Milvus collection to load or create.
        """
        # Load the embedding model, preferring a local copy when it exists.
        if embedding_model_path and os.path.exists(embedding_model_path):
            logger.info(f"从本地路径加载嵌入模型: {embedding_model_path}")
            self.embedding_model = SentenceTransformer(embedding_model_path)
        else:
            logger.info(f"从Hugging Face加载嵌入模型: {embedding_model_name}")
            self.embedding_model = SentenceTransformer(embedding_model_name)
        logger.info("嵌入模型加载完成")

        # Load the cross-encoder used for reranking, same local-first policy.
        if rerank_model_path and os.path.exists(rerank_model_path):
            logger.info(f"从本地路径加载重排序模型: {rerank_model_path}")
            self.rerank_model = CrossEncoder(rerank_model_path)
        else:
            logger.info(f"从Hugging Face加载重排序模型: {rerank_model_name}")
            self.rerank_model = CrossEncoder(rerank_model_name)
        logger.info("重排序模型加载完成")

        # Connect to the Milvus service; MILVUS_URI is expected as "host:port".
        logger.info("连接到 Milvus 服务")
        milvus_uri = os.environ.get('MILVUS_URI', 'localhost:19530')
        # NOTE(review): this split breaks on URIs with a scheme such as
        # "http://host:19530" — confirm the deployment always sets host:port.
        host, port = milvus_uri.split(':')
        connections.connect(alias="default", host=host, port=port)
        logger.info(f"成功连接到 Milvus 服务: {milvus_uri}")

        # Collection schema. dim=768 matches text2vec-base-chinese output.
        # NOTE(review): pymilvus FieldSchema takes ``default_value=``; the
        # ``default=`` kwarg used here may be silently ignored — verify against
        # the installed pymilvus version.
        self.collection_name = collection_name
        fields = [
            FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, max_length=64),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=768),
            FieldSchema(name="document", dtype=DataType.VARCHAR, max_length=5120),
            FieldSchema(name="is_image_caption", dtype=DataType.BOOL, default=False),
            FieldSchema(name="image_path", dtype=DataType.VARCHAR, max_length=1024, default=""),
            FieldSchema(name="image_size", dtype=DataType.INT64, default=0)
        ]
        schema = CollectionSchema(fields=fields, description="用于存储文档和嵌入向量的集合")

        # Load the collection if it already exists, otherwise create it.
        if utility.has_collection(self.collection_name):
            logger.info(f"加载现有集合: {self.collection_name}")
            self.collection = Collection(name=self.collection_name)
            logger.info(f"成功加载现有集合: {self.collection_name}")
        else:
            logger.info(f"创建新集合: {self.collection_name}")
            self.collection = Collection(name=self.collection_name, schema=schema)
            logger.info(f"成功创建新集合: {self.collection_name}")
            # Give the server a moment to finish creating the collection.
            time.sleep(1)

        # Ensure a vector index exists; Milvus treats re-creation of the same
        # index as a no-op, and any failure is logged but non-fatal.
        try:
            index_params = {
                "index_type": "IVF_FLAT",
                "metric_type": "L2",
                "params": {"nlist": 1024}
            }
            self.collection.create_index(
                field_name="embedding",
                index_params=index_params
            )
            logger.info("成功创建索引")
            # Give the server a moment to finish building the index.
            time.sleep(1)
        except Exception as e:
            logger.error(f"创建索引时出错: {str(e)}")

        # Load the collection into memory so it is searchable.
        self.collection.load()
        logger.info("集合加载完成")

    def _split_text(self, text: str) -> List[str]:
        """Split *text* on blank lines into stripped, non-empty chunks."""
        logger.info(f"开始切分文本，文本长度: {len(text)}字符")
        chunks = [chunk.strip() for chunk in text.split('\n\n') if chunk.strip()]
        logger.info(f"文本切分完成，得到 {len(chunks)} 个文本块")
        return chunks

    def _split_file(self, doc_file: str) -> List[str]:
        """Read a UTF-8 text file and split it into chunks.

        Raises:
            FileNotFoundError: the path does not exist.
            PermissionError: the file is not readable.
            UnicodeDecodeError: the file is not valid UTF-8.
            IOError: any other read failure (chained to the original error).
        """
        logger.info(f"开始读取文件: {doc_file}")
        if not os.path.exists(doc_file):
            error_msg = f"文件不存在: {doc_file}"
            logger.error(error_msg)
            raise FileNotFoundError(error_msg)

        if not os.access(doc_file, os.R_OK):
            error_msg = f"没有权限读取文件: {doc_file}"
            logger.error(error_msg)
            raise PermissionError(error_msg)

        try:
            with open(doc_file, 'r', encoding='utf-8') as f:
                text = f.read()
            logger.info(f"文件读取成功，文件长度: {len(text)}字符")

            if not text.strip():
                logger.warning(f"文件内容为空: {doc_file}")
                return []

            chunks = [chunk.strip() for chunk in text.split('\n\n') if chunk.strip()]
            logger.info(f"文件切分完成，得到 {len(chunks)} 个文本块")
            return chunks
        except UnicodeDecodeError:
            error_msg = f"文件编码错误，无法读取: {doc_file}"
            logger.error(error_msg)
            # Bug fix: the original `raise UnicodeDecodeError(error_msg)` is
            # itself a TypeError (UnicodeDecodeError needs 5 constructor
            # arguments); re-raise the original exception instead.
            raise
        except Exception as e:
            error_msg = f"读取文件失败: {str(e)}"
            logger.error(error_msg)
            raise IOError(error_msg) from e

    def _embed_chunk(self, chunk: str) -> List[float]:
        """Return the L2-normalized embedding of *chunk* as a plain list."""
        logger.info(f"开始嵌入文本块，长度: {len(chunk)}字符")
        embedding = self.embedding_model.encode(chunk, normalize_embeddings=True)
        logger.info("文本块嵌入完成")
        return embedding.tolist()

    def _save_embeddings(self, chunks: List[str], embeddings: List[List[float]],
                        is_image_captions: Union[List[bool], None] = None,
                        image_paths: Union[List[str], None] = None,
                        image_sizes: Union[List[int], None] = None) -> None:
        """Insert chunks and their embeddings into Milvus as row dicts.

        Bug fix: the original performed the row-based insert TWICE (a second
        ``if ids:`` block repeated the insert after the first one succeeded),
        duplicating every document under fresh UUIDs. A single insert + flush
        is done here.

        Args:
            chunks: document texts (truncated to the schema's 5120 chars).
            embeddings: one 768-dim vector per chunk.
            is_image_captions/image_paths/image_sizes: optional per-chunk
                metadata; defaults of False/""/0 are used when omitted.
        """
        logger.info(f"开始保存 {len(chunks)} 个文本块的嵌入")

        ids = []
        embeddings_list = []
        documents = []
        is_image_captions_list = []
        image_paths_list = []
        image_sizes_list = []

        for i, (chunk, embedding) in enumerate(zip(chunks, embeddings)):
            # UUID hex gives a unique, schema-compatible primary key.
            doc_id = uuid.uuid4().hex
            ids.append(doc_id)
            embeddings_list.append(embedding)
            documents.append(chunk)
            is_image_captions_list.append(is_image_captions[i] if is_image_captions is not None else False)
            image_paths_list.append(image_paths[i] if image_paths is not None else "")
            image_sizes_list.append(image_sizes[i] if image_sizes is not None else 0)
            logger.info(f"添加文档，ID: {doc_id}, 长度: {len(chunk)}字符")

        if not ids:
            return

        # Clamp values to the schema's VARCHAR limits.
        ids = [str(doc_id)[:64] for doc_id in ids]
        documents = [doc[:5120] for doc in documents]
        image_paths_list = [path[:1024] for path in image_paths_list]

        try:
            logger.info("使用行-based格式插入...")
            row_data = [
                {
                    'id': ids[i],
                    'embedding': embeddings_list[i],
                    'document': documents[i],
                    'is_image_caption': is_image_captions_list[i],
                    'image_path': image_paths_list[i],
                    'image_size': image_sizes_list[i],
                }
                for i in range(len(ids))
            ]
            self.collection.insert(data=row_data)
            logger.info(f"成功使用行-based格式插入 {len(ids)} 个文档")
            # Flush so the new rows become searchable immediately.
            self.collection.flush()
            logger.info("集合已刷新")
        except Exception as e:
            logger.error(f"行-based插入数据时出错: {str(e)}")

    def retrieve(self, query: str, top_k: int = 5, rerank: bool = True) -> List[dict]:
        """Public wrapper around :meth:`_retrieve`."""
        return self._retrieve(query, top_k, rerank)

    def _retrieve(self, query: str, top_k: int = 5, rerank: bool = True) -> List[dict]:
        """Vector-search Milvus for the chunks closest to *query*.

        Returns a list of dicts with keys ``document``, ``image_path``,
        ``image_size`` and ``is_image_caption``; empty list on any search
        error (errors are logged, not raised).
        """
        logger.info(f"开始检索，查询: '{query}', top_k: {top_k}, rerank: {rerank}")
        query_embedding = self._embed_chunk(query)

        try:
            results = self.collection.search(
                data=[query_embedding],
                anns_field="embedding",
                param={"metric_type": "L2", "params": {"nprobe": 10}},
                limit=top_k,
                output_fields=["document", "image_path", "image_size", "is_image_caption"]
            )

            retrieved = []
            for result in results[0]:
                retrieved.append({
                    "document": result.entity.get("document"),
                    "image_path": result.entity.get("image_path"),
                    "image_size": result.entity.get("image_size"),
                    "is_image_caption": result.entity.get("is_image_caption")
                })

            logger.info(f"检索完成，找到 {len(retrieved)} 个结果")

            # Optionally rerank; pointless with fewer than two candidates.
            if rerank and len(retrieved) > 1:
                logger.info("开始对结果进行重排序")
                retrieved = self._rerank(query, retrieved, top_k)
                logger.info("重排序完成")

            return retrieved
        except Exception as e:
            logger.error(f"检索时出错: {str(e)}")
            return []

    def retrieve_exact(self, document: str, top_k: int = 10) -> List[dict]:
        """Exact-match lookup of *document* via a Milvus scalar query.

        Returns matching rows, or an empty list on error (logged, not raised).
        """
        logger.info(f"开始精确匹配文档，内容: '{document[:50]}...', top_k: {top_k}")

        try:
            # NOTE(review): doubling single quotes is SQL-style escaping;
            # confirm it matches Milvus boolean-expression escaping rules.
            escaped_document = document.replace("'", "''")
            expr = f"document == '{escaped_document}'"
            results = self.collection.query(
                expr=expr,
                output_fields=["document", "image_path", "image_size", "is_image_caption"],
                limit=top_k
            )

            logger.info(f"精确匹配完成，找到 {len(results)} 个结果")
            return results
        except Exception as e:
            logger.error(f"精确匹配时出错: {str(e)}")
            return []

    def _rerank(self, query: str, retrieved_chunks: List[dict], top_k: int = 5) -> List[dict]:
        """Rerank *retrieved_chunks* with the cross-encoder; keep the top_k best."""
        logger.info(f"开始重排序，查询: '{query}', 候选结果数: {len(retrieved_chunks)}, top_k: {top_k}")
        scores = self.rerank_model.predict([(query, chunk['document']) for chunk in retrieved_chunks])

        scored_chunks = list(zip(retrieved_chunks, scores))
        scored_chunks.sort(key=lambda x: x[1], reverse=True)

        reranked = [chunk for chunk, _ in scored_chunks[:top_k]]
        logger.info(f"重排序完成，返回 {len(reranked)} 个结果")
        return reranked

    def embedding(self, content: Union[str, None] = None, file_path: Union[str, None] = None,
                        is_image_caption: bool = False, image_path: str = "", image_size: int = 0) -> None:
        """Process text content or a file and persist the embeddings.

        Args:
            content: raw text to embed.
            file_path: path of a UTF-8 text file to embed.
            is_image_caption: whether the chunks describe an image.
            image_path: path of the associated image, if any.
            image_size: size of the associated image, if any.

        Raises:
            ValueError: neither ``content`` nor ``file_path`` was provided.

        Note: exactly one of ``content`` or ``file_path`` should be given;
        ``content`` wins when both are present.
        """
        if content is None and file_path is None:
            logger.error("必须提供content或file_path中的一个")
            raise ValueError("必须提供content或file_path中的一个")

        if content is not None:
            logger.info("开始处理提供的文本内容")
            chunks = self._split_text(content)
            if not chunks:
                logger.warning("提供的文本内容为空或无法切分")
                return
        else:
            logger.info(f"开始处理文件: {file_path}")
            try:
                chunks = self._split_file(file_path)
                if not chunks:
                    logger.warning(f"文件内容为空或无法切分: {file_path}")
                    return
            except Exception as e:
                logger.error(f"处理文件时出错: {str(e)}")
                raise

        # Best-effort dedup against documents already in the collection.
        # NOTE(review): the query is capped at 10000 rows, so duplicates may
        # slip through on larger collections.
        logger.info(f"开始检查 {len(chunks)} 个文本块是否重复")
        existing_docs = set()
        try:
            result = self.collection.query(expr="", output_fields=["document"], limit=10000)
            if result:
                existing_docs = set(item["document"] for item in result)
                logger.info(f"成功获取 {len(existing_docs)} 个现有文档")
        except Exception as e:
            # Dedup is best-effort: on failure we simply insert everything.
            logger.warning(f"获取现有文档失败: {str(e)}")

        unique_chunks = []
        for chunk in chunks:
            if chunk not in existing_docs:
                unique_chunks.append(chunk)
                existing_docs.add(chunk)  # also dedup within this batch
            else:
                logger.info(f"跳过重复文档，长度: {len(chunk)}字符")

        logger.info(f"去重后剩余 {len(unique_chunks)} 个文本块")
        if not unique_chunks:
            logger.warning("没有找到可处理的唯一文本块")
            return

        embeddings = [self._embed_chunk(chunk) for chunk in unique_chunks]
        logger.info(f"成功生成 {len(embeddings)} 个嵌入，预期{len(unique_chunks)}个")

        # The whole batch shares the same image metadata.
        is_image_captions = [is_image_caption] * len(unique_chunks)
        image_paths = [image_path] * len(unique_chunks)
        image_sizes = [image_size] * len(unique_chunks)

        self._save_embeddings(unique_chunks, embeddings, is_image_captions=is_image_captions, image_paths=image_paths, image_sizes=image_sizes)
        logger.info("嵌入保存完成")

    def query(self, query: str, top_k: int = 3) -> List[dict]:
        """Retrieve candidates and return the reranked top results.

        Args:
            query: search query string.
            top_k: number of results to return.

        Returns:
            Reranked result dicts (``document`` plus image metadata fields).
        """
        logger.info(f"开始搜索和重排序，查询: '{query}', top_k: {top_k}")
        # Fetch extra candidates for reranking. Bug fix: pass rerank=False so
        # the cross-encoder runs once here instead of twice (the original
        # reranked inside _retrieve and then again below — same final order,
        # double the work).
        retrieved_chunks = self._retrieve(query, top_k * 2, rerank=False)

        if not retrieved_chunks:
            logger.info("没有找到检索结果")
            return []

        reranked_chunks = self._rerank(query, retrieved_chunks, top_k)
        logger.info(f"搜索和重排序完成，返回 {len(reranked_chunks)} 个结果")
        return reranked_chunks


if __name__ == '__main__':
    logger.info("开始运行嵌入模型演示")
    # Initialize the embedding pipeline (downloads models on first run).
    embedding = Embedding(
        embedding_model_name="shibing624/text2vec-base-chinese",
        rerank_model_name="cross-encoder/mmarco-mMiniLMv2-L12-H384-v1"
    )

    try:
        # Embed and store a demo document.
        embedding.embedding(file_path="doc.md")

        # Search and rerank.
        query = "哆啦A梦使用的3个秘密道具分别是什么？"
        rerank_chunks = embedding.query(query, top_k=3)

        logger.info(f"查询完成，返回 {len(rerank_chunks)} 个重排序结果")
        for i, chunk in enumerate(rerank_chunks):
            # Bug fix: query() returns dicts, so chunk[:50] raised a TypeError
            # (silently swallowed below); log the document text instead.
            logger.info(f"结果 [{i}]: {chunk.get('document', '')[:50]}...")
    except Exception as e:
        logger.error(f"程序执行出错: {str(e)}")

    logger.info("程序运行结束")
