import os
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import PyPDF2
from docx import Document

from middleware.milvus_client import MilvusClient, VectorDatabaseRouter, SearchResult
from middleware.redis_adapter import get_redis_adapter
from tools.text_processing import text_cleaning, split_text_by_length, get_embedding
from utils.exceptions import ToolError
from utils.logger import Logger

# Module-level logger (tagged with this tool's agent_name)
logger = Logger.get_logger(agent_name="doc_qa_tool")

# Vector retrieval configuration.
# collection_mapping routes each collection name to a backend understood by
# VectorDatabaseRouter ("milvus" here for both collections).
VECTOR_RETRIEVAL_CONFIG = {
    "default_top_k": 3,
    "similarity_threshold": 0.6,
    "batch_size": 100,
    "chunk_size": 512,
    "chunk_overlap": 50,
    "collection_mapping": {
        "document_vectors": "milvus",  # chunk-embedding collection -> milvus
        "document_metadata": "milvus"   # per-document metadata collection -> milvus
    }
}


class DocQATools:
    """Document Q&A core toolset: parsing, vector storage, retrieval augmentation.

    Pipeline: parse_document -> build_doc_vector_db -> search_doc_chunks ->
    generate_doc_answer. Storage goes through a VectorDatabaseRouter (Milvus or
    PostgreSQL+PgVector) and per-document metadata is cached in Redis.
    """

    def __init__(self):
        """Initialize the vector-database router, Redis client and collections.

        Raises:
            ToolError: wraps any failure during client or collection setup.
        """
        try:
            # Router transparently dispatches to Milvus or PostgreSQL+PgVector.
            self.vector_router = VectorDatabaseRouter()
            self.redis_client = get_redis_adapter()  # Redis adapter; supports cluster mode
            # NOTE(review): "doc_qa_kb" has no entry in
            # VECTOR_RETRIEVAL_CONFIG["collection_mapping"], so every mapping
            # lookup for it falls back to "milvus" — confirm this is intended.
            self.doc_collection = "doc_qa_kb"  # legacy document-vector collection name
            self.supported_formats = {"pdf", "docx"}  # accepted file extensions
            self.max_chunk_length = 512  # max characters per text chunk
            self.chunk_overlap = 50  # characters shared by adjacent chunks
            self.embedding_cache_ttl = 86400  # embedding cache TTL: 24h, in seconds

            # Create collections eagerly so later inserts hit existing targets.
            self._init_vector_collections()
            logger.info("DocQATools initialized successfully with vector database router")
        except Exception as e:
            logger.error(f"DocQATools初始化失败: {str(e)}", exc_info=True)
            raise ToolError(
                message="文档问答工具初始化失败",
                context={"error": str(e)}
            ) from e

    @staticmethod
    def _file_ext(file_path: str) -> str:
        """Return the lower-cased extension of *file_path* without the dot."""
        return Path(file_path).suffix.lower().lstrip(".")

    def _init_vector_collections(self):
        """Create both document collections if missing (best-effort, logged)."""
        try:
            self._init_collection("document_vectors")   # chunk embeddings
            self._init_collection("document_metadata")  # per-document metadata
        except Exception as e:
            # Deliberately non-fatal: the DB may be temporarily unreachable;
            # real inserts will surface any persistent problem.
            logger.error(f"Failed to initialize vector collections: {str(e)}")

    def _init_collection(self, collection_name: str):
        """Create one collection with its schema if it does not already exist.

        Args:
            collection_name: "document_vectors" or "document_metadata"; any
                other name falls into the metadata-schema branch.
        """
        try:
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(collection_name, "milvus")

            # Only create the collection when it is absent.
            if not self.vector_router.collection_exists(collection_name, db_type):
                if collection_name == "document_vectors":
                    # One row per text chunk, carrying its 768-dim embedding.
                    schema = [
                        {"name": "id", "type": "VARCHAR", "params": {"max_length": 64}, "is_primary": True},
                        {"name": "document_id", "type": "VARCHAR", "params": {"max_length": 64}},
                        {"name": "page_num", "type": "INT64"},
                        {"name": "chunk_num", "type": "INT64"},
                        {"name": "text", "type": "VARCHAR", "params": {"max_length": 4096}},
                        {"name": "embedding", "type": "FLOAT_VECTOR", "params": {"dim": 768}}
                    ]
                    index_params = {"index_type": "IVF_FLAT", "nlist": 1024}
                else:  # document_metadata
                    # One row per uploaded document; no vector field.
                    schema = [
                        {"name": "document_id", "type": "VARCHAR", "params": {"max_length": 64}, "is_primary": True},
                        {"name": "filename", "type": "VARCHAR", "params": {"max_length": 512}},
                        {"name": "file_type", "type": "VARCHAR", "params": {"max_length": 32}},
                        {"name": "upload_time", "type": "VARCHAR", "params": {"max_length": 64}},
                        {"name": "page_count", "type": "INT64"},
                        {"name": "chunk_count", "type": "INT64"}
                    ]
                    index_params = None  # metadata collection needs no vector index

                self.vector_router.create_collection(
                    collection_name=collection_name,
                    schema=schema,
                    index_params=index_params,
                    db_type=db_type
                )
                logger.info(f"Created collection: {collection_name} with {db_type}")
        except Exception as e:
            logger.error(f"Failed to initialize collection {collection_name}: {str(e)}")

    def _init_milvus_collection(self):
        """Create the legacy `doc_qa_kb` collection (kept for backward compat)."""
        try:
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.doc_collection, "milvus")

            if not self.vector_router.collection_exists(self.doc_collection, db_type):
                collection_schema = [
                    {"name": "id", "type": "VARCHAR", "params": {"max_length": 64}, "is_primary": True},
                    {"name": "text", "type": "VARCHAR", "params": {"max_length": 2048}},
                    {"name": "embedding", "type": "FLOAT_VECTOR", "params": {"dim": 768}}
                ]
                self.vector_router.create_collection(
                    collection_name=self.doc_collection,
                    schema=collection_schema,
                    index_params={"index_type": "IVF_FLAT", "nlist": 1024},
                    db_type=db_type
                )
                logger.info(f"Initialized legacy collection: {self.doc_collection}")
        except Exception as e:
            logger.error(f"Failed to initialize legacy collection: {str(e)}")

    def _validate_doc_format(self, file_path: str) -> bool:
        """Return True if the file extension is one of `supported_formats`."""
        file_ext = self._file_ext(file_path)
        if file_ext not in self.supported_formats:
            logger.warning(f"不支持的文档格式: {file_ext}，支持格式：{self.supported_formats}")
            return False
        return True

    def parse_document(self, file_path: str) -> List[str]:
        """Parse a PDF/Word document into cleaned, overlapping text chunks.

        Args:
            file_path: path to a local .pdf or .docx file.

        Returns:
            List of text chunks, each at most `max_chunk_length` characters.

        Raises:
            ToolError: if the file is missing, its format is unsupported, or
                parsing fails.
        """
        if not os.path.exists(file_path):
            logger.error(f"文档不存在: {file_path}")
            raise ToolError(
                message="文档不存在",
                context={"file_path": file_path}
            )

        if not self._validate_doc_format(file_path):
            raise ToolError(
                message="不支持的文档格式",
                context={"file_path": file_path, "supported_formats": list(self.supported_formats)}
            )

        try:
            file_ext = self._file_ext(file_path)
            parts: List[str] = []

            # PDF: one text blob per page.
            if file_ext == "pdf":
                with open(file_path, "rb") as f:
                    for page in PyPDF2.PdfReader(f).pages:
                        # extract_text() may return None (e.g. image-only pages).
                        parts.append(page.extract_text() or "")

            # Word: one blob per paragraph.
            elif file_ext == "docx":
                for paragraph in Document(file_path).paragraphs:
                    parts.append(paragraph.text)

            # join avoids quadratic string concatenation; each piece keeps its
            # trailing newline, identical to the previous += "\n" layout.
            full_text = "".join(part + "\n" for part in parts)

            # Clean then split for vector storage; punctuation is kept so the
            # sentence-based splitter can find boundaries.
            cleaned_text = text_cleaning(full_text, remove_punctuation=False)
            chunks = split_text_by_length(
                cleaned_text,
                max_length=self.max_chunk_length,
                overlap=self.chunk_overlap,
                split_by_sentence=True
            )

            logger.debug(f"文档解析完成: 原文本长度={len(full_text)}, 分片数={len(chunks)}, file_path={file_path}")
            return chunks
        except Exception as e:
            logger.error(f"文档解析失败: file_path={file_path}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="文档解析失败",
                context={"file_path": file_path, "error": str(e)}
            ) from e

    def build_doc_vector_db(self, file_path: str, doc_id: str) -> Dict:
        """Build the vector index for one document (parse -> embed -> store).

        Args:
            file_path: path to the source document.
            doc_id: caller-chosen unique document id; chunk ids are derived as
                "{doc_id}_{chunk_index}".

        Returns:
            Dict with doc_id, file_path, chunk_count and insert_ids.

        Raises:
            ToolError: on parse failure, empty content, or storage errors.
        """
        try:
            # 1. Parse the document into text chunks.
            doc_chunks = self.parse_document(file_path)
            if not doc_chunks:
                raise ToolError(message="文档解析后无有效文本", context={"file_path": file_path})

            # 2. Embed each chunk; silently drop chunks whose embedding failed.
            embeddings = []
            valid_chunks = []
            for chunk in doc_chunks:
                embedding = get_embedding(chunk, cache=True)
                if embedding:
                    embeddings.append(embedding)
                    valid_chunks.append(chunk)

            if not valid_chunks:
                raise ToolError(message="无有效文本分片生成嵌入", context={"file_path": file_path})

            # 3. Bulk-insert into the vector database via the router.
            data = [
                {"id": f"{doc_id}_{idx}", "text": chunk, "embedding": embedding}
                for idx, (chunk, embedding) in enumerate(zip(valid_chunks, embeddings))
            ]

            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.doc_collection, "milvus")

            insert_result = self.vector_router.insert(
                collection_name=self.doc_collection,
                data=data,
                db_type=db_type
            )

            # 4. Cache document meta info (doc_id -> chunk count / path) in Redis.
            doc_meta = {
                "doc_id": doc_id,
                "file_path": file_path,
                "chunk_count": len(valid_chunks),
                "insert_ids": insert_result if isinstance(insert_result, list) else []
            }
            # NOTE(review): str(doc_meta) stores the Python repr; switch to
            # json.dumps if any consumer needs to parse this value back.
            self.redis_client.set(
                key=f"doc_qa:meta:{doc_id}",
                value=str(doc_meta),
                ex=self.embedding_cache_ttl * 7  # keep meta info for 7 days
            )

            # Also persist one row in the document_metadata collection.
            metadata_db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get("document_metadata", "milvus")
            metadata = {
                "document_id": doc_id,
                "filename": os.path.basename(file_path),
                "file_type": self._file_ext(file_path),
                # Fix: datetime was referenced here without being imported
                # (NameError at runtime); the import now lives at module top.
                "upload_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "page_count": 0,  # page-count extraction not implemented yet
                "chunk_count": len(valid_chunks)
            }
            self.vector_router.insert(
                collection_name="document_metadata",
                data=[metadata],
                db_type=metadata_db_type
            )

            logger.info(f"文档向量库构建完成: doc_id={doc_id}, 插入分片数={len(valid_chunks)}, db_type={db_type}")
            return doc_meta
        except ToolError:
            raise
        except Exception as e:
            logger.error(f"文档向量库构建失败: doc_id={doc_id}, file_path={file_path}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="文档向量库构建失败",
                context={"doc_id": doc_id, "file_path": file_path, "error": str(e)}
            ) from e

    def search_doc_chunks(
            self,
            query: str,
            doc_id: Optional[str] = None,
            top_k: int = 5
    ) -> List[Tuple[str, float, str]]:
        """Retrieve document chunks relevant to *query*.

        Args:
            query: natural-language search text.
            doc_id: optional document id; restricts hits to that document.
            top_k: maximum number of hits requested from the vector DB.

        Returns:
            List of (chunk_text, similarity, chunk_id) tuples, sorted by
            similarity descending. Empty list when the query embedding fails.

        Raises:
            ToolError: on any retrieval error.
        """
        try:
            # 1. Embed the query; bail out gracefully when that fails.
            query_embedding = get_embedding(query)
            if not query_embedding:
                logger.warning("查询嵌入生成失败，返回空结果")
                return []

            # 2. Optional per-document filter: chunk ids start with "{doc_id}_".
            # NOTE(review): "$regex" filter syntax is assumed to be understood
            # by the active router backend — confirm for PgVector.
            filter_expr = {"id": {"$regex": f"^{doc_id}_"}} if doc_id else None

            # 3. Vector search through the router.
            db_type = VECTOR_RETRIEVAL_CONFIG["collection_mapping"].get(self.doc_collection, "milvus")
            results = self.vector_router.search(
                collection_name=self.doc_collection,
                query_vectors=[query_embedding],
                top_k=top_k,
                metric_type="COSINE",
                filter=filter_expr,
                db_type=db_type
            )

            # 4. Resolve hit ids to stored entities in one batched lookup.
            retrieved_chunks = []
            if results and len(results) > 0:
                hit_ids = [hit.id for hit in results[0]]
                if hit_ids:
                    entities = self.vector_router.get_entities_by_id(
                        collection_name=self.doc_collection,
                        ids=hit_ids,
                        db_type=db_type
                    )

                    # Map id -> entity so each hit is matched in O(1).
                    id_to_entity = {entity.get("id"): entity for entity in entities}

                    for hit in results[0]:
                        entity = id_to_entity.get(hit.id)
                        if entity and "text" in entity:
                            retrieved_chunks.append((
                                entity["text"],  # chunk text
                                round(hit.score, 4),  # similarity score
                                hit.id  # chunk id
                            ))

            # Highest similarity first.
            retrieved_chunks.sort(key=lambda x: x[1], reverse=True)
            logger.debug(f"文档分片检索完成: query={query[:30]}, 有效结果数={len(retrieved_chunks)}, db_type={db_type}")
            return retrieved_chunks
        except Exception as e:
            logger.error(f"文档分片检索失败: query={query}, doc_id={doc_id}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="文档分片检索失败",
                context={"query": query, "doc_id": doc_id, "error": str(e)}
            ) from e

    def generate_doc_answer(
            self,
            query: str,
            retrieved_chunks: List[Tuple[str, float, str]],
            similarity_threshold: float = 0.6
    ) -> Optional[str]:
        """Compose an answer from retrieved chunks above the similarity cutoff.

        Args:
            query: the original user question.
            retrieved_chunks: (chunk_text, similarity, chunk_id) tuples, as
                produced by search_doc_chunks.
            similarity_threshold: minimum similarity for a chunk to be used.

        Returns:
            Answer string, or None when no chunk passes the threshold.

        Raises:
            ToolError: on any unexpected failure while building the answer.
        """
        try:
            # Drop chunks below the similarity threshold.
            valid_chunks = [chunk for chunk, score, _ in retrieved_chunks if score >= similarity_threshold]
            if not valid_chunks:
                logger.info(f"无有效文档分片匹配: query={query}, 阈值={similarity_threshold}")
                return None

            # Concatenate chunks as the answer context (placeholder for an LLM
            # call that would inject this document context into a prompt).
            context = "\n".join([f"相关内容{idx + 1}: {chunk}" for idx, chunk in enumerate(valid_chunks)])
            answer = f"基于文档内容回答：\n{context}\n\n总结：{query}"

            logger.debug(f"文档问答答案生成完成: query={query[:30]}")
            return answer
        except Exception as e:
            logger.error(f"文档答案生成失败: query={query}, error={str(e)}", exc_info=True)
            raise ToolError(
                message="文档答案生成失败",
                context={"query": query, "error": str(e)}
            ) from e


# Module-level singleton reused by importers of this module.
doc_qa_tools = DocQATools()

# Manual smoke-test entry point.
if __name__ == "__main__":
    run_logger = Logger.update_context(task_id="doc_qa_test", agent_name="doc_qa_tool")
    run_logger.info("开始测试文档问答工具")

    # Test fixtures (point sample_path at a real document before running).
    sample_path, sample_doc_id = "test_doc.pdf", "test_20251108"
    sample_query = "文档中提到的多Agent系统架构分层有哪些？"

    try:
        # Step 1: build the vector index for the sample document.
        meta = doc_qa_tools.build_doc_vector_db(sample_path, sample_doc_id)
        run_logger.info(f"向量库构建结果: {meta}")

        # Step 2: retrieve chunks matching the sample query.
        hits = doc_qa_tools.search_doc_chunks(sample_query, doc_id=sample_doc_id)
        run_logger.info(f"检索到的分片: {[chunk[:50] for chunk, _, _ in hits]}")

        # Step 3: compose the final answer from the retrieved chunks.
        reply = doc_qa_tools.generate_doc_answer(sample_query, hits)
        run_logger.info(f"生成答案: {reply}")

        run_logger.success("文档问答工具测试完成")
    except ToolError as e:
        run_logger.error(f"测试失败: {e}")