from datetime import datetime
from typing import List, Dict, Optional, Any
import logging
import os
import json
from pathlib import Path
from uuid import uuid4

# LangChain核心组件（适配v0.2+）
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_ollama import OllamaEmbeddings
from langchain_ollama import OllamaLLM
from langchain_core.documents import Document
from app.exception.rag_exception import RAGException
# Milvus相关导入
from langchain_milvus import Milvus
from app.system_config import settings
from app.strategy.document_strategy import DocumentLoaderFactory
from app.utils.milvus_store import MilvusStoreWrapper
# Module-level logger for document-parsing operations
logger = logging.getLogger("Files-Parse")

# Shared loader factory (used as a module singleton): dispatches by file
# extension via its type_strategy_map — see process_docs_directory below
loader_factory = DocumentLoaderFactory()

class FilesParseService:
    """Contract-document ingestion pipeline: load -> split -> metadata -> Milvus.

    Wires together an Ollama embedding model, an Ollama LLM and a Milvus
    vector store (via ``MilvusStoreWrapper``) and exposes single-file and
    whole-directory processing entry points. All failures are surfaced as
    ``RAGException`` carrying an HTTP-style status code.
    """

    def __init__(
            self,
            milvus_host: str = settings.MILVUS_HOST,
            milvus_port: str = settings.MILVUS_PORT,
            ollama_base_url: str = settings.OLLAMA_URL,
            embedding_model: str = settings.OLLAMA_EMBEDDING_MODEL,
            llm_model: str = settings.OLLAMA_LLM_MODEL,
            default_collection: str = settings.MILVUS_COLLECTION,
            vector_dim: int = 1024  # default for bge-m3; overwritten by the probe in _init_embeddings
    ):
        """Initialize and eagerly smoke-test all backend components.

        Raises:
            RAGException: if the embedding model, vector store or LLM cannot
                be reached or fails its smoke test.
        """
        self.milvus_host = milvus_host
        self.milvus_port = milvus_port
        self.ollama_base_url = ollama_base_url
        self.embedding_model = embedding_model
        logger.info(f"ollama_base_url: {self.ollama_base_url},embedding_model: {self.embedding_model}")
        self.llm_model = llm_model
        self.default_collection = default_collection
        self.vector_dim = vector_dim

        # Initialize core components eagerly so configuration errors surface
        # at service startup rather than on the first request.
        self.embeddings = self._init_embeddings()
        self.milvus_wrapper = MilvusStoreWrapper(
            is_removed=True,
            collection_name=self.default_collection,
            max_retries=1,
            retry_delay=1,
        )
        self.vector_store = self.milvus_wrapper.vector_store
        self.llm = self._init_llm()

    def _init_embeddings(self) -> OllamaEmbeddings:
        """Create and smoke-test the Ollama embedding model.

        Runs a probe query and records the model's real output dimension in
        ``self.vector_dim`` (overriding the constructor default).

        Raises:
            RAGException: 503 if the probe returns an invalid embedding,
                500 for any other initialization failure.
        """
        try:
            os.environ["OLLAMA_HOST"] = self.ollama_base_url
            embeddings = OllamaEmbeddings(model=self.embedding_model, base_url=self.ollama_base_url)
            # Probe the model with a throwaway query to verify it works.
            test_emb = embeddings.embed_query("contract2025")
            if isinstance(test_emb, list) and len(test_emb) > 0 and isinstance(test_emb[0], float):
                # Trust the probe over the constructor default.
                self.vector_dim = len(test_emb)
                logger.info(f"嵌入模型就绪：{self.embedding_model}（维度：{self.vector_dim}）")
                return embeddings
            raise RAGException(503, "嵌入模型返回无效结果")
        except RAGException:
            # Fix: previously the 503 above was swallowed by the generic
            # handler and re-wrapped as a 500 with a nested message.
            raise
        except Exception as e:
            raise RAGException(500, f"嵌入模型初始化失败：{str(e)}")

    def _init_vector_store(self) -> Milvus:
        """Build a default Milvus store.

        NOTE(review): no longer called by __init__ (which uses a configured
        MilvusStoreWrapper directly); kept for backward compatibility.
        """
        milvus_wrapper = MilvusStoreWrapper()
        return milvus_wrapper.vector_store

    def _init_llm(self) -> OllamaLLM:
        """Create and ping-test the Ollama LLM.

        Raises:
            RAGException: 503 with an actionable hint when the Ollama service
                or model is unavailable, or when the ping test fails.
        """
        try:
            llm = OllamaLLM(
                model=self.llm_model,
                base_url=self.ollama_base_url,
                temperature=0.1,
                request_timeout=120.0
            )
            # Smoke test: ask the model to echo back "pong".
            test_resp = llm.invoke("仅返回'pong'")
            if "pong" in test_resp.strip().lower():
                logger.info(f"LLM就绪：{self.llm_model}")
                return llm
            raise RAGException(503, f"LLM测试失败：{test_resp[:50]}")
        except RAGException:
            # Fix: previously this fell into the generic handler below and had
            # its message mangled by the connection-error substring checks.
            raise
        except Exception as e:
            error_msg = str(e)
            # Translate common infrastructure errors into actionable hints.
            if "connection refused" in error_msg.lower():
                error_msg = "Ollama服务未启动（需执行'ollama serve'）"
            elif "model not found" in error_msg.lower():
                error_msg = f"模型不存在（需执行'ollama pull {self.llm_model}'）"
            raise RAGException(503, error_msg)

    def _validate_metadata(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
        """Sanitize merged metadata and guarantee the required fields exist.

        Non-string keys are dropped; scalar values pass through (strings are
        stripped of brace/bracket characters so they cannot break Milvus
        expressions); everything else is stringified. Missing required fields
        are filled with ``missing_<field>`` placeholders.

        Returns:
            A new dict safe to attach to a chunk.
        """
        valid_meta = {}
        for key, value in metadata.items():
            if not isinstance(key, str):
                logger.warning(f"跳过非字符串键：{type(key)}（值：{value}）")
                continue
            if isinstance(value, (str, int, float, bool)):
                if isinstance(value, str):
                    # Strip characters that would corrupt Milvus filter expressions.
                    value = value.replace("{", "").replace("}", "").replace("[", "").replace("]", "").strip()
                valid_meta[key] = value
            elif value is None:
                valid_meta[key] = "None"
            else:
                valid_meta[key] = str(value).replace("{", "").replace("}", "").strip()
                logger.warning(f"元数据'{key}'类型{type(value)}已转为字符串")

        # Force-fill required fields so downstream code can index them safely.
        required_fields = ["file_id", "chunk_id", "file_name"]
        for field in required_fields:
            if field not in valid_meta:
                valid_meta[field] = f"missing_{field}"
                logger.error(f"元数据强制补充缺失字段：{field}（默认值）")

        return valid_meta

    def _load_document(self, doc_path: str) -> List[Document]:
        """Load a markdown document, falling back to GBK on decode errors.

        Raises:
            RAGException: 404 if the path does not exist, 400 if the document
                loads empty, 500 on any other loader failure.
        """
        if not os.path.exists(doc_path):
            raise RAGException(404, f"文档不存在：{doc_path}")
        try:
            loader = UnstructuredMarkdownLoader(
                doc_path, mode="single", strategy="fast", encoding="utf-8"
            )
            docs = loader.load()
            if len(docs) == 0:
                raise RAGException(400, "文档加载后为空")
            logger.info(f"文档加载成功：{doc_path}（文档数：1，字符数：{len(docs[0].page_content)}）")
            return docs
        except UnicodeDecodeError:
            # Retry with GBK for legacy Chinese-encoded files.
            loader = UnstructuredMarkdownLoader(doc_path, mode="single", encoding="gbk")
            docs = loader.load()
            logger.warning(f"用GBK编码加载文档：{doc_path}")
            return docs
        except RAGException:
            # Fix: the 400 "empty document" error above was previously caught
            # by the generic handler and re-raised as a 500.
            raise
        except Exception as e:
            raise RAGException(500, f"文档加载失败：{str(e)}")

    def _split_document(self, docs: List[Document], doc_path: str) -> List[Document]:
        """Split documents into overlapping chunks and drop trivially short ones.

        Raises:
            RAGException: 500 if no chunk reaches the 100-character minimum.
        """
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=800,
            chunk_overlap=100,
            separators=["\n## ", "\n### ", "\n\n", "\n", "。", "，"],
            length_function=len
        )
        chunks = splitter.split_documents(docs)
        # Chunks shorter than 100 chars carry too little context to embed usefully.
        valid_chunks = [c for c in chunks if len(c.page_content.strip()) >= 100]
        if len(valid_chunks) == 0:
            raise RAGException(500, f"无有效分片（需至少100字符）：{doc_path}")
        logger.info(f"分片完成：{doc_path}（有效分片数：{len(valid_chunks)}）")
        return valid_chunks

    def _get_file_extension(self, filename: str) -> str:
        """Return the file extension without the leading dot ('' if none)."""
        path = Path(filename)
        return path.suffix.lstrip('.') if path.suffix else ''

    def _process_metadata(self, chunks: List[Document], doc_path: str, file_id: str) -> List[Document]:
        """Attach standardized, validated metadata to every chunk (in place).

        Base fields (file_id, chunk_id, timestamps, ...) override any
        same-named keys from the loader's original metadata.
        """
        file_name = os.path.basename(doc_path)
        current_time = datetime.now().isoformat()
        processed_chunks = []

        for idx, chunk in enumerate(chunks, 1):
            base_meta = {
                "file_id": str(file_id),
                "file_name": file_name,
                "file_ext": self._get_file_extension(file_name),
                "chunk_id": f"{file_id}_chunk_{idx:04d}",
                "created_at": current_time,
                "source_path": str(Path(doc_path).resolve()),
                "chunk_length": len(chunk.page_content),
                "business_scene": "contract"
            }
            # Base metadata wins on key collisions with loader metadata.
            combined_meta = {**chunk.metadata, **base_meta}
            chunk.metadata = self._validate_metadata(combined_meta)
            processed_chunks.append(chunk)

        logger.info(f"元数据处理完成：{len(processed_chunks)}个分片（均通过校验）")
        return processed_chunks

    def _add_chunks_to_milvus(self, chunks: List[Document], file_id: str) -> Dict[str, Any]:
        """Insert chunks into Milvus with batched, retried writes.

        Returns:
            Summary dict: status ("success"/"partial_success"), total,
            success, failed, db_total.

        Raises:
            RAGException: 500 if there is nothing to insert, every insert
                fails, or the pipeline itself errors.
        """
        try:
            if not chunks:
                raise RAGException(500, "无分片数据可入库")

            chunk_ids = [chunk.metadata["chunk_id"] for chunk in chunks]
            # Log only a preview to keep the log readable on large batches.
            logger.info(f"开始入库：共{len(chunks)}个分片，ID列表：{chunk_ids[:5]}...")
            logger.info(f"第一个分片内容预览：{chunks[0].page_content[:100]}...")
            logger.info(f"第一个分片元数据：{chunks[0].metadata}")

            success_count = self.milvus_wrapper.add_documents_with_retry(
                docs=chunks,
                ids=chunk_ids,
                batch_size=10  # tune per deployment; 10 keeps request payloads small
            )

            # Flush so the subsequent count sees the freshly written data.
            self.milvus_wrapper.collection.flush()

            total_in_db = self.count_chunks_by_metadata({})
            logger.info(f"入库完成：成功{success_count}/{len(chunks)}，数据库总计数：{total_in_db}")

            if success_count == 0:
                raise RAGException(500, "所有分片均入库失败")

            return {
                "status": "partial_success" if success_count < len(chunks) else "success",
                "total": len(chunks),
                "success": success_count,
                "failed": len(chunks) - success_count,
                "db_total": total_in_db
            }

        except RAGException:
            # Fix: our own 500s above were previously re-wrapped with a
            # misleading "add_documents失败" prefix.
            logger.error("入库流程异常终止", exc_info=True)
            raise
        except Exception as e:
            logger.error(f"入库流程异常终止：{str(e)}", exc_info=True)
            raise RAGException(500, f"add_documents失败：{str(e)}")

    def count_chunks_by_metadata(self, metadata_filter: Dict[str, Any]) -> int:
        """Count stored chunks matching a metadata filter (pymilvus native API).

        An empty filter returns the collection's total entity count.

        Raises:
            RAGException: 500 on any query failure.
        """
        try:
            from pymilvus import Collection  # bypass the langchain-milvus wrapper

            collection = Collection(self.default_collection)

            if metadata_filter:
                expr_parts = []
                for key, value in metadata_filter.items():
                    if isinstance(value, str):
                        # Escape embedded quotes so values cannot break the expression.
                        escaped = value.replace('"', '\\"')
                        expr_parts.append(f'{key} == "{escaped}"')
                    else:
                        expr_parts.append(f'{key} == {value}')
                expr = " and ".join(expr_parts)
                # Fix: the old query used limit=1 + len(result), which could
                # never report more than 1 match. count(*) returns the true count.
                result = collection.query(expr=expr, output_fields=["count(*)"])
                return int(result[0]["count(*)"])
            # No filter: report the whole collection.
            return collection.num_entities
        except Exception as e:
            raise RAGException(500, f"统计失败：{str(e)}")

    def load_and_process_document(self, doc_path: str, file_id: str) -> Dict[str, Any]:
        """Full pipeline for a markdown file: load -> split -> metadata -> store.

        Uses the markdown-specific loader; see process_single_document for the
        strategy-factory variant that handles other file types.
        """
        logger.info(f"开始处理文档：file_id={file_id}，路径={doc_path}")
        try:
            docs = self._load_document(doc_path)
            chunks = self._split_document(docs, doc_path)
            processed_chunks = self._process_metadata(chunks, doc_path, file_id)
            result = self._add_chunks_to_milvus(processed_chunks, file_id)
            result.update({
                "file_name": os.path.basename(doc_path),
                "process_time": datetime.now().isoformat(),
                "message": "文档处理完成"
            })
            logger.info(f"文档处理成功：{json.dumps(result, ensure_ascii=False)}")
            return result
        except RAGException as e:
            logger.error(f"文档处理失败：[{e.code}] {e.message}")
            raise

    def process_single_document(self, doc_path: str, file_id: Optional[str] = None) -> Dict[str, Any]:
        """Process one document of any supported type via the loader factory.

        Generates a UUID file_id when none is supplied; the id is echoed back
        in the result so callers can track the document.
        """
        logger.info(f"开始处理文档：路径={doc_path}")

        if not file_id:
            file_id = str(uuid4())

        try:
            # Extension-based strategy dispatch (md/docx/txt/... per factory).
            docs = loader_factory.load_document(doc_path)
            chunks = self._split_document(docs, doc_path)
            processed_chunks = self._process_metadata(chunks, doc_path, file_id)
            result = self._add_chunks_to_milvus(processed_chunks, file_id)
            result.update({
                "file_name": os.path.basename(doc_path),
                "file_id": file_id,
                "process_time": datetime.now().isoformat(),
                "message": "文档处理完成"
            })
            logger.info(f"文档处理成功：{json.dumps(result, ensure_ascii=False)}")
            return result
        except RAGException as e:
            logger.error(f"文档处理失败：[{e.code}] {e.message}")
            raise

    def process_docs_directory(self, dir_path: str = "./docs") -> Dict[str, Any]:
        """Process every file under a directory tree, one by one.

        Unsupported file types and per-file errors are recorded (and counted
        as failures) without aborting the run.

        Args:
            dir_path: Root directory to walk; defaults to "./docs".

        Returns:
            Summary dict with total/success/failure counts and a per-file
            result list.

        Raises:
            RAGException: 404 if the path is missing, 400 if it is not a directory.
        """
        if not os.path.exists(dir_path):
            raise RAGException(404, f"目录不存在：{dir_path}")

        if not os.path.isdir(dir_path):
            raise RAGException(400, f"{dir_path}不是一个目录")

        logger.info(f"开始处理目录：{dir_path}")

        # Collect every file in the tree (filtering happens per-file below).
        all_files = [
            os.path.join(root, file)
            for root, _, files in os.walk(dir_path)
            for file in files
        ]

        logger.info(f"发现{len(all_files)}个文件需要处理")

        results = {
            "total_files": len(all_files),
            "success_files": 0,
            "failed_files": 0,
            "processing_results": []
        }

        for file_path in all_files:
            try:
                # Skip extensions the loader factory has no strategy for.
                ext = Path(file_path).suffix.lower()
                if ext not in loader_factory.type_strategy_map:
                    logger.warning(f"跳过不支持的文件类型：{file_path}")
                    results["failed_files"] += 1
                    results["processing_results"].append({
                        "file_path": file_path,
                        "status": "unsupported",
                        "message": f"不支持的文件类型：{ext}"
                    })
                    continue

                result = self.process_single_document(file_path)
                results["success_files"] += 1
                results["processing_results"].append({
                    "file_path": file_path,
                    "status": result["status"],
                    "file_id": result["file_id"],
                    "chunks": {
                        "total": result["total"],
                        "success": result["success"],
                        "failed": result["failed"]
                    }
                })
                logger.info(f"文件处理完成：{file_path}")

            except Exception as e:
                # Best-effort: record the failure and keep processing the rest.
                logger.error(f"文件处理失败：{file_path}，错误：{str(e)}")
                results["failed_files"] += 1
                results["processing_results"].append({
                    "file_path": file_path,
                    "status": "error",
                    "message": str(e)
                })

        logger.info(f"目录处理完成：成功{results['success_files']}/{results['total_files']}")
        return results


# Manual test entry point
if __name__ == "__main__":
    import sys

    # Fix: configure logging so the INFO-level progress messages below are
    # actually emitted — without a handler, Python's last-resort handler only
    # shows WARNING and above.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
    )
    try:
        # 1. Initialize the service
        logger.info("=" * 50)
        logger.info("开始初始化文档解析服务")
        parse_service = FilesParseService(default_collection="docx_documents_milvus")
        logger.info("服务初始化成功")

        # 2. Process every file under the docs directory
        logger.info("开始处理docs目录下的所有文件")
        dir_result = parse_service.process_docs_directory("./docs/contract/docx")
        print(f"\n【目录处理结果】\n{json.dumps(dir_result, ensure_ascii=False, indent=2)}")

        logger.info("\n" + "=" * 50)
        logger.info("所有文件处理完成")
        logger.info("=" * 50)

    except RAGException as e:
        logger.error(f"\n测试失败：[{e.code}] {e.message}", exc_info=True)
        # Fix: sys.exit instead of the site-injected exit(), which is not
        # guaranteed to exist in every interpreter startup mode.
        sys.exit(1)
    except Exception as e:
        logger.error(f"\n未知错误：{str(e)}", exc_info=True)
        sys.exit(1)