from __future__ import annotations

import hashlib
import logging
import shutil
from pathlib import Path
from typing import Any, Dict, List

from fastapi import UploadFile
from langchain.docstore.document import Document

from config.app_config import get_app_config
from src.document_processing.document_loader import DocumentLoaderFactory
from src.document_processing.text_splitter import DefaultTextSplitter
from src.embedding.huggingface_embedding import HuggingFaceEmbedding
from src.management.services.metadata_db import metadata_db  # metadata persistence layer
from src.vector_db.chroma_db import ChromaDB

# NOTE(review): calling basicConfig() at import time configures the root logger
# as a module side effect; in a library module, prefer leaving log configuration
# to the application entry point so it cannot override the host app's setup.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-level logger, stdlib convention


class DocumentService:
    """End-to-end document pipeline: upload, parse, chunk, embed, and track.

    Per upload the service:
      1. derives a content-addressed document ID,
      2. persists the raw file under ``raw_docs_dir/<doc_id>/``,
      3. parses and splits the text,
      4. stores chunk embeddings in the vector DB (IDs ``<doc_id>_chunk_<i>``),
      5. records metadata via ``metadata_db``.

    NOTE(review): the contracts of ChromaDB/metadata_db are assumed from
    their call sites here — confirm against their own definitions.
    """

    def __init__(self):
        """Wire up embedding model, vector store, text splitter, and storage dir."""
        self.config = get_app_config()
        self.embedding = HuggingFaceEmbedding(
            model_name=self.config.embedding_config.model_name,
            model_path=self.config.embedding_config.model_path
        )
        self.vector_db = ChromaDB(
            embedding=self.embedding,
            db_path=self.config.vector_db_path,
            collection_name=self.config.collection_name
        )
        self.text_splitter = DefaultTextSplitter(config=self.config)
        self.raw_docs_dir = Path(self.config.raw_docs_path)
        self.raw_docs_dir.mkdir(parents=True, exist_ok=True)
        # Upload size cap, compared against the raw byte length of the file.
        self.max_text_length = 10 * 1024 * 1024  # 10MB

    def _generate_unique_doc_id(self, file_name: str, file_content: bytes) -> str:
        """Build a stable ID: sanitized file name + 16-hex-char content hash.

        MD5 is used purely for deduplication, not for security.
        """
        content_hash = hashlib.md5(file_content).hexdigest()[:16]
        # Keep only characters that are safe inside a directory name.
        safe_file_name = "".join(c for c in file_name if c.isalnum() or c in "._")
        return f"{safe_file_name}_{content_hash}"

    def _save_raw_file(self, file: UploadFile, doc_id: str) -> Path:
        """Persist the upload under ``raw_docs_dir/<doc_id>/`` and return its path.

        Only the base name of the client-supplied filename is used, so a
        crafted name such as ``../../x`` cannot escape the document directory.
        """
        doc_dir = self.raw_docs_dir / doc_id
        doc_dir.mkdir(parents=True, exist_ok=True)
        file_path = doc_dir / Path(file.filename).name  # strip any path components
        with open(file_path, "wb") as f:
            f.write(file.file.read())
        return file_path

    def process_uploads(self, files: List[UploadFile]) -> Dict[str, List[str]]:
        """Process a batch of uploads.

        Returns ``{"success": [...], "failed": [...]}`` where each entry is a
        human-readable status message. A failure of one file never aborts the
        rest of the batch.
        """
        result = {"success": [], "failed": []}

        for file in files:
            try:
                logger.info(f"开始处理文件：{file.filename}")

                # 1. Read the payload and derive a content-based unique ID.
                file_content = file.file.read()
                if len(file_content) > self.max_text_length:
                    result["failed"].append(f"{file.filename}：文件过大（超过10MB）")
                    continue
                doc_id = self._generate_unique_doc_id(file.filename, file_content)
                # Rewind so _save_raw_file can re-read the stream from the start.
                file.file.seek(0)

                # 2. Deduplication check against the metadata store.
                if metadata_db.is_document_exist(doc_id):
                    result["failed"].append(f"{file.filename}：已存在（自动跳过）")
                    continue

                # 3. Persist the raw file.
                try:
                    file_path = self._save_raw_file(file, doc_id)
                    logger.info(f"文件保存成功：{file_path}")
                except Exception as e:
                    result["failed"].append(f"{file.filename}：保存失败（{str(e)}）")
                    continue

                # 4. Parse the document into plain text.
                try:
                    logger.info(f"开始解析文件：{file_path}")
                    loader = DocumentLoaderFactory.get_loader(str(file_path))
                    documents = loader.load()
                    full_text = "\n".join(doc.page_content for doc in documents)
                    logger.info(f"解析完成，文本长度：{len(full_text)}字符")

                    if not full_text.strip():
                        result["failed"].append(f"{file.filename}：内容为空")
                        continue
                except Exception as e:
                    logger.error(f"解析失败：{str(e)}", exc_info=True)
                    result["failed"].append(f"{file.filename}：解析失败（{str(e)}）")
                    continue

                # 5. Split the text into chunks.
                try:
                    logger.info(f"开始分块，文本长度：{len(full_text)}")
                    chunks = self.text_splitter.split_text(full_text)
                    logger.info(f"分块完成，得到{len(chunks)}个块")
                except Exception as e:
                    logger.error(f"分块失败：{str(e)}", exc_info=True)
                    result["failed"].append(f"{file.filename}：分块失败（{str(e)}）")
                    continue

                # 6. Store chunk vectors. Chunk IDs follow "<doc_id>_chunk_<i>",
                # the scheme delete_document/get_document_chunks rely on.
                try:
                    chunk_ids = [f"{doc_id}_chunk_{i}" for i in range(len(chunks))]
                    docs = [
                        Document(
                            page_content=chunk,
                            metadata={"doc_id": doc_id, "chunk_index": i}
                        )
                        for i, chunk in enumerate(chunks)
                    ]
                    self.vector_db.add_documents(documents=docs, ids=chunk_ids)
                    self.vector_db.persist()
                except Exception as e:
                    result["failed"].append(f"{file.filename}：向量存储失败（{str(e)}）")
                    continue

                # 7. Record metadata. If this fails, roll back the vectors
                # written in step 6 so no orphaned chunks are left behind.
                try:
                    metadata_db.add_document(
                        doc_id=doc_id,
                        file_name=file.filename,
                        file_path=str(file_path),
                        chunk_count=len(chunks),
                        status="success"
                    )
                except Exception as e:
                    logger.error(f"元数据写入失败：{str(e)}", exc_info=True)
                    try:
                        self.vector_db.delete(ids=chunk_ids)
                        self.vector_db.persist()
                    except Exception:
                        # Best-effort rollback: log and keep going; the outer
                        # failure message is still reported to the caller.
                        logger.error(f"向量回滚失败：{doc_id}", exc_info=True)
                    result["failed"].append(f"{file.filename}：元数据记录失败（{str(e)}）")
                    continue

                result["success"].append(f"{file.filename}：处理成功（切片数：{len(chunks)}）")
                logger.info(f"{file.filename} 处理成功")

            except Exception as e:
                # Last-resort guard so one unexpected error cannot kill the batch.
                logger.error(f"{file.filename} 处理异常：{str(e)}", exc_info=True)
                result["failed"].append(f"{file.filename}：处理异常（{str(e)}）")

        return result

    def get_all_metadata(self) -> List[Dict]:
        """Return every document metadata record from the metadata store."""
        return metadata_db.get_all_documents()

    def delete_document(self, doc_id: str) -> Dict[str, str]:
        """Remove a document's vectors, raw files, and metadata record.

        Returns ``{"status": "success"|"failed", "message": ...}``; never raises.
        """
        try:
            logger.info(f"===== 开始删除文档：{doc_id} =====")

            # 1. Existence check.
            if not metadata_db.is_document_exist(doc_id):
                logger.warning(f"文档不存在，删除终止：{doc_id}")
                return {"status": "failed", "message": f"文档ID {doc_id} 不存在"}

            # 2. Delete vector-store chunks. Match the full chunk prefix
            # "<doc_id>_chunk_" so a doc_id that happens to be a string prefix
            # of another doc_id cannot cause cross-document deletion.
            chunk_ids = self.vector_db.get_all_ids_by_prefix(prefix=f"{doc_id}_chunk_")
            if chunk_ids:
                self.vector_db.delete(ids=chunk_ids)
                self.vector_db.persist()
                logger.info(f"已删除向量库片段：共 {len(chunk_ids)} 个")
            else:
                logger.info(f"无向量库片段需要删除：{doc_id}")

            # 3. Delete the on-disk raw-file directory.
            doc_dir = self.raw_docs_dir / doc_id
            if doc_dir.exists():
                shutil.rmtree(doc_dir)
                logger.info(f"已删除本地文件目录：{doc_dir}")
            else:
                logger.info(f"无本地文件需要删除：{doc_dir}")

            # 4. Delete the metadata record last, so a crash mid-way leaves a
            # record that still marks the document for later cleanup.
            metadata_db.delete_document(doc_id)
            logger.info(f"已删除元数据库记录：{doc_id}")

            return {"status": "success", "message": f"文档 {doc_id} 已完全删除"}

        except Exception as e:
            logger.error(f"===== 删除过程异常：{str(e)} =====", exc_info=True)
            return {"status": "failed", "message": f"删除失败：{str(e)}"}

    def get_document_chunks(self, doc_id: str) -> List[Dict[str, Any]]:
        """Return the stored chunks of a document, ordered by chunk index.

        Each item carries: chunk_id, chunk_index (1-based, for display),
        page_content, metadata, and length. Raises ValueError for an unknown
        doc_id; re-raises any vector-store error after logging it.
        """
        try:
            if not metadata_db.is_document_exist(doc_id):
                raise ValueError(f"文档ID {doc_id} 不存在")

            # Full "<doc_id>_chunk_" prefix avoids matching another document
            # whose ID merely starts with this doc_id.
            chunk_ids = self.vector_db.get_all_ids_by_prefix(prefix=f"{doc_id}_chunk_")
            if not chunk_ids:
                logger.warning(f"文档ID {doc_id} 未找到任何片段")
                return []

            # Sort numerically by the trailing chunk index ("..._chunk_<i>").
            chunk_ids.sort(key=lambda x: int(x.split("_chunk_")[-1]))

            chunks = []
            for chunk_id in chunk_ids:
                doc = self.vector_db.get_document(chunk_id)
                if doc and hasattr(doc, "page_content"):
                    chunk_index = int(chunk_id.split("_chunk_")[-1]) + 1  # 1-based
                    metadata = doc.metadata if hasattr(doc, "metadata") and doc.metadata is not None else {}
                    chunks.append({
                        "chunk_id": chunk_id,
                        "chunk_index": chunk_index,
                        "page_content": doc.page_content,
                        "metadata": metadata,
                        "length": len(doc.page_content)
                    })

            logger.info(f"成功获取文档 {doc_id} 的 {len(chunks)} 个片段")
            return chunks
        except Exception as e:
            logger.error(f"获取文档片段失败：{str(e)}", exc_info=True)
            raise