"""知识库管理模块

支持功能：
1. 文档导入（工单、故障分析、设备说明书等）
2. 文本标准化处理
3. 使用阿里云 text-embedding-v4 进行向量化
4. FAISS 向量数据库存储
5. 语义相似性搜索
6. 返回参考资料和元数据
"""

import hashlib
import json
import os
import re
import shutil
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional

import dashscope
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

from config.config import (
    KB_ENABLED,
    KB_PATH,
    KB_VECTORSTORE_PATH,
    ALIYUN_API_KEY,
    KB_EMBEDDING_CHUNK_SIZE,
    KB_EMBEDDING_CHUNK_OVERLAP,
)


class AliyunEmbedding(Embeddings):
    """Aliyun text embedding model - text-embedding-v4 (via the dashscope SDK)."""

    def __init__(self, api_key: Optional[str] = None):
        """Initialize the Aliyun embedding model.

        Args:
            api_key: Aliyun API key; falls back to ALIYUN_API_KEY from config.

        Raises:
            ValueError: If no API key is available.
        """
        self.api_key = api_key or ALIYUN_API_KEY

        if not self.api_key:
            raise ValueError(
                "❌ 缺少阿里云 API Key。请在 .env 文件中设置 ALIYUN_API_KEY"
            )

        # dashscope uses a module-global key for all subsequent calls.
        dashscope.api_key = self.api_key

        print("✅ 阿里云文本嵌入模型已初始化（使用 dashscope SDK）")
        print("   📍 模型: text-embedding-v4")

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a list of documents (one API call per text)."""
        return [self._get_embedding(text) for text in texts]

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query text."""
        return self._get_embedding(text)

    def _get_embedding(self, text: str) -> List[float]:
        """Call the Aliyun API to embed a single text.

        Args:
            text: Text to embed.

        Returns:
            The embedding vector.

        Raises:
            RuntimeError: If the API call fails or returns no embedding.
                (The original code re-caught its own raises inside a broad
                try/except, printing and wrapping every error twice; the
                try block is now limited to the actual API call.)
        """
        print(f"🔄 调用阿里云 text-embedding-v4: {text[:50]}...")

        try:
            # Call the text-embedding endpoint via the dashscope SDK.
            response = dashscope.TextEmbedding.call(
                model='text-embedding-v4',
                input=text
            )
        except Exception as e:
            error_msg = f"❌ 获取嵌入失败: {str(e)}"
            print(error_msg)
            raise RuntimeError(error_msg) from e

        # status_code 200 indicates success.
        if response.status_code != 200:
            error_msg = (
                f"❌ 阿里云 API 错误: {response.status_code} - "
                f"{response.message if hasattr(response, 'message') else ''}"
            )
            print(error_msg)
            raise RuntimeError(error_msg)

        # Validate the response payload shape before indexing into it.
        if not response.output or 'embeddings' not in response.output:
            raise RuntimeError(f"❌ API 响应格式错误: {response}")

        embeddings = response.output['embeddings']
        if not embeddings:
            raise RuntimeError("API 未返回嵌入向量")

        embedding = embeddings[0]['embedding']
        if not embedding:
            raise RuntimeError("API 返回空向量")

        print(f"✅ 成功获取嵌入，维度: {len(embedding)}")
        return embedding


class KnowledgeBaseManager:
    """Knowledge base manager - handles document loading, storage and retrieval."""

    def __init__(
        self,
        kb_path: Optional[str] = None,
        vectorstore_path: Optional[str] = None,
        chunk_size: Optional[int] = None,
        chunk_overlap: Optional[int] = None,
    ):
        """Initialize the knowledge base manager.

        Args:
            kb_path: Knowledge base directory (defaults to KB_PATH).
            vectorstore_path: Vector store directory (defaults to KB_VECTORSTORE_PATH).
            chunk_size: Text-splitting chunk size (defaults to KB_EMBEDDING_CHUNK_SIZE).
            chunk_overlap: Overlap between adjacent chunks (defaults to
                KB_EMBEDDING_CHUNK_OVERLAP).
        """
        self.kb_path = kb_path or KB_PATH
        self.vectorstore_path = vectorstore_path or KB_VECTORSTORE_PATH
        self.chunk_size = chunk_size or KB_EMBEDDING_CHUNK_SIZE
        self.chunk_overlap = chunk_overlap or KB_EMBEDDING_CHUNK_OVERLAP
        self.documents_file = os.path.join(self.kb_path, "documents.json")

        # Make sure both directories exist before any load/save happens.
        Path(self.kb_path).mkdir(exist_ok=True, parents=True)
        Path(self.vectorstore_path).mkdir(exist_ok=True, parents=True)

        print(f"📁 知识库路径: {self.kb_path}")
        print(f"📁 向量库路径: {self.vectorstore_path}")

        # Load the list of original (un-chunked) documents.
        self.original_documents = self._load_documents()

        # Initialize the embedding model; if it fails, the KB stays disabled
        # (embeddings/vectorstore are None and mutating calls return errors).
        try:
            self.embeddings = AliyunEmbedding()
        except Exception as e:
            print(f"❌ 初始化嵌入模型失败: {e}")
            self.embeddings = None
            self.vectorstore = None
            return

        # Load an existing vector store, or defer creation until first add.
        self.vectorstore = self._load_or_create_vectorstore()

    def _load_or_create_vectorstore(self):
        """Load an existing FAISS store from disk, or return None.

        Returns None both when no index exists yet and when loading fails;
        add_documents() will then create a fresh store lazily.
        """
        index_path = os.path.join(self.vectorstore_path, "index.faiss")

        if not os.path.exists(index_path):
            print("🆕 将创建新向量库...")
            return None

        print("📂 加载现有向量库...")
        try:
            # allow_dangerous_deserialization: FAISS metadata is pickled;
            # we only ever load files this process wrote itself.
            vectorstore = FAISS.load_local(
                self.vectorstore_path, self.embeddings, allow_dangerous_deserialization=True
            )
            print(f"✅ 向量库加载成功")
            return vectorstore
        except Exception as e:
            print(f"⚠️  加载向量库失败: {e}，将创建新向量库")
            return None

    def _load_documents(self) -> List[Dict]:
        """Load the original document list from documents.json.

        Returns an empty list when the file is missing or unreadable.
        """
        if os.path.exists(self.documents_file):
            try:
                with open(self.documents_file, 'r', encoding='utf-8') as f:
                    docs = json.load(f)
                print(f"✅ 加载了 {len(docs)} 个原始文档")
                return docs
            except Exception as e:
                print(f"⚠️  加载原始文档失败: {e}，将使用空列表")
                return []
        return []

    def _save_documents(self):
        """Persist the original document list to documents.json (best effort)."""
        try:
            with open(self.documents_file, 'w', encoding='utf-8') as f:
                json.dump(self.original_documents, f, ensure_ascii=False, indent=2)
            print(f"💾 原始文档已保存 ({len(self.original_documents)} 个)")
        except Exception as e:
            print(f"❌ 保存原始文档失败: {e}")

    def _normalize_text(self, text: str) -> str:
        """Normalize a text string.

        Args:
            text: Raw text.

        Returns:
            Text with runs of whitespace collapsed to single spaces and
            control characters removed.
        """
        # Collapse all whitespace runs (incl. newlines/tabs) to single spaces.
        text = " ".join(text.split())
        # Strip ASCII control characters (keeping \t, \n, \r is moot here —
        # they were already collapsed above).
        text = re.sub(r"[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]", "", text)
        return text.strip()

    def _build_documents(self, documents: List[Dict]):
        """Validate raw document dicts and convert them to Document objects.

        Documents missing content/source/doc_type are skipped with a warning.

        Returns:
            List of langchain Document objects with normalized content.
        """
        processed_docs = []
        for i, doc in enumerate(documents):
            try:
                # Required fields: content, source, doc_type (all non-empty).
                if "content" not in doc or not doc["content"]:
                    print(f"⚠️  文档 {i} 缺少 content 字段，已跳过")
                    continue
                if "source" not in doc or not doc["source"]:
                    print(f"⚠️  文档 {i} 缺少 source 字段，已跳过")
                    continue
                if "doc_type" not in doc or not doc["doc_type"]:
                    print(f"⚠️  文档 {i} 缺少 doc_type 字段，已跳过")
                    continue

                content = self._normalize_text(doc["content"])

                meta = {
                    "source": doc.get("source"),
                    "doc_type": doc.get("doc_type"),
                    "created_at": datetime.now().isoformat(),
                }
                # Merge optional caller-supplied metadata (may override the above).
                if "metadata" in doc and isinstance(doc["metadata"], dict):
                    meta.update(doc["metadata"])

                processed_docs.append(Document(page_content=content, metadata=meta))
                print(f"✅ 文档 {i}: {doc['source']} ({doc['doc_type']}) - {len(content)} 字符")
            except Exception as e:
                print(f"❌ 处理文档 {i} 失败: {str(e)}")
                continue
        return processed_docs

    @staticmethod
    def _make_doc_id(doc: Dict) -> str:
        """Build a stable document ID from source + content prefix.

        Uses md5 instead of the original hash(): Python's str hashing is
        randomized per process (PYTHONHASHSEED), so hash()-based IDs changed
        on every run and defeated duplicate detection across restarts.
        The numeric format (31-bit positive int) is kept for compatibility.
        """
        prefix = doc.get('content', '')[:50]
        digest = int(hashlib.md5(prefix.encode('utf-8')).hexdigest(), 16) & 0x7FFFFFFF
        return f"{doc.get('source')}_{digest}"

    def _record_original_documents(self, documents: List[Dict]):
        """Append new original documents (deduplicated by ID) and persist."""
        existing_ids = {d.get('id') for d in self.original_documents}
        for doc in documents:
            doc_id = self._make_doc_id(doc)
            if doc_id in existing_ids:
                continue
            doc_copy = doc.copy()
            doc_copy['id'] = doc_id
            doc_copy['created_at'] = datetime.now().isoformat()
            self.original_documents.append(doc_copy)
            existing_ids.add(doc_id)
        self._save_documents()

    def add_documents(self, documents: List[Dict]) -> Dict:
        """Add documents to the knowledge base.

        Args:
            documents: Document list; each document contains:
                - content: text content (required)
                - source: source identifier (required)
                - doc_type: document type (required)
                - metadata: extra metadata dict (optional)

        Returns:
            Result dict with either "success"/"documents_count"/"chunks_count"
            or "error".
        """
        if self.embeddings is None:
            return {"error": "❌ 嵌入模型未初始化，无法添加文档"}

        if not documents:
            return {"error": "文档列表不能为空"}

        processed_docs = self._build_documents(documents)
        if not processed_docs:
            return {"error": "❌ 没有有效的文档被处理"}

        # Split into overlapping chunks, preferring Chinese sentence breaks.
        splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            separators=["\n\n", "\n", "。", "，", " ", ""],
        )
        chunks = splitter.split_documents(processed_docs)

        print(
            f"📄 已处理 {len(processed_docs)} 个文档，分割成 {len(chunks)} 个块"
        )

        # BUG FIX: the original fell through and implicitly returned None
        # when splitting produced no chunks; report it explicitly instead.
        if not chunks:
            return {"error": "❌ 文本分割后未产生任何块"}

        try:
            if self.vectorstore is None:
                print("🆕 创建新向量库...")
                self.vectorstore = FAISS.from_documents(chunks, self.embeddings)
                print("✅ 向量库创建成功")
            else:
                print("📥 添加文档到现有向量库...")
                self.vectorstore.add_documents(chunks)
                print("✅ 文档添加成功")

            # Persist both the original documents and the vector index.
            self._record_original_documents(documents)
            self._save_vectorstore()

            return {
                "success": True,
                "documents_count": len(processed_docs),
                "chunks_count": len(chunks),
                "message": f"✅ 成功添加 {len(processed_docs)} 个文档，分割成 {len(chunks)} 个块",
            }
        except Exception as e:
            return {
                "error": f"❌ 添加文档到向量库失败: {str(e)}"
            }

    def search_similar(
        self, query: str, k: int = 5, score_threshold: float = 0.3
    ) -> List[Dict]:
        """Semantic similarity search.

        Args:
            query: Query text.
            k: Number of results to retrieve.
            score_threshold: Similarity threshold (0-1) applied after the
                distance-to-similarity conversion.

        Returns:
            List of result dicts (content, similarity, source, doc_type,
            created_at, metadata), possibly fewer than k after filtering.
        """
        if self.vectorstore is None:
            return []

        try:
            # FAISS returns (Document, distance) pairs.
            results = self.vectorstore.similarity_search_with_score(query, k=k)

            processed_results = []
            for doc, score in results:
                # Convert distance to similarity with 1 / (1 + distance):
                # smaller distance => higher similarity.
                similarity = 1.0 / (1.0 + score)

                if similarity < score_threshold:
                    continue

                processed_results.append(
                    {
                        "content": doc.page_content,
                        "similarity": round(similarity, 4),
                        "source": doc.metadata.get("source", "未知"),
                        "doc_type": doc.metadata.get("doc_type", "未知"),
                        "created_at": doc.metadata.get("created_at", ""),
                        # Pass through any extra metadata (renamed loop vars
                        # to avoid shadowing the `k` parameter).
                        "metadata": {
                            key: value
                            for key, value in doc.metadata.items()
                            if key not in ["source", "doc_type", "created_at"]
                        },
                    }
                )

            return processed_results

        except Exception as e:
            print(f"❌ 搜索失败: {str(e)}")
            return []

    def _save_vectorstore(self):
        """Save the vector store to disk (no-op when the store is empty).

        The None guard fixes a spurious '保存向量库失败' AttributeError that the
        original printed after deleting the last document.
        """
        if self.vectorstore is None:
            return
        try:
            self.vectorstore.save_local(self.vectorstore_path)
            print(f"💾 向量库已保存到 {self.vectorstore_path}")
        except Exception as e:
            print(f"❌ 保存向量库失败: {str(e)}")

    def _delete_original_docs(self, docs_to_delete: List[Dict]) -> Dict:
        """Deletion core shared by delete_documents / delete_documents_by_ids.

        Removes the given original documents and every vector chunk whose
        (source, doc_type) pair matches one of them — chunks carry no
        per-document ID, so the match is by that pair. The FAISS store is
        rebuilt from the surviving chunks, or wiped from disk when nothing
        remains.

        Args:
            docs_to_delete: Non-empty list of entries from
                self.original_documents.

        Returns:
            Success dict with "deleted_count" (original documents, not chunks).
        """
        delete_keys = {(d.get("source"), d.get("doc_type")) for d in docs_to_delete}

        # Partition existing chunks in one O(n) pass (the original used an
        # O(n²) `doc not in chunks_to_delete` membership test).
        all_chunks = list(self.vectorstore.docstore._dict.values())
        kept, dropped = [], []
        for chunk in all_chunks:
            key = (chunk.metadata.get("source"), chunk.metadata.get("doc_type"))
            (dropped if key in delete_keys else kept).append(chunk)

        if kept:
            # FAISS offers no cheap targeted delete here; rebuild from survivors.
            self.vectorstore = FAISS.from_documents(kept, self.embeddings)
        else:
            # Nothing left: drop the in-memory store and clear the on-disk dir.
            self.vectorstore = None
            if os.path.exists(self.vectorstore_path):
                shutil.rmtree(self.vectorstore_path)
                os.makedirs(self.vectorstore_path, exist_ok=True)

        # Remove the originals by exact ID and persist both stores.
        deleted_ids = {d.get("id") for d in docs_to_delete}
        self.original_documents = [
            d for d in self.original_documents if d.get("id") not in deleted_ids
        ]
        self._save_documents()
        self._save_vectorstore()

        deleted_count = len(docs_to_delete)
        print(f"✅ 已删除 {deleted_count} 个原始文档（{len(dropped)} 个向量块）")
        return {
            "success": True,
            "deleted_count": deleted_count,
            "message": f"成功删除 {deleted_count} 个文档"
        }

    def delete_documents_by_ids(self, doc_ids: List[str]) -> Dict:
        """Delete documents by exact document ID.

        Args:
            doc_ids: List of document IDs.

        Returns:
            Result dict ("success"/"warning"/"error" with "deleted_count").
        """
        if self.vectorstore is None:
            return {"error": "向量库未初始化，无法删除"}

        if not doc_ids:
            return {"warning": "文档ID列表为空", "deleted_count": 0}

        try:
            wanted = set(doc_ids)
            to_delete = [d for d in self.original_documents if d.get("id") in wanted]
            if not to_delete:
                return {"warning": "没有找到匹配的文档", "deleted_count": 0}
            return self._delete_original_docs(to_delete)
        except Exception as e:
            error_msg = f"❌ 删除文档失败: {str(e)}"
            print(error_msg)
            return {"error": error_msg}

    def delete_documents(self, filters: Optional[Dict] = None) -> Dict:
        """Delete all documents matching the given filter conditions.

        Args:
            filters: Filter dict, e.g. {"source": "filename.txt"} or
                {"doc_type": "workorder"}. An empty or None filter matches
                EVERY document (preserving the original semantics).

        Returns:
            Result dict ("success"/"warning"/"error" with "deleted_count").
        """
        if self.vectorstore is None:
            return {"error": "向量库未初始化，无法删除"}

        filters = filters or {}

        try:
            # A document matches when every filter key/value pair agrees.
            to_delete = [
                d for d in self.original_documents
                if all(d.get(key) == value for key, value in filters.items())
            ]
            if not to_delete:
                return {"warning": "没有匹配的文档需要删除", "deleted_count": 0}
            return self._delete_original_docs(to_delete)
        except Exception as e:
            error_msg = f"❌ 删除文档失败: {str(e)}"
            print(error_msg)
            return {"error": error_msg}

    def get_stats(self) -> Dict:
        """Return knowledge-base statistics (paths, chunking config, sizes)."""
        stats = {
            "kb_enabled": KB_ENABLED,
            "kb_path": self.kb_path,
            "vectorstore_path": self.vectorstore_path,
            "vectorstore_exists": self.vectorstore is not None,
            "chunk_size": self.chunk_size,
            "chunk_overlap": self.chunk_overlap,
        }

        # Report the on-disk index size when the directory exists.
        if os.path.exists(self.vectorstore_path):
            stats["vectorstore_size_mb"] = self._get_dir_size(self.vectorstore_path)

        return stats

    @staticmethod
    def _get_dir_size(path: str) -> float:
        """Return the total size of the files directly in `path`, in MB.

        Non-recursive: subdirectories are not traversed (FAISS stores its
        index files flat, so this is sufficient here).
        """
        total_size = 0
        if os.path.exists(path):
            for entry in os.scandir(path):
                if entry.is_file():
                    total_size += entry.stat().st_size
        return round(total_size / 1024 / 1024, 2)


# Global knowledge-base singleton instance.
_kb_manager = None


def get_kb_manager():
    """Return the process-wide KnowledgeBaseManager singleton.

    Lazily constructs the manager on first use when KB_ENABLED is true;
    when the feature is disabled, prints a notice and returns None.
    """
    global _kb_manager
    if _kb_manager is not None:
        return _kb_manager
    if KB_ENABLED:
        _kb_manager = KnowledgeBaseManager()
    else:
        print("⚠️  知识库功能已禁用（KB_ENABLED=false）")
    return _kb_manager


# 注：工具已拆分到其他文件
# - search_knowledge_base: 在 tools/knowledge_base_search.py 中

