"""
知识库管理服务
"""
import json
import os
import re
import uuid
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import chromadb
from chromadb.config import Settings
from loguru import logger
from sqlalchemy.orm import Session

from app.core.config import settings
from app.models.operation import OperationDocument, OperationCategory
from app.services.embedding_service import create_embedding_service


class KnowledgeService:
    """Knowledge-base management service.

    Singleton that pairs the Doubao embedding service with a ChromaDB
    vector store to index, search, update and delete operation documents.
    Both backends are optional: when either fails to initialize the service
    degrades gracefully (adds/updates are skipped, searches return empty
    results) instead of raising.
    """

    _instance = None      # cached singleton instance
    _initialized = False  # guards against re-running __init__ on the singleton

    def __new__(cls):
        """Singleton: every instantiation returns the same object."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on every KnowledgeService() call even though __new__
        # returns the cached instance, so bail out after the first setup.
        if KnowledgeService._initialized:
            return

        # Embedding backend (optional; None means "unavailable").
        try:
            self.embedding_service = create_embedding_service()
            if self.embedding_service.is_available():
                logger.info(f"成功初始化豆包嵌入服务: {settings.embedding_model}")
            else:
                logger.warning("豆包嵌入服务不可用")
                self.embedding_service = None
        except Exception as e:
            logger.warning(f"无法初始化嵌入服务: {e}")
            self.embedding_service = None

        # Vector-store backend (optional; None means "unavailable").
        try:
            self.chroma_client = chromadb.PersistentClient(
                path=settings.chroma_persist_directory,
                settings=Settings(anonymized_telemetry=False)
            )
            self.collection_name = "operation_documents"
            self._init_collection()
            logger.info("成功初始化ChromaDB")
        except Exception as e:
            logger.warning(f"无法初始化ChromaDB: {e}")
            self.chroma_client = None
            self.collection = None

        KnowledgeService._initialized = True

    def _init_collection(self):
        """Connect to the document collection, creating it if missing."""
        try:
            self.collection = self.chroma_client.get_collection(self.collection_name)
            logger.info(f"已连接到现有集合: {self.collection_name}")
        except Exception:
            # get_collection raises when the collection does not exist yet.
            self.collection = self.chroma_client.create_collection(
                name=self.collection_name,
                metadata={"description": "系统操作文档向量存储"}
            )
            logger.info(f"已创建新集合: {self.collection_name}")

    @staticmethod
    def _as_embedding_list(embedding) -> list:
        """Normalize an embedding (list, numpy array, ...) to a plain list."""
        if isinstance(embedding, list):
            return embedding
        return embedding.tolist() if hasattr(embedding, 'tolist') else list(embedding)

    def _embed_document(self, document: OperationDocument) -> Tuple[str, list]:
        """Build the indexable text for *document* and return (text, embedding)."""
        text_content = f"{document.title}\n{document.description}\n{document.content}"
        embedding = self._as_embedding_list(self.embedding_service.encode(text_content))
        return text_content, embedding

    @staticmethod
    def _document_metadata(document: OperationDocument) -> Dict:
        """Metadata stored alongside the vector, used for filtering/scoring."""
        return {
            "title": document.title,
            "platform": document.platform or "general",
            "difficulty_level": document.difficulty_level,
            "risk_level": document.risk_level,
            "keywords": document.keywords or "",
            "category_id": document.category_id or 0
        }

    def add_document(self, db: Session, document: OperationDocument) -> str:
        """Embed *document* and insert it into the vector store.

        Returns:
            The generated vector-store id, or the sentinel string
            "unavailable" when either backend is offline.

        Raises:
            Re-raises any indexing error after rolling back *db*.
        """
        if not self.embedding_service or not self.collection:
            logger.warning("嵌入服务或向量数据库不可用，跳过文档添加")
            return "unavailable"

        try:
            text_content, embedding = self._embed_document(document)
            doc_id = str(uuid.uuid4())

            self.collection.add(
                embeddings=[embedding],
                documents=[text_content],
                metadatas=[self._document_metadata(document)],
                ids=[doc_id]
            )

            # Persist the vector-store id back onto the SQL record.
            document.embedding_id = doc_id
            db.commit()

            logger.info(f"已添加文档到知识库: {document.title}")
            return doc_id

        except Exception as e:
            logger.error(f"添加文档到知识库失败: {e}")
            db.rollback()
            raise

    def search_documents(
        self,
        query: str,
        platform: Optional[str] = None,
        max_results: Optional[int] = None
    ) -> List[Dict]:
        """Hybrid (vector + keyword) search over the document collection.

        Args:
            query: Natural-language search text.
            platform: Optional exact-match filter on the ``platform`` metadata.
            max_results: Cap on returned documents; defaults to
                ``settings.max_search_results``.

        Returns:
            Documents sorted by combined score (vector similarity blended
            with keyword matching). Empty list when the backends are offline
            or any error occurs.
        """
        if not self.embedding_service or not self.collection:
            logger.warning("嵌入服务或向量数据库不可用，返回空结果")
            return []

        try:
            max_results = max_results or settings.max_search_results

            query_embedding = self._as_embedding_list(self.embedding_service.encode(query))

            where_filter = {"platform": platform} if platform else {}

            # Over-fetch 3x candidates so the keyword re-ranking below has
            # enough material to choose from.
            results = self.collection.query(
                query_embeddings=[query_embedding],
                n_results=max_results * 3,
                where=where_filter if where_filter else None,
                include=["documents", "metadatas", "distances"]
            )

            documents = []
            if results["documents"] and results["documents"][0]:
                query_keywords = self._extract_smart_keywords(query)
                logger.info(f"提取的关键词: {query_keywords}")

                for i, doc in enumerate(results["documents"][0]):
                    # Chroma reports distances; convert to a similarity score.
                    similarity = 1 - results["distances"][0][i]
                    metadata = results["metadatas"][0][i]

                    keyword_score, match_details = self._calculate_keyword_score(
                        doc, metadata, query_keywords, query
                    )
                    combined_score = self._calculate_combined_score(
                        similarity, keyword_score, query_keywords, doc
                    )

                    if self._should_include_document(similarity, keyword_score, combined_score, i):
                        documents.append({
                            "content": doc,
                            "metadata": metadata,
                            "similarity": similarity,
                            "keyword_score": keyword_score,
                            "combined_score": combined_score,
                            "match_details": match_details,
                            "id": results["ids"][0][i]
                        })

                # Rank by blended score and trim to the requested size.
                documents.sort(key=lambda x: x["combined_score"], reverse=True)
                documents = documents[:max_results]

            logger.info(f"搜索查询: '{query}', 找到 {len(documents)} 个相关文档")
            return documents

        except Exception as e:
            logger.error(f"搜索文档失败: {e}")
            return []

    def _extract_smart_keywords(self, query: str) -> set:
        """Extract search keywords from *query*.

        Collects Chinese word runs (plus their 3-4 character sub-words so
        embedded professional terms still match), English words, and any
        known domain terms, while discarding stop words and fragments
        shorter than 2 characters.
        """
        query_lower = query.lower()
        query_keywords = set()

        # Interrogatives, particles and other low-signal words to discard.
        stop_words = {
            '如何', '怎么', '怎样', '什么', '哪里', '哪个', '为什么', '是否', '能否',
            '实现', '设置', '操作', '使用', '启动', '开启', '关闭', '配置', '调整',
            '的', '了', '吗', '呢', '吧', '啊', '呀', '嘛', '？', '?', '！', '!',
            'how', 'what', 'where', 'when', 'why', 'can', 'could', 'should', 'would'
        }

        # 1. Chinese runs, plus 3-4 char sub-words of runs >= 4 chars.
        for word in re.findall(r'[\u4e00-\u9fff]+', query_lower):
            if len(word) < 2 or word in stop_words:
                continue
            query_keywords.add(word)
            if len(word) >= 4:
                for start in range(len(word) - 2):
                    for size in range(3, min(5, len(word) - start + 1)):
                        sub_word = word[start:start + size]
                        if sub_word not in stop_words:
                            query_keywords.add(sub_word)

        # 2. English words.
        for word in re.findall(r'[a-zA-Z]+', query_lower):
            if len(word) >= 2 and word not in stop_words:
                query_keywords.add(word)

        # 3. Known professional terms are added whole, together with their
        #    core fragments, so they dominate keyword scoring.
        professional_terms = {
            # banking-business terms
            '还款冲销', '放款冲销', '利息调整', '账务处理', '风险分类',
            '贷款申请', '授信申请', '客户管理', '借据管理', '担保管理',
            '贷款核销', '代偿申请', '还款申请', '放款申请', '额度管理',
            '菜单路径', '操作步骤', '功能描述', '注意事项',
            # system-operation terms
            '系统管理', '用户管理', '角色管理', '权限管理', '参数管理',
            '报表查询', '数据导出', '审批流程', '工作流程'
        }
        core_fragments = ('冲销', '还款', '放款', '申请', '管理', '菜单', '路径')
        for term in professional_terms:
            if term in query_lower:
                query_keywords.add(term)
                query_keywords.update(f for f in core_fragments if f in term)

        # 4. Drop anything shorter than 2 characters.
        return {kw for kw in query_keywords if len(kw) >= 2}

    def _calculate_keyword_score(self, doc: str, metadata: Dict, query_keywords: set, original_query: str) -> Tuple[float, Dict]:
        """Score keyword overlap between the query and one document.

        Title hits weigh 3, keyword-field hits 2, content hits 1; long
        (>= 4 char) keywords appearing verbatim in the query get a 1.5x
        bonus. Each keyword contributes at most 3 points and the total is
        normalized to [0, 1].

        Returns:
            ``(normalized_score, match_details)`` where *match_details*
            records which keywords matched in which field.
        """
        title = metadata.get("title", "").lower()
        keywords = metadata.get("keywords", "").lower()
        content = doc.lower()

        match_details = {
            "title_matches": [],
            "keyword_matches": [],
            "content_matches": [],
            "exact_matches": []
        }

        total_score = 0
        max_possible_score = len(query_keywords)

        for keyword in query_keywords:
            keyword_score = 0

            if keyword in title:      # title hit: highest weight
                keyword_score += 3
                match_details["title_matches"].append(keyword)

            if keyword in keywords:   # keyword-field hit
                keyword_score += 2
                match_details["keyword_matches"].append(keyword)

            if keyword in content:    # body hit
                keyword_score += 1
                match_details["content_matches"].append(keyword)

            # Bonus for long keywords taken verbatim from the query.
            if keyword in original_query.lower() and len(keyword) >= 4:
                keyword_score *= 1.5
                match_details["exact_matches"].append(keyword)

            total_score += min(keyword_score, 3)  # cap per-keyword contribution

        # Normalize against the maximum achievable total.
        normalized_score = total_score / (max_possible_score * 3) if max_possible_score > 0 else 0

        return min(normalized_score, 1.0), match_details

    def _calculate_combined_score(self, similarity: float, keyword_score: float, query_keywords: set, doc: str) -> float:
        """Blend vector similarity and keyword score into one ranking value.

        Default weights are 0.7/0.3 (similarity/keyword). The balance shifts
        toward keywords when similarity is very low but keywords match well,
        or when the query contains certain professional terms. Documents
        shorter than 100 characters are penalized by 20%.
        """
        similarity_weight = 0.7
        keyword_weight = 0.3

        # Low vector similarity but strong keyword overlap: trust keywords more.
        if similarity < 0.1 and keyword_score > 0.5:
            similarity_weight = 0.4
            keyword_weight = 0.6

        # Professional-term queries: weigh the two signals evenly.
        professional_terms = ['在线路线规划', '路线规划', '导航', '行程规划']
        has_professional_term = any(term in ' '.join(query_keywords) for term in professional_terms)

        if has_professional_term and keyword_score > 0.3:
            similarity_weight = 0.5
            keyword_weight = 0.5

        combined_score = similarity * similarity_weight + keyword_score * keyword_weight

        # Very short documents are likely too thin to be useful.
        if len(doc) < 100:
            combined_score *= 0.8

        return combined_score

    def _should_include_document(self, similarity: float, keyword_score: float, combined_score: float, index: int) -> bool:
        """Decide whether a candidate survives the re-ranking filter.

        Deliberately permissive: the first 5 candidates always pass, and any
        non-trivial keyword, combined-score or similarity signal also passes.
        """
        if index < 5:
            return True
        if keyword_score >= 0.1:
            return True
        if combined_score >= 0.05:
            return True
        if similarity >= 0.01:
            return True
        if keyword_score > 0:
            return True
        return False

    def update_document(self, db: Session, document: OperationDocument) -> bool:
        """Re-embed *document* and update its vector-store entry.

        Returns:
            True on success; False when the backends are offline, the
            document was never indexed, or the update fails (the session is
            rolled back on failure).
        """
        # Guard like add_document/search_documents: without it a missing
        # embedding service crashes with AttributeError and logs a
        # misleading generic error.
        if not self.embedding_service or not self.collection:
            logger.warning("嵌入服务或向量数据库不可用，跳过文档更新")
            return False

        try:
            if not document.embedding_id:
                logger.warning(f"文档 {document.title} 没有embedding_id，无法更新")
                return False

            text_content, embedding = self._embed_document(document)

            self.collection.update(
                ids=[document.embedding_id],
                embeddings=[embedding],
                documents=[text_content],
                metadatas=[self._document_metadata(document)]
            )

            db.commit()
            logger.info(f"已更新文档: {document.title}")
            return True

        except Exception as e:
            logger.error(f"更新文档失败: {e}")
            db.rollback()
            return False

    def delete_document(self, db: Session, document: OperationDocument) -> bool:
        """Remove *document* from the vector store (if indexed) and the DB.

        The SQL delete proceeds even when the vector store is offline, so a
        degraded deployment can still manage its records.
        """
        try:
            # Skip the vector delete when there is nothing indexed or the
            # vector store is unavailable.
            if document.embedding_id and self.collection:
                self.collection.delete(ids=[document.embedding_id])
                logger.info(f"已从向量数据库删除文档: {document.title}")

            db.delete(document)
            db.commit()
            logger.info(f"已删除文档: {document.title}")
            return True

        except Exception as e:
            logger.error(f"删除文档失败: {e}")
            db.rollback()
            return False

    def load_documents_from_files(self, db: Session, directory: Optional[str] = None) -> int:
        """Bulk-load JSON documents from *directory* into the knowledge base.

        Each ``*.json`` file may hold a single document object or a list of
        them; documents whose title already exists in the DB are skipped.

        Returns:
            The number of documents actually loaded (0 on error or when the
            directory does not exist).
        """
        directory = directory or settings.knowledge_base_path
        loaded_count = 0

        try:
            knowledge_path = Path(directory)
            if not knowledge_path.exists():
                logger.warning(f"知识库目录不存在: {directory}")
                return 0

            for file_path in knowledge_path.glob("**/*.json"):
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        data = json.load(f)

                    # A file may contain either one document or a list.
                    documents = data if isinstance(data, list) else [data]

                    for doc_data in documents:
                        # Skip documents already present (matched by title).
                        existing = db.query(OperationDocument).filter(
                            OperationDocument.title == doc_data.get("title")
                        ).first()

                        if existing:
                            logger.info(f"文档已存在，跳过: {doc_data.get('title')}")
                            continue

                        document = OperationDocument(
                            title=doc_data.get("title", ""),
                            description=doc_data.get("description", ""),
                            content=doc_data.get("content", ""),
                            keywords=doc_data.get("keywords", ""),
                            platform=doc_data.get("platform", "general"),
                            difficulty_level=doc_data.get("difficulty_level", 1),
                            risk_level=doc_data.get("risk_level", 1)
                        )

                        db.add(document)
                        db.flush()  # populate the primary key before indexing

                        self.add_document(db, document)
                        loaded_count += 1

                except Exception as e:
                    # A broken file should not abort the whole import.
                    logger.error(f"加载文件失败 {file_path}: {e}")
                    continue

            logger.info(f"成功加载 {loaded_count} 个文档到知识库")
            return loaded_count

        except Exception as e:
            logger.error(f"加载文档失败: {e}")
            return 0

    def get_collection_stats(self) -> Dict:
        """Return basic stats about the vector collection.

        Reports document count, collection name, embedding model and a
        status of "online" / "offline" / "error".
        """
        if not self.collection:
            return {
                "total_documents": 0,
                "collection_name": "unavailable",
                "embedding_model": "unavailable",
                "status": "offline"
            }

        try:
            count = self.collection.count()
            return {
                "total_documents": count,
                "collection_name": self.collection_name,
                "embedding_model": f"doubao:{settings.embedding_model}",
                "status": "online"
            }
        except Exception as e:
            logger.error(f"获取集合统计失败: {e}")
            return {"error": str(e), "status": "error"}
