"""
知识图谱服务模块
提供知识图谱存储和查询功能
"""

import os
from typing import List, Dict, Any, Optional, Tuple
import logging
from datetime import datetime
import traceback
from py2neo import Graph, Node, Relationship, NodeMatcher
from src.config.knowledge_graph_settings import get_knowledge_graph_settings
try:
    from sentence_transformers import SentenceTransformer
    SENTENCE_TRANSFORMERS_AVAILABLE = True
except ImportError:
    SentenceTransformer = None  # 修复：为SentenceTransformer提供默认值
    SENTENCE_TRANSFORMERS_AVAILABLE = False
    logging.warning("Sentence Transformers库未安装，将禁用语义搜索功能")
    
import numpy as np
try:
    import faiss
    FAISS_AVAILABLE = True
except ImportError:
    FAISS_AVAILABLE = False
    logging.warning("FAISS库未安装，将禁用语义搜索功能")

from src.utils.logging import get_logger
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger
import asyncio
import threading

# Module-level logger obtained from the project's logging helper; shared by
# every method of KnowledgeGraphService below.
logger = get_logger(__name__)

class KnowledgeGraphService:
    """Knowledge-graph service.

    Persists entities and relationships in Neo4j (via py2neo) and optionally
    provides semantic search over ``Concept`` nodes using Sentence-Transformers
    embeddings stored in a FAISS ``IndexFlatL2``.  Every heavyweight dependency
    (Neo4j, sentence-transformers, FAISS, APScheduler) is optional at runtime:
    when one is missing or fails to initialise, the related feature is disabled
    and the service keeps working in a degraded mode instead of raising.
    """

    def __init__(self):
        # py2neo Graph connection; None when Neo4j is unconfigured/unreachable.
        self.graph = None
        # SentenceTransformer model used to embed node texts; None if disabled.
        self.embedding_model = None
        # FAISS IndexFlatL2 over node embeddings; None if disabled.
        self.vector_index = None
        # py2neo NodeMatcher bound to self.graph; None without a connection.
        self.node_matcher = None
        # Matrix of node embeddings; row i embeds the node whose identity is
        # self.embedding_to_node[i].
        self.node_embeddings = []
        # Mapping: FAISS row index -> Neo4j node identity.
        self.embedding_to_node = {}
        # Reserved slot for a spaCy model; never loaded in this class.
        self.nlp = None
        # APScheduler BackgroundScheduler, created lazily by
        # auto_build_from_data_source().
        self.scheduler = None
        self._initialize_components()

    def _initialize_components(self):
        """Initialise the graph connection and the embedding components.

        Both steps are best-effort: failures are logged and the corresponding
        attributes stay ``None`` so the service degrades gracefully.
        """
        self._connect_graph()
        self._init_embeddings()

    def _connect_graph(self):
        """Connect to Neo4j when fully configured; otherwise stay in memory mode."""
        try:
            kg_settings = get_knowledge_graph_settings()
            if kg_settings.neo4j_uri and kg_settings.neo4j_user and kg_settings.neo4j_password:
                self.graph = Graph(
                    uri=kg_settings.neo4j_uri,
                    user=kg_settings.neo4j_user,
                    password=kg_settings.neo4j_password
                )
                self.node_matcher = NodeMatcher(self.graph)
                logger.info("成功连接到Neo4j图数据库")
            else:
                logger.warning("Neo4j配置不完整，跳过图数据库连接")
                self.graph = None
                self.node_matcher = None
        except Exception as e:
            logger.warning(f"无法连接到Neo4j: {e}, 使用内存模式")
            self.graph = None
            self.node_matcher = None

    def _init_embeddings(self):
        """Load the embedding model and create an empty FAISS index."""
        try:
            if SENTENCE_TRANSFORMERS_AVAILABLE and FAISS_AVAILABLE and SentenceTransformer is not None:
                kg_settings = get_knowledge_graph_settings()
                logger.info(f"正在加载嵌入模型: {kg_settings.embedding_model}")
                self.embedding_model = SentenceTransformer(kg_settings.embedding_model)
                try:
                    # FAISS_AVAILABLE guarantees the module imported at the top
                    # of the file; this guard only disables the index (not the
                    # model) if index creation itself fails.
                    self.vector_index = faiss.IndexFlatL2(kg_settings.faiss_index_dim)
                except Exception:
                    self.vector_index = None
                    logger.warning("FAISS库虽然标记为可用，但无法访问，将禁用语义搜索功能")
                logger.info("嵌入模型和向量索引初始化成功")
            else:
                logger.warning("Sentence Transformers或FAISS不可用，将禁用语义搜索功能")
                self.embedding_model = None
                self.vector_index = None
        except Exception as e:
            logger.warning(f"嵌入模型初始化失败: {e}，将禁用语义搜索功能")
            self.embedding_model = None
            self.vector_index = None

    def auto_build_from_data_source(self, data_source: str, update_interval: int = 3600):
        """Schedule a periodic background rebuild of the graph from a data source.

        Args:
            data_source: Source identifier; one of "knowledge_base",
                "web_crawling" or "document_analysis" (see _perform_auto_build).
            update_interval: Rebuild period in seconds (default: one hour).
        """
        try:
            # Create and start the scheduler lazily on first use.
            if self.scheduler is None:
                self.scheduler = BackgroundScheduler()
                self.scheduler.start()

            job_id = f"auto_build_{data_source}"

            # Re-registering the same source replaces its previous schedule.
            if self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)

            self.scheduler.add_job(
                func=self._perform_auto_build,
                trigger=IntervalTrigger(seconds=update_interval),
                id=job_id,
                args=[data_source],
                name=f"自动构建知识图谱 - {data_source}"
            )

            logger.info(f"已添加自动构建任务: {job_id}，更新间隔: {update_interval}秒")
        except Exception as e:
            logger.error(f"设置自动构建任务失败: {e}")

    def _perform_auto_build(self, data_source: str):
        """Scheduler callback: rebuild the graph from one data source.

        Args:
            data_source: Source identifier; unknown values are logged and ignored.
        """
        # Dispatch table instead of an if/elif chain; unknown keys fall through.
        builders = {
            "knowledge_base": self._build_from_knowledge_base,
            "web_crawling": self._build_from_web_crawling,
            "document_analysis": self._build_from_document_analysis,
        }
        try:
            logger.info(f"开始自动构建知识图谱，数据源: {data_source}")

            builder = builders.get(data_source)
            if builder is not None:
                builder()
            else:
                logger.warning(f"未知的数据源类型: {data_source}")

            logger.info(f"自动构建知识图谱完成，数据源: {data_source}")
        except Exception as e:
            logger.error(f"自动构建知识图谱失败，数据源: {data_source}, 错误: {e}")

    def _build_from_knowledge_base(self):
        """Build graph content from the knowledge base (demo data for now)."""
        try:
            # A real implementation would pull documents from the knowledge-base
            # service; this demo uses a fixed sample instead.
            logger.info("从知识库构建知识图谱")

            sample_data = [
                {
                    "title": "人工智能发展史",
                    "content": "人工智能的发展经历了符号主义、连接主义和行为主义等多个阶段",
                    "category": "technology",
                    "tags": ["AI", "history", "technology"]
                },
                {
                    "title": "机器学习基础",
                    "content": "机器学习是人工智能的一个重要分支，主要包括监督学习、无监督学习和强化学习",
                    "category": "technology",
                    "tags": ["machine learning", "AI", "algorithms"]
                }
            ]

            # Extract entities/relationships per document and persist them.
            for item in sample_data:
                entities, relationships = self.extract_entities_and_relationships(item["content"])
                self._store_entities_and_relationships(entities, relationships)

        except Exception as e:
            logger.error(f"从知识库构建知识图谱失败: {e}")

    def _build_from_web_crawling(self):
        """Build graph content from crawled web data (not implemented)."""
        logger.info("从网络爬虫数据构建知识图谱")
        # A real implementation would pull data from the web-crawling service.

    def _build_from_document_analysis(self):
        """Build graph content from document-analysis results (not implemented)."""
        logger.info("从文档分析结果构建知识图谱")
        # A real implementation would pull data from the document-analysis service.

    def incremental_update(self, new_data: List[Dict[str, Any]]) -> bool:
        """Incrementally merge new data into the knowledge graph.

        Args:
            new_data: List of dicts; the "text" field of each item is mined for
                entities and relationships (missing "text" is treated as empty).

        Returns:
            True on success, False when extraction or storage raised.
        """
        try:
            all_entities = []
            all_relationships = []

            for item in new_data:
                entities, relationships = self.extract_entities_and_relationships(item.get("text", ""))
                all_entities.extend(entities)
                all_relationships.extend(relationships)

            # Storage deduplicates against existing nodes/relationships.
            self._store_entities_and_relationships(all_entities, all_relationships)

            logger.info(f"增量更新完成，新增 {len(all_entities)} 个实体和 {len(all_relationships)} 个关系")
            return True
        except Exception as e:
            logger.error(f"增量更新失败: {e}")
            return False

    def _store_entities_and_relationships(self, entities: List[Dict[str, Any]], relationships: List[Dict[str, Any]]):
        """Persist entities and relationships in a single Neo4j transaction.

        Entities are deduplicated by (type, name) against existing nodes;
        relationships are created only between entities of this batch and are
        skipped when an edge of the same type already exists.  The vector index
        is rebuilt after a successful commit.

        Args:
            entities: Dicts with "name", optional "type" (default "Entity") and
                arbitrary extra keys stored as node properties.
            relationships: Dicts with "source"/"target" entity names, optional
                "type" (default "RELATED_TO") and extra keys stored as
                relationship properties.
        """
        if not self.graph:
            logger.warning("图数据库未连接，无法存储实体和关系")
            return

        if not self.node_matcher:
            logger.warning("NodeMatcher未初始化，无法存储实体和关系")
            return

        tx = None
        try:
            tx = self.graph.begin()
            entity_nodes = self._merge_entity_nodes(tx, entities)
            self._merge_relationships(tx, relationships, entity_nodes)
            tx.commit()

            # Keep the semantic index in sync with the new graph content.
            self._update_vector_index()

            logger.info(f"成功存储 {len(entities)} 个实体和 {len(relationships)} 个关系")
        except Exception as e:
            # Fix: roll back the open transaction instead of leaking it.
            if tx is not None:
                try:
                    tx.rollback()
                except Exception:
                    pass
            logger.error(f"存储实体和关系失败: {e}")

    def _merge_entity_nodes(self, tx, entities: List[Dict[str, Any]]) -> Dict[str, Node]:
        """Find-or-create one node per entity; returns a name -> Node mapping."""
        entity_nodes: Dict[str, Node] = {}
        for entity in entities:
            name = entity.get("name")
            if not name:
                continue
            entity_type = entity.get("type", "Entity")
            # Reuse an existing node with the same label and name when present.
            node = self.node_matcher.match(entity_type, name=name).first() if self.node_matcher else None
            if not node:
                node = Node(entity_type, name=name)
                # Copy all remaining entity keys as node properties.
                for key, value in entity.items():
                    if key not in ("name", "type"):
                        node[key] = value
                tx.create(node)
            entity_nodes[name] = node
        return entity_nodes

    def _merge_relationships(self, tx, relationships: List[Dict[str, Any]], entity_nodes: Dict[str, Node]):
        """Create relationships between this batch's nodes, skipping duplicates."""
        for relationship in relationships:
            source_name = relationship.get("source")
            target_name = relationship.get("target")
            rel_type = relationship.get("type", "RELATED_TO")

            # Both endpoints must belong to the current batch.
            if source_name not in entity_nodes or target_name not in entity_nodes:
                continue

            source_node = entity_nodes[source_name]
            target_node = entity_nodes[target_name]

            # Skip when an identically-typed edge already exists.
            existing_rel = self.graph.match((source_node, target_node), r_type=rel_type).first()
            if not existing_rel:
                rel = Relationship(source_node, rel_type, target_node)
                # Copy all remaining keys as relationship properties.
                for key, value in relationship.items():
                    if key not in ("source", "target", "type"):
                        rel[key] = value
                tx.create(rel)

    def _update_vector_index(self):
        """Rebuild the vector index after graph changes.

        Simple strategy: a full rebuild.  A production deployment would likely
        want an incremental update mechanism instead.
        """
        try:
            self._build_vector_index()
            logger.info("向量索引更新完成")
        except Exception as e:
            logger.error(f"更新向量索引失败: {e}")

    def _build_vector_index(self):
        """(Re)build the FAISS index from all ``Concept`` nodes in the graph."""
        if not self.graph or not self.embedding_model:
            logger.warning("图数据库未连接或嵌入模型未初始化，无法构建向量索引")
            return

        try:
            nodes = list(self.graph.nodes.match("Concept"))
            if not nodes:
                logger.info("知识图谱中没有Concept节点，跳过向量索引构建")
                return

            # Embed "name: description" (or just the name) per node.
            node_texts = []
            node_ids = []
            for node in nodes:
                text = str(node.get("name", ""))
                if node.get("description"):
                    text += ": " + str(node.get("description", ""))
                node_texts.append(text)
                node_ids.append(node.identity)

            self.node_embeddings = self.embedding_model.encode(node_texts)
            # Row i of the embedding matrix corresponds to node_ids[i].
            self.embedding_to_node = dict(enumerate(node_ids))

            if len(self.node_embeddings) > 0 and self.vector_index is not None:
                self.vector_index.reset()
                # FAISS expects a 2-D float32 array, even for a single vector.
                embeddings_array = np.atleast_2d(np.asarray(self.node_embeddings, dtype='float32'))
                self.vector_index.add(embeddings_array)
                logger.info(f"成功为 {len(self.node_embeddings)} 个节点构建向量索引")
            else:
                logger.info("没有节点需要构建向量索引")
        except Exception as e:
            logger.warning(f"构建向量索引时发生错误: {e}")
            traceback.print_exc()

    @staticmethod
    def _node_labels(node) -> List[str]:
        """Return a node's labels as a list of strings.

        Fix: py2neo's ``node.labels`` is a LabelSetView — iterable but not a
        ``set``/``list``/``tuple`` — so the old isinstance check stringified the
        whole view into one garbage label.  Iterate when possible instead.
        """
        labels = getattr(node, "labels", None)
        if labels is None:
            return []
        if isinstance(labels, str):
            return [labels]
        try:
            return [str(label) for label in labels]
        except TypeError:
            # Not iterable at all — fall back to a single stringified label.
            return [str(labels)]

    def semantic_search(self, query: str, limit: int = 5, similarity_threshold: float = 0.5) -> List[Dict[str, Any]]:
        """Vector-based search over ``Concept`` nodes.

        Args:
            query: Free-text query string.
            limit: Maximum number of results to return.
            similarity_threshold: Results whose similarity (1 / (1 + L2
                distance)) falls below this value are dropped.

        Returns:
            Dicts with "id", "labels", "properties", "similarity" and
            "distance", sorted by similarity descending.  Empty list when
            semantic search is unavailable or on failure.
        """
        try:
            # Semantic search needs the model, the index and the graph.
            if not self.embedding_model or not self.vector_index or not self.graph:
                logger.warning("语义搜索不可用：缺少必要的组件")
                return []

            # Build the index lazily on first use.
            if not self.embedding_to_node:
                self._build_vector_index()
                if not self.embedding_to_node:
                    logger.warning("无法构建向量索引，语义搜索不可用")
                    return []

            # FAISS expects a 2-D float32 query matrix.
            query_embedding = np.atleast_2d(
                np.asarray(self.embedding_model.encode([query]), dtype='float32')
            )

            if query_embedding.shape[1] != self.vector_index.d:
                raise ValueError(f"查询向量维度 {query_embedding.shape[1]} 与索引维度 {self.vector_index.d} 不匹配")

            # Over-fetch (2x) so threshold filtering still leaves enough hits.
            k: int = min(limit * 2, self.vector_index.ntotal) if self.vector_index.ntotal > 0 else limit
            if k <= 0:
                return []

            distances, indices = self.vector_index.search(query_embedding, k=k)

            results = []
            for raw_idx, raw_distance in zip(indices[0], distances[0]):
                if len(results) >= limit:
                    break

                distance = float(raw_distance)
                similarity = 1 / (1 + distance)  # map L2 distance into (0, 1]

                if similarity < similarity_threshold:
                    continue

                # FAISS pads missing hits with index -1, which is absent here.
                node_id = self.embedding_to_node.get(raw_idx)
                if node_id is None:
                    continue

                node = self.graph.nodes.get(node_id)
                if node:
                    results.append({
                        "id": node_id,
                        "labels": self._node_labels(node),
                        "properties": dict(node),
                        "similarity": similarity,
                        "distance": distance
                    })

            results.sort(key=lambda x: x["similarity"], reverse=True)
            logger.info(f"语义搜索完成，返回 {len(results)} 个结果")
            return results[:limit]

        except Exception as e:
            logger.error(f"语义搜索失败: {e}")
            traceback.print_exc()
            return []

    def query_knowledge(self, query: str, limit: int = 5) -> List[Dict[str, Any]]:
        """Keyword search over ``Concept`` nodes by name or description.

        Args:
            query: Substring to look for (matched case-sensitively, literally).
            limit: Maximum number of results.

        Returns:
            Dicts with "id", "labels" and "properties"; empty list when no
            graph is connected or the query fails.
        """
        if self.graph and self.node_matcher:
            import re  # local import: only needed to escape the user pattern
            try:
                # Fix: NodeMatch.where() keyword arguments are property-equality
                # filters, not Cypher parameters, so the original "$query"
                # placeholder was never bound.  Run parameterised Cypher
                # directly, escaping the user text so regex metacharacters
                # match literally.
                cypher = (
                    "MATCH (n:Concept) "
                    "WHERE n.name =~ $query OR n.description =~ $query "
                    "RETURN n LIMIT $limit"
                )
                cursor = self.graph.run(cypher, query=f".*{re.escape(query)}.*", limit=limit)

                results = []
                for record in cursor:
                    node = record["n"]
                    results.append({
                        "id": node.identity,
                        "labels": self._node_labels(node),
                        "properties": dict(node)
                    })

                logger.info(f"知识查询完成，返回 {len(results)} 个结果")
                return results
            except Exception as e:
                logger.error(f"图数据库查询失败: {e}")

        # No graph connection, or the query failed.
        return []

    def hybrid_search(self, query: str, limit: int = 5) -> List[Dict[str, Any]]:
        """Hybrid search: semantic first, keyword fallback.

        Args:
            query: Query string.
            limit: Maximum number of results.

        Returns:
            Semantic-search results when available and non-empty; otherwise the
            keyword-search results from query_knowledge().
        """
        # Try semantic search first when the embedding stack is available.
        semantic_results = self.semantic_search(query, limit) if self.embedding_model and self.vector_index else []

        if semantic_results:
            logger.info(f"混合搜索完成，语义搜索返回 {len(semantic_results)} 个结果")
            return semantic_results

        # Fall back to the plain keyword query.
        query_results = self.query_knowledge(query, limit)
        logger.info(f"混合搜索完成，关键词搜索返回 {len(query_results)} 个结果")
        return query_results

    def extract_entities_and_relationships(self, text: str) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
        """Extract entities and relationships from text (demo implementation).

        A real implementation would run NER / relation extraction; this version
        matches a fixed catalogue of Chinese AI concepts by substring.

        Args:
            text: Input text to mine.

        Returns:
            Tuple of (entities, relationships) in catalogue order.
        """
        # Known concepts: (name, description).  Order determines output order.
        concept_catalog = [
            ("人工智能", "计算机科学的一个分支"),
            ("机器学习", "人工智能的一个重要分支"),
            ("符号主义", "人工智能发展的一个阶段"),
            ("连接主义", "人工智能发展的一个阶段"),
            ("监督学习", "机器学习的一种方法"),
            ("无监督学习", "机器学习的一种方法"),
        ]
        entities = [
            {"name": name, "type": "Concept", "description": description}
            for name, description in concept_catalog
            if name in text
        ]

        # Known relations: (source, target, type, description); emitted only
        # when both endpoint names appear in the text.
        relation_catalog = [
            ("机器学习", "人工智能", "属于", "机器学习是人工智能的一个重要分支"),
            ("监督学习", "机器学习", "属于", "监督学习是机器学习的一种方法"),
            ("符号主义", "人工智能", "发展阶段", "符号主义是人工智能发展的一个阶段"),
        ]
        relationships = [
            {"source": source, "target": target, "type": rel_type, "description": description}
            for source, target, rel_type, description in relation_catalog
            if source in text and target in text
        ]

        logger.info(f"从文本中提取了 {len(entities)} 个实体和 {len(relationships)} 个关系")
        return entities, relationships

    def store_knowledge(self, text: str) -> bool:
        """Store a piece of knowledge (stub — logs and reports success).

        Args:
            text: Text to store.

        Returns:
            Always True; no persistence is performed yet.
        """
        logger.info(f"存储知识: {text}")
        return True

    def infer_new_relationships(self) -> List[Dict[str, Any]]:
        """Infer new relationships from the graph (stub).

        Returns:
            Always an empty list; no inference is performed yet.
        """
        logger.info("推理新的关系")
        return []

    def cross_domain_knowledge_fusion(self, domains: List[str]) -> bool:
        """Fuse knowledge across domains (stub — logs and reports success).

        Args:
            domains: Domain identifiers to fuse.

        Returns:
            Always True; no fusion is performed yet.
        """
        logger.info(f"跨领域知识融合: {domains}")
        return True
