"""
知识图谱构建引擎
负责实体链接、关系推理、图数据构建和查询优化
"""

import asyncio
import logging
from typing import List, Dict, Any, Optional, Set, Tuple
from dataclasses import dataclass
from datetime import datetime
import hashlib
import json

from neo4j import AsyncGraphDatabase
from py2neo import Graph, Node, Relationship, Subgraph
import networkx as nx
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer

from src.core.models.graph_schema import (
    VulnerabilityNode, ProductNode, AttackVectorNode,
    MitigationNode, ThreatActorNode, GraphRelation
)
from src.core.config.settings import GRAPH_CONFIG
from src.core.utils.embedding import EmbeddingGenerator
from src.core.utils.ontology import SecurityOntology


@dataclass
class EntityLinkingResult:
    """Result of linking one extracted entity against the standard knowledge base."""
    entity_id: str  # canonical id (CVE/CPE/CWE/ATT&CK id) or a generated hash for new entities
    canonical_name: str  # standardized display name of the linked entity
    confidence: float  # similarity score of the link; 1.0 for newly created entities
    aliases: List[str]  # alternative names recorded in the standard entry
    properties: Dict[str, Any]  # full property map of the matched or created entity


@dataclass
class GraphStats:
    """Summary statistics for the constructed graph."""
    node_count: int  # total number of nodes
    relationship_count: int  # total number of relationships
    node_types: Dict[str, int]  # node count per label
    relationship_types: Dict[str, int]  # relationship count per relationship type
    connected_components: int  # weakly-connected component count (0 when GDS is unavailable)
    average_degree: float  # 2 * relationships / nodes (0 for an empty graph)


class EntityLinker:
    """Entity linker: maps extracted entities onto canonical security databases.

    Every standard entity (CVE / CPE / CWE / ATT&CK) is embedded once at
    start-up; an incoming entity is linked to the most similar standard
    entry when the cosine similarity clears ``LINK_THRESHOLD``, otherwise a
    new entity with a deterministic hash id is created.
    """

    # Minimum cosine similarity required to link to an existing standard entity.
    LINK_THRESHOLD = 0.7

    def __init__(self):
        self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
        self.ontology = SecurityOntology()
        self.logger = logging.getLogger(self.__class__.__name__)

        # Preload the canonical entity databases and precompute their
        # embeddings so that per-query linking only encodes the query text.
        self.standard_entities = self._load_standard_entities()
        self.entity_embeddings = self._compute_entity_embeddings()

    def _load_standard_entities(self) -> Dict[str, Dict]:
        """Load the canonical entity databases (CVE, CPE, CWE, ATT&CK).

        In production these loaders should read the real security databases;
        the current implementations return small in-memory samples.
        """
        return {
            "vulnerabilities": self._load_cve_database(),
            "products": self._load_cpe_database(),
            "weaknesses": self._load_cwe_database(),
            "attack_patterns": self._load_attack_patterns()
        }

    def _load_cve_database(self) -> Dict[str, Dict]:
        """Load the CVE database (sample data)."""
        return {
            "CVE-2023-12345": {
                "id": "CVE-2023-12345",
                "description": "Apache HTTP Server path traversal vulnerability",
                "severity": "HIGH",
                "cvss_score": 8.5,
                "aliases": ["Apache Path Traversal"]
            }
        }

    def _load_cpe_database(self) -> Dict[str, Dict]:
        """Load the CPE (Common Platform Enumeration) database (sample data)."""
        return {
            "cpe:2.3:a:apache:http_server:2.4.50": {
                "id": "cpe:2.3:a:apache:http_server:2.4.50",
                "vendor": "apache",
                "product": "http_server",
                "version": "2.4.50",
                "aliases": ["Apache HTTP Server", "Apache Web Server"]
            }
        }

    def _load_cwe_database(self) -> Dict[str, Dict]:
        """Load the CWE (Common Weakness Enumeration) database (sample data)."""
        return {
            "CWE-22": {
                "id": "CWE-22",
                "name": "Path Traversal",
                "description": "Improper Limitation of a Pathname to a Restricted Directory",
                "aliases": ["Directory Traversal", "Path Traversal"]
            }
        }

    def _load_attack_patterns(self) -> Dict[str, Dict]:
        """Load the attack-pattern database (MITRE ATT&CK, sample data)."""
        return {
            "T1083": {
                "id": "T1083",
                "name": "File and Directory Discovery",
                "description": "Adversaries may enumerate files and directories",
                "tactics": ["Discovery"],
                "aliases": ["File Discovery"]
            }
        }

    def _compute_entity_embeddings(self) -> Dict[str, Dict[str, np.ndarray]]:
        """Compute one embedding per standard entity, keyed by type then id."""
        embeddings: Dict[str, Dict[str, np.ndarray]] = {}

        for entity_type, entities in self.standard_entities.items():
            embeddings[entity_type] = {}
            for entity_id, entity_data in entities.items():
                # Build the text to embed from name, description and aliases;
                # missing fields are simply skipped.
                text_parts = [
                    entity_data.get('name', ''),
                    entity_data.get('description', ''),
                    ' '.join(entity_data.get('aliases', []))
                ]
                entity_text = ' '.join(filter(None, text_parts))

                embeddings[entity_type][entity_id] = self.embedding_model.encode(entity_text)

        return embeddings

    async def link_entity(self, entity: Dict[str, Any]) -> "EntityLinkingResult":
        """Link one extracted entity to the standard knowledge base.

        Args:
            entity: dict with optional keys ``type``, ``name``, ``properties``.

        Returns:
            The best-matching standard entity when similarity clears
            ``LINK_THRESHOLD``, otherwise a freshly created entity.
        """
        entity_type = entity.get('type', 'UNKNOWN')
        entity_name = entity.get('name', '')
        entity_properties = entity.get('properties', {})

        # Map extracted entity types onto the standard database buckets.
        type_mapping = {
            'VULNERABILITY': 'vulnerabilities',
            'PRODUCT': 'products',
            'ATTACK_VECTOR': 'attack_patterns',
            'WEAKNESS': 'weaknesses'
        }

        standard_type = type_mapping.get(entity_type)
        if not standard_type or standard_type not in self.standard_entities:
            # Unknown type: nothing to link against, register a new entity.
            return self._create_new_entity(entity)

        # Embed the query entity (name plus optional description).
        query_text = f"{entity_name} {entity_properties.get('description', '')}"
        query_embedding = self.embedding_model.encode(query_text)

        # Nearest-neighbour search over the precomputed standard embeddings.
        best_match = None
        best_score = 0.0

        for entity_id, std_embedding in self.entity_embeddings[standard_type].items():
            similarity = cosine_similarity([query_embedding], [std_embedding])[0][0]

            if similarity > best_score:
                best_score = similarity
                best_match = entity_id

        if best_match and best_score >= self.LINK_THRESHOLD:
            std_entity = self.standard_entities[standard_type][best_match]
            return EntityLinkingResult(
                entity_id=best_match,
                canonical_name=std_entity.get('name', best_match),
                confidence=best_score,
                aliases=std_entity.get('aliases', []),
                properties=std_entity
            )

        # Similarity too low: register as a new entity instead.
        return self._create_new_entity(entity)

    def _create_new_entity(self, entity: Dict[str, Any]) -> "EntityLinkingResult":
        """Build a linking result for an entity that has no standard match."""
        return EntityLinkingResult(
            entity_id=self._generate_entity_id(entity),
            # .get() (not []) so malformed extractor output cannot raise
            # KeyError here; link_entity already tolerates missing keys.
            canonical_name=entity.get('name', ''),
            confidence=1.0,  # newly created entities are trusted as-is
            aliases=[],
            properties=entity.get('properties', {})
        )

    def _generate_entity_id(self, entity: Dict[str, Any]) -> str:
        """Derive a deterministic 16-hex-char id from entity type and name."""
        entity_str = f"{entity.get('type', 'UNKNOWN')}:{entity.get('name', '')}"
        # md5 is used purely as a stable, non-cryptographic fingerprint.
        return hashlib.md5(entity_str.encode()).hexdigest()[:16]

    async def link_entities_batch(self, entities: List[Dict[str, Any]]) -> List["EntityLinkingResult"]:
        """Link a batch of entities concurrently."""
        tasks = [self.link_entity(entity) for entity in entities]
        return await asyncio.gather(*tasks)


class RelationInferencer:
    """Rule-based inference of implicit relations.

    Rules describe two-step (transitive) patterns over the existing
    relations; matching chains produce new, lower-confidence relations
    tagged with the rule that derived them.
    """

    def __init__(self):
        self.ontology = SecurityOntology()
        self.logger = logging.getLogger(self.__class__.__name__)

        # Static rule set; each rule is a two-step pattern plus a conclusion.
        self.inference_rules = self._load_inference_rules()

    def _load_inference_rules(self) -> List[Dict]:
        """Return the built-in inference rules.

        Each rule carries a ``pattern`` of (subject, relation, object)
        triples with ?-prefixed variables, a ``conclusion`` triple and a
        ``confidence`` assigned to derived relations.
        """
        return [
            {
                "name": "vulnerability_product_transitive",
                "pattern": [
                    ("?vuln", "AFFECTS", "?product1"),
                    ("?product1", "COMPONENT_OF", "?product2")
                ],
                "conclusion": ("?vuln", "AFFECTS", "?product2"),
                "confidence": 0.8
            },
            {
                "name": "vulnerability_cwe_mapping",
                "pattern": [
                    ("?vuln", "HAS_TYPE", "?weakness"),
                    ("?weakness", "PARENT_OF", "?parent_weakness")
                ],
                "conclusion": ("?vuln", "BELONGS_TO", "?parent_weakness"),
                "confidence": 0.9
            },
            {
                "name": "attack_chain_inference",
                "pattern": [
                    ("?vuln", "EXPLOITED_BY", "?attack1"),
                    ("?attack1", "ENABLES", "?attack2")
                ],
                "conclusion": ("?vuln", "LEADS_TO", "?attack2"),
                "confidence": 0.7
            }
        ]

    def infer_relations(self, existing_relations: List[Dict], entities: List[Dict]) -> List[Dict]:
        """Apply every rule against the existing relations and return the new ones."""
        entity_lookup = {item['name']: item for item in entities}
        relation_lookup = self._build_relation_index(existing_relations)

        derived: List[Dict] = []
        for rule in self.inference_rules:
            derived.extend(self._apply_rule(rule, relation_lookup, entity_lookup))
        return derived

    def _build_relation_index(self, relations: List[Dict]) -> Dict:
        """Index relations as source -> relation_type -> {targets}."""
        index: Dict = {}
        for rel in relations:
            per_source = index.setdefault(rel['source'], {})
            per_source.setdefault(rel['relation_type'], set()).add(rel['target'])
        return index

    def _apply_rule(self, rule: Dict, relation_index: Dict, entity_index: Dict) -> List[Dict]:
        """Apply a single rule; only two-step transitive patterns are handled."""
        if len(rule['pattern']) != 2:
            # The simplified matcher supports exactly two chained triples.
            return []

        step1, step2 = rule['pattern']
        first_rel, second_rel = step1[1], step2[1]
        conclusion_rel = rule['conclusion'][1]

        derived: List[Dict] = []
        for head in entity_index:
            # head --first_rel--> middle --second_rel--> tail
            for middle in relation_index.get(head, {}).get(first_rel, ()):
                for tail in relation_index.get(middle, {}).get(second_rel, ()):
                    derived.append({
                        'source': head,
                        'relation_type': conclusion_rel,
                        'target': tail,
                        'confidence': rule['confidence'],
                        'inferred': True,
                        'rule': rule['name']
                    })
        return derived


class GraphConstructor:
    """Persists linked entities and relations into a Neo4j graph database.

    All writes go through UNWIND-based batched Cypher statements; node and
    relationship creation is idempotent via MERGE.
    """

    def __init__(self):
        self.driver = AsyncGraphDatabase.driver(
            GRAPH_CONFIG['neo4j']['uri'],
            auth=(GRAPH_CONFIG['neo4j']['username'], GRAPH_CONFIG['neo4j']['password'])
        )
        self.logger = logging.getLogger(self.__class__.__name__)

        # Tuning knobs: rows per UNWIND batch, and which properties get a
        # secondary index per node label.
        self.batch_size = 1000
        self.index_properties = {
            'Vulnerability': ['cve_id', 'name'],
            'Product': ['cpe_id', 'name'],
            'AttackVector': ['technique_id', 'name'],
            'Mitigation': ['mitigation_id', 'name'],
            'ThreatActor': ['actor_id', 'name']
        }

    async def initialize_graph_schema(self):
        """Create uniqueness constraints and secondary indexes (best-effort, idempotent)."""
        async with self.driver.session() as session:
            constraints = [
                "CREATE CONSTRAINT vuln_cve_id IF NOT EXISTS FOR (v:Vulnerability) REQUIRE v.cve_id IS UNIQUE",
                "CREATE CONSTRAINT product_cpe_id IF NOT EXISTS FOR (p:Product) REQUIRE p.cpe_id IS UNIQUE",
                "CREATE CONSTRAINT attack_technique_id IF NOT EXISTS FOR (a:AttackVector) REQUIRE a.technique_id IS UNIQUE"
            ]

            for constraint in constraints:
                try:
                    await session.run(constraint)
                    self.logger.info(f"创建约束: {constraint}")
                except Exception as e:
                    # The constraint may already exist or the server may not
                    # support the syntax; schema setup stays best-effort.
                    self.logger.warning(f"约束创建失败: {e}")

            # Secondary indexes for the lookup properties of each label.
            for node_type, properties in self.index_properties.items():
                for prop in properties:
                    index_query = f"CREATE INDEX {node_type.lower()}_{prop}_index IF NOT EXISTS FOR (n:{node_type}) ON (n.{prop})"
                    try:
                        await session.run(index_query)
                        self.logger.info(f"创建索引: {node_type}.{prop}")
                    except Exception as e:
                        self.logger.warning(f"索引创建失败: {e}")

    async def create_nodes(self, linked_entities: List["EntityLinkingResult"]):
        """Create (or update) one node per linked entity, batched by label."""
        async with self.driver.session() as session:
            # Group entities by their target node label.
            nodes_by_type: Dict[str, list] = {}
            for entity in linked_entities:
                nodes_by_type.setdefault(self._get_node_type(entity), []).append(entity)

            for node_type, entities in nodes_by_type.items():
                await self._create_nodes_batch(session, node_type, entities)

    async def _create_nodes_batch(self, session, node_type: str, entities: List["EntityLinkingResult"]):
        """MERGE a batch of same-label nodes and set their common properties."""
        query = f"""
        UNWIND $entities AS entity
        MERGE (n:{node_type} {{id: entity.entity_id}})
        SET n.name = entity.canonical_name,
            n.confidence = entity.confidence,
            n.aliases = entity.aliases,
            n.created_at = datetime(),
            n.updated_at = datetime()
        """

        # Append the label-specific property assignments, if any.
        property_setters = self._get_property_setters(node_type)
        if property_setters:
            query += ", " + ", ".join(property_setters)

        # Write in slices of batch_size to bound transaction size.
        for i in range(0, len(entities), self.batch_size):
            batch = entities[i:i + self.batch_size]
            batch_data = [
                {
                    'entity_id': entity.entity_id,
                    'canonical_name': entity.canonical_name,
                    'confidence': entity.confidence,
                    'aliases': entity.aliases,
                    'properties': entity.properties
                }
                for entity in batch
            ]

            try:
                await session.run(query, entities=batch_data)
                self.logger.info(f"创建 {len(batch)} 个 {node_type} 节点")
            except Exception as e:
                self.logger.error(f"节点创建失败: {e}")

    def _get_node_type(self, entity: "EntityLinkingResult") -> str:
        """Map an entity id to a node label based on its identifier scheme."""
        eid = entity.entity_id
        if eid.startswith('CVE-'):
            return 'Vulnerability'
        if eid.startswith('cpe:'):
            return 'Product'
        # ATT&CK technique ids look like T1083 / T1083.001; requiring a digit
        # after the 'T' avoids misclassifying arbitrary ids starting with T.
        if eid.startswith('T') and len(eid) > 1 and eid[1].isdigit():
            return 'AttackVector'
        if eid.startswith('CWE-'):
            return 'Weakness'
        return 'Entity'  # generic fallback label

    def _get_property_setters(self, node_type: str) -> List[str]:
        """Return the Cypher SET fragments specific to a node label."""
        setters = {
            'Vulnerability': [
                "n.cve_id = entity.properties.id",
                "n.severity = entity.properties.severity",
                "n.cvss_score = entity.properties.cvss_score",
                "n.description = entity.properties.description"
            ],
            'Product': [
                "n.cpe_id = entity.properties.id",
                "n.vendor = entity.properties.vendor",
                "n.product = entity.properties.product",
                "n.version = entity.properties.version"
            ],
            'AttackVector': [
                "n.technique_id = entity.properties.id",
                "n.tactics = entity.properties.tactics",
                "n.description = entity.properties.description"
            ]
        }
        return setters.get(node_type, [])

    async def create_relationships(self, relations: List[Dict]):
        """Create (or update) relationships, batched by relationship type."""
        async with self.driver.session() as session:
            # Group relations by type since the type is baked into the query.
            relations_by_type: Dict[str, list] = {}
            for relation in relations:
                relations_by_type.setdefault(relation['relation_type'], []).append(relation)

            for rel_type, rels in relations_by_type.items():
                await self._create_relationships_batch(session, rel_type, rels)

    async def _create_relationships_batch(self, session, rel_type: str, relations: List[Dict]):
        """MERGE a batch of same-type relationships; endpoints match by name or id."""
        query = f"""
        UNWIND $relations AS rel
        MATCH (source) WHERE source.name = rel.source OR source.id = rel.source
        MATCH (target) WHERE target.name = rel.target OR target.id = rel.target
        MERGE (source)-[r:{rel_type}]->(target)
        SET r.confidence = rel.confidence,
            r.created_at = datetime(),
            r.inferred = COALESCE(rel.inferred, false),
            r.rule = COALESCE(rel.rule, '')
        """

        # Write in slices of batch_size to bound transaction size.
        for i in range(0, len(relations), self.batch_size):
            batch = relations[i:i + self.batch_size]

            try:
                await session.run(query, relations=batch)
                self.logger.info(f"创建 {len(batch)} 个 {rel_type} 关系")
            except Exception as e:
                self.logger.error(f"关系创建失败: {e}")

    async def optimize_graph(self):
        """Best-effort post-write maintenance: refresh stats, drop duplicate edges."""
        async with self.driver.session() as session:
            # Procedure availability depends on the Neo4j edition/version,
            # so a failure here only logs a warning instead of aborting.
            try:
                await session.run("CALL db.stats.retrieve('GRAPH')")
            except Exception as e:
                self.logger.warning(f"Stats refresh skipped: {e}")

            # Remove duplicate AFFECTS relationships between the same pair,
            # keeping the one with the smaller internal id.
            cleanup_query = """
            MATCH (a)-[r1:AFFECTS]->(b)
            MATCH (a)-[r2:AFFECTS]->(b)
            WHERE id(r1) < id(r2)
            DELETE r2
            """
            await session.run(cleanup_query)

            self.logger.info("图优化完成")

    async def get_graph_stats(self) -> "GraphStats":
        """Collect node/relationship counts and derived metrics from the database."""
        async with self.driver.session() as session:
            # Node counts per (first) label.
            node_stats_query = """
            MATCH (n)
            RETURN labels(n)[0] as node_type, count(n) as count
            """
            node_result = await session.run(node_stats_query)
            node_types = {record['node_type']: record['count'] async for record in node_result}

            # Relationship counts per type.
            rel_stats_query = """
            MATCH ()-[r]->()
            RETURN type(r) as rel_type, count(r) as count
            """
            rel_result = await session.run(rel_stats_query)
            relationship_types = {record['rel_type']: record['count'] async for record in rel_result}

            total_nodes = sum(node_types.values())
            total_relationships = sum(relationship_types.values())

            # Weakly-connected components via the GDS plugin; degrades to 0
            # when the plugin or the named graph projection is missing.
            components_query = "CALL gds.wcc.stats('myGraph') YIELD componentCount"
            try:
                components_result = await session.run(components_query)
                counts = [record['componentCount'] async for record in components_result]
                components = counts[0] if counts else 0
            except Exception as e:
                # Narrow except (was bare) so cancellation/interrupts propagate.
                self.logger.warning(f"Connected component stats unavailable: {e}")
                components = 0

            # Average degree of an undirected view: each edge contributes 2.
            avg_degree = (total_relationships * 2) / total_nodes if total_nodes > 0 else 0

            return GraphStats(
                node_count=total_nodes,
                relationship_count=total_relationships,
                node_types=node_types,
                relationship_types=relationship_types,
                connected_components=components,
                average_degree=avg_degree
            )


class KnowledgeGraphBuilder:
    """Coordinates the end-to-end knowledge-graph construction pipeline:
    schema setup, entity linking, relation inference, persistence and stats."""

    def __init__(self):
        self.entity_linker = EntityLinker()
        self.relation_inferencer = RelationInferencer()
        self.graph_constructor = GraphConstructor()
        self.logger = logging.getLogger(self.__class__.__name__)

    async def build_graph(self, entities: List[Dict], relations: List[Dict]) -> GraphStats:
        """Build the graph from extracted entities/relations and return its stats."""
        t0 = datetime.now()
        self.logger.info(f"开始构建知识图谱，实体数: {len(entities)}, 关系数: {len(relations)}")

        try:
            # Step 1: make sure constraints and indexes exist.
            await self.graph_constructor.initialize_graph_schema()

            # Step 2: resolve extracted entities against the canonical databases.
            self.logger.info("开始实体链接...")
            linked = await self.entity_linker.link_entities_batch(entities)
            self.logger.info(f"实体链接完成，链接实体数: {len(linked)}")

            # Step 3: derive implicit relations and merge with the given ones.
            self.logger.info("开始关系推理...")
            inferred = self.relation_inferencer.infer_relations(relations, entities)
            combined = relations + inferred
            self.logger.info(f"关系推理完成，总关系数: {len(combined)}")

            # Steps 4-5: persist nodes first, then the relationships between them.
            self.logger.info("开始创建图节点...")
            await self.graph_constructor.create_nodes(linked)

            self.logger.info("开始创建图关系...")
            await self.graph_constructor.create_relationships(combined)

            # Step 6: post-write maintenance.
            self.logger.info("开始图优化...")
            await self.graph_constructor.optimize_graph()

            # Step 7: report what was built.
            stats = await self.graph_constructor.get_graph_stats()

            elapsed = (datetime.now() - t0).total_seconds()
            self.logger.info(f"知识图谱构建完成，耗时: {elapsed:.2f}秒")
            self.logger.info(f"图统计 - 节点: {stats.node_count}, 关系: {stats.relationship_count}")

            return stats

        except Exception as e:
            self.logger.error(f"知识图谱构建失败: {e}")
            raise

    async def update_graph(self, new_entities: List[Dict], new_relations: List[Dict]):
        """Incrementally merge new entities/relations into the existing graph."""
        self.logger.info("开始增量更新知识图谱...")

        # Drop anything already present before rebuilding.
        pending_entities = await self._filter_existing_entities(new_entities)
        pending_relations = await self._filter_existing_relations(new_relations)

        if not pending_entities and not pending_relations:
            self.logger.info("没有新的实体或关系需要更新")
            return

        await self.build_graph(pending_entities, pending_relations)
        self.logger.info("增量更新完成")

    async def _filter_existing_entities(self, entities: List[Dict]) -> List[Dict]:
        """Placeholder dedup hook; a real implementation would query the database."""
        return entities

    async def _filter_existing_relations(self, relations: List[Dict]) -> List[Dict]:
        """Placeholder dedup hook; a real implementation would query the database."""
        return relations


# 使用示例
async def main():
    """Demonstrate building a small knowledge graph from sample data."""
    # Sample entities as an extraction stage would emit them.
    sample_entities = [
        {
            "type": "VULNERABILITY",
            "name": "CVE-2023-12345",
            "properties": {
                "description": "Apache HTTP Server path traversal vulnerability",
                "severity": "HIGH",
                "cvss_score": 8.5
            }
        },
        {
            "type": "PRODUCT",
            "name": "Apache HTTP Server",
            "properties": {
                "vendor": "Apache",
                "version": "2.4.50"
            }
        }
    ]

    # One extracted relation between the two sample entities.
    sample_relations = [
        {
            "source": "CVE-2023-12345",
            "target": "Apache HTTP Server",
            "relation_type": "AFFECTS",
            "confidence": 0.95
        }
    ]

    # Build the graph and print a short summary of the result.
    stats = await KnowledgeGraphBuilder().build_graph(sample_entities, sample_relations)

    print("构建完成的知识图谱统计:")
    print(f"- 节点数: {stats.node_count}")
    print(f"- 关系数: {stats.relationship_count}")
    print(f"- 平均度: {stats.average_degree:.2f}")


if __name__ == '__main__':
    asyncio.run(main())