import numpy as np
import networkx as nx
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import StandardScaler
from collections import defaultdict
# Dynamic Heterogeneous Graph Event Detection (DHGED) service
class DynamicHeterogeneousGraphEventDetection:
    """Detect anomalous events in a sequence of heterogeneous graphs.

    Combines a rule-based change analysis over a sliding window with a
    learned neural anomaly scorer (see the nested ``EventDetectionModel``).
    """

    def __init__(self, embedding_dim=64, hidden_dim=32, window_size=5, anomaly_threshold=0.9):
        """Store hyper-parameters and initialise empty detection state.

        Args:
            embedding_dim: size of node/edge/graph embeddings.
            hidden_dim: hidden layer width of the neural encoders.
            window_size: number of consecutive graphs per analysis window.
            anomaly_threshold: score above which an event is flagged.
        """
        # Hyper-parameters.
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.window_size = window_size
        self.anomaly_threshold = anomaly_threshold

        # Learned model (created lazily in fit) and detection state.
        self.model = None
        self.node_embeddings = {}
        self.event_scores = {}
        self.event_patterns = {}
        self.baseline_patterns = {}
    
    class EventDetectionModel(nn.Module):
        def __init__(self, in_dim, hidden_dim, embedding_dim, num_node_types, num_edge_types):
            super().__init__()
            
            # 节点特征编码器
            self.node_encoder = nn.Sequential(
                nn.Linear(in_dim, hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, embedding_dim)
            )
            
            # 边特征编码器
            self.edge_encoder = nn.Sequential(
                nn.Linear(in_dim, hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, embedding_dim)
            )
            
            # 图级特征编码器
            self.graph_encoder = nn.Sequential(
                nn.Linear(embedding_dim*2, hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, embedding_dim)
            )
            
            # 时间序列编码器(GRU)
            self.time_encoder = nn.GRU(embedding_dim, embedding_dim, batch_first=True)
            
            # 事件异常检测器
            self.anomaly_detector = nn.Sequential(
                nn.Linear(embedding_dim*2, hidden_dim),
                nn.ReLU(),
                nn.Dropout(0.2),
                nn.Linear(hidden_dim, 1),
                nn.Sigmoid()
            )
            
            # 类型嵌入
            self.node_type_embeddings = nn.Embedding(num_node_types, embedding_dim)
            self.edge_type_embeddings = nn.Embedding(num_edge_types, embedding_dim)
            
        def forward(self, node_features, edge_features, graph_features, node_types, edge_types, time_seqs=None):
            # 节点特征编码
            node_emb = self.node_encoder(node_features)
            
            # 边特征编码
            edge_emb = self.edge_encoder(edge_features)
            
            # 添加类型信息
            if node_types is not None:
                node_type_emb = self.node_type_embeddings(node_types)
                node_emb = node_emb + node_type_emb
                
            if edge_types is not None:
                edge_type_emb = self.edge_type_embeddings(edge_types)
                edge_emb = edge_emb + edge_type_emb
            
            # 图级特征编码
            combined_features = torch.cat([
                torch.mean(node_emb, dim=0, keepdim=True),
                torch.mean(edge_emb, dim=0, keepdim=True)
            ], dim=1)
            graph_emb = self.graph_encoder(combined_features)
            
            # 时间序列编码
            if time_seqs is not None:
                batch_size = time_seqs.size(0)
                time_seqs = time_seqs.view(batch_size, -1, self.embedding_dim)
                time_output, _ = self.time_encoder(time_seqs)
                time_emb = time_output[:, -1, :]  # 取最后一个时间步
            else:
                time_emb = graph_emb
            
            # 异常检测
            anomaly_input = torch.cat([graph_emb, time_emb], dim=1)
            anomaly_score = self.anomaly_detector(anomaly_input)
            
            return anomaly_score, graph_emb
    
    def _extract_features(self, graph):
        """Extract node/edge/graph feature tensors from one networkx graph.

        Requires ``self.type_to_idx`` and ``self.edge_type_to_idx`` to have
        been built by ``_prepare_data`` first.

        Returns a 6-tuple:
            (node_features, edge_features, graph_features,
             node_types, edge_types, node_mapping)
        where tensors are empty when the graph has no nodes/edges and
        node_mapping maps node id -> row index in node_features.
        """
        # --- Node features -------------------------------------------------
        node_features = []
        node_types = []
        node_mapping = {}

        for i, (node, attr) in enumerate(graph.nodes(data=True)):
            node_mapping[node] = i

            # Structural features: degree, clustering coefficient, neighbour
            # count (for undirected graphs the last duplicates the degree).
            feat = [
                graph.degree(node),
                nx.clustering(graph, node),
                len(list(nx.all_neighbors(graph, node)))
            ]

            # Append numeric node attributes (excluding the type label).
            for key, value in attr.items():
                if key != 'type' and isinstance(value, (int, float)):
                    feat.append(value)

            # Zero-pad so every node has at least 5 features.
            while len(feat) < 5:
                feat.append(0)

            node_features.append(feat)
            node_types.append(self.type_to_idx.get(attr.get('type', 'unknown'), 0))

        # --- Edge features -------------------------------------------------
        edge_features = []
        edge_types = []

        for u, v, attr in graph.edges(data=True):
            feat = [
                attr.get('weight', 1.0),
                len(list(nx.common_neighbors(graph, u, v))),
                abs(graph.degree(u) - graph.degree(v))
            ]

            # BUG FIX: the original inner loop reused `v`, shadowing the edge
            # endpoint; harmless here only by accident, renamed for safety.
            for key, value in attr.items():
                if key != 'type' and isinstance(value, (int, float)):
                    feat.append(value)

            while len(feat) < 5:
                feat.append(0)

            edge_features.append(feat)
            edge_types.append(self.edge_type_to_idx.get(attr.get('type', 'default'), 0))

        # --- Graph-level features ------------------------------------------
        # NOTE(review): number_connected_components assumes an undirected
        # graph — confirm callers never pass DiGraphs.
        graph_features = [
            graph.number_of_nodes(),
            graph.number_of_edges(),
            nx.density(graph),
            len(list(nx.isolates(graph))),
            nx.number_connected_components(graph)
        ]

        # --- Standardisation ------------------------------------------------
        # BUG FIX: after fit_transform these become numpy arrays, and the
        # original `if node_features` / `if graph_features` truth tests raised
        # "truth value of an array is ambiguous" for any non-empty graph.
        # Use explicit length checks instead.
        scaler = StandardScaler()
        if len(node_features) > 0:
            node_features = scaler.fit_transform(node_features)
        if len(edge_features) > 0:
            edge_features = scaler.fit_transform(edge_features)
        if len(graph_features) > 0:
            # NOTE(review): standardising a single sample maps every value to
            # 0.0 (zero variance) — kept for compatibility, but likely not
            # the intended behaviour; consider a sequence-level scaler.
            graph_features = scaler.fit_transform([graph_features])[0]

        return (
            torch.FloatTensor(node_features) if len(node_features) > 0 else torch.FloatTensor(),
            torch.FloatTensor(edge_features) if len(edge_features) > 0 else torch.FloatTensor(),
            torch.FloatTensor(graph_features) if len(graph_features) > 0 else torch.FloatTensor(),
            torch.LongTensor(node_types) if node_types else torch.LongTensor(),
            torch.LongTensor(edge_types) if edge_types else torch.LongTensor(),
            node_mapping
        )
    
    def _prepare_data(self, graph_sequence):
        """Build type-index maps over the sequence and extract per-graph features.

        Side effects: sets ``self.type_to_idx`` and ``self.edge_type_to_idx``.
        Returns one feature tuple (from ``_extract_features``) per graph.
        """
        # Collect every node/edge type observed anywhere in the sequence.
        observed_node_types = set()
        observed_edge_types = set()
        for graph in graph_sequence:
            observed_node_types.update(
                attr.get('type', 'unknown') for _, attr in graph.nodes(data=True)
            )
            observed_edge_types.update(
                attr.get('type', 'default') for _, _, attr in graph.edges(data=True)
            )

        # Map each type label to a dense integer index.
        self.type_to_idx = {t: i for i, t in enumerate(observed_node_types)}
        self.edge_type_to_idx = {t: i for i, t in enumerate(observed_edge_types)}

        # Feature extraction for every graph in order.
        return [self._extract_features(graph) for graph in graph_sequence]
    
    def _extract_event_patterns(self, graph_sequence, all_features):
        """Slide a window over the sequence and collect anomalous events.

        Each returned dict records the window-end time index, the change
        pattern, the affected nodes/edges and the rule-based score.
        """
        patterns = []
        last_start = len(graph_sequence) - self.window_size

        for start in range(last_start + 1):
            end = start + self.window_size
            detected = self._detect_window_events(
                graph_sequence[start:end], all_features[start:end]
            )
            for event in detected:
                patterns.append({
                    'time_idx': end - 1,  # index of the window's last graph
                    'pattern': event['pattern'],
                    'affected_nodes': event['nodes'],
                    'affected_edges': event['edges'],
                    'score': event['score'],
                })

        return patterns
    
    def _detect_window_events(self, window_graphs, window_features):
        """Detect events inside one window of consecutive graphs.

        Compares each graph with its predecessor (node/edge additions and
        removals only), summarises the changes via
        ``_extract_change_patterns`` and keeps patterns whose score exceeds
        ``self.anomaly_threshold``.

        NOTE(review): ``window_features`` is currently unused; kept for
        interface stability.
        """
        node_changes = {}
        edge_changes = {}

        # Pairwise diff of consecutive snapshots.
        for step in range(1, len(window_graphs)):
            earlier = window_graphs[step - 1]
            later = window_graphs[step]

            nodes_before, nodes_after = set(earlier.nodes()), set(later.nodes())
            edges_before, edges_after = set(earlier.edges()), set(later.edges())

            node_delta = {
                'added': nodes_after - nodes_before,
                'removed': nodes_before - nodes_after,
            }
            if node_delta['added'] or node_delta['removed']:
                node_changes[step] = node_delta

            edge_delta = {
                'added': edges_after - edges_before,
                'removed': edges_before - edges_after,
            }
            if edge_delta['added'] or edge_delta['removed']:
                edge_changes[step] = edge_delta

        # Nothing changed anywhere in the window -> no events.
        if not node_changes and not edge_changes:
            return []

        candidates = self._extract_change_patterns(window_graphs, node_changes, edge_changes)
        return [p for p in candidates if p['score'] > self.anomaly_threshold]
    
    def _extract_change_patterns(self, window_graphs, node_changes, edge_changes):
        """Summarise typed add/remove counts into candidate event patterns.

        ``node_changes`` / ``edge_changes`` map window step t (>= 1) to the
        sets of entities added/removed between graphs t-1 and t. Returns a
        list with at most one pattern dict (empty when nothing changed).
        """
        # Count node changes per (operation, node type).
        node_type_changes = defaultdict(int)
        affected_nodes = set()

        for step, delta in node_changes.items():
            after = window_graphs[step]
            before = window_graphs[step - 1]

            for node in delta['added']:
                if node in after.nodes():
                    node_type_changes[('add', after.nodes[node].get('type', 'unknown'))] += 1
                    affected_nodes.add(node)

            for node in delta['removed']:
                if node in before.nodes():
                    node_type_changes[('remove', before.nodes[node].get('type', 'unknown'))] += 1
                    affected_nodes.add(node)

        # Count edge changes per (operation, src type, edge type, dst type).
        edge_type_changes = defaultdict(int)
        affected_edges = set()

        for step, delta in edge_changes.items():
            after = window_graphs[step]
            before = window_graphs[step - 1]

            for u, v in delta['added']:
                if after.has_edge(u, v):
                    rel = after.edges[u, v].get('type', 'default')
                    if u in after.nodes() and v in after.nodes():
                        src = after.nodes[u].get('type', 'unknown')
                        dst = after.nodes[v].get('type', 'unknown')
                        edge_type_changes[('add', src, rel, dst)] += 1
                        affected_edges.add((u, v))

            for u, v in delta['removed']:
                if before.has_edge(u, v):
                    rel = before.edges[u, v].get('type', 'default')
                    if u in before.nodes() and v in before.nodes():
                        src = before.nodes[u].get('type', 'unknown')
                        dst = before.nodes[v].get('type', 'unknown')
                        edge_type_changes[('remove', src, rel, dst)] += 1
                        affected_edges.add((u, v))

        if not node_type_changes and not edge_type_changes:
            return []

        # Simplified scoring: change rate relative to the average graph size.
        # A more elaborate scheme could weight rare type transitions higher.
        total_changes = sum(node_type_changes.values()) + sum(edge_type_changes.values())
        avg_graph_size = sum(len(g) for g in window_graphs) / len(window_graphs)
        base_score = min(1.0, total_changes / (avg_graph_size + 1))

        return [{
            'pattern': {
                'node_changes': dict(node_type_changes),
                'edge_changes': dict(edge_type_changes),
            },
            'nodes': list(affected_nodes),
            'edges': list(affected_edges),
            'score': base_score,
            'total_changes': total_changes,
        }]
    
    def fit(self, graph_sequence):
        """Mine event patterns from a graph sequence and train the scorer.

        Args:
            graph_sequence: ordered list of networkx graphs (one per step).

        Returns:
            The list of rule-based event patterns found over the sequence
            (empty when the sequence is shorter than the window).

        The neural model is only trained when the sequence covers at least
        two windows; labels come from the rule-based detector itself.
        """
        if len(graph_sequence) < self.window_size:
            print(f"图序列长度 ({len(graph_sequence)}) 小于窗口大小 ({self.window_size})")
            return []

        # Feature extraction + type maps for the whole sequence.
        all_features = self._prepare_data(graph_sequence)

        # Rule-based event patterns over every window.
        self.event_patterns = self._extract_event_patterns(graph_sequence, all_features)

        # Train the neural scorer only when there is enough data.
        if len(graph_sequence) >= 2 * self.window_size:
            train_graphs = []
            train_labels = []

            # First window serves as the "normal" baseline.
            baseline_window = graph_sequence[:self.window_size]
            baseline_features = all_features[:self.window_size]
            self.baseline_patterns = self._extract_event_patterns(baseline_window, baseline_features)

            # One training sample per time step: last graph of each window,
            # labelled anomalous iff the rule-based detector fired.
            for i in range(self.window_size, len(graph_sequence)):
                window_graphs = graph_sequence[i - self.window_size:i]
                window_features = all_features[i - self.window_size:i]

                events = self._detect_window_events(window_graphs, window_features)
                label = 1.0 if any(e['score'] > self.anomaly_threshold for e in events) else 0.0

                train_graphs.append(window_graphs[-1])
                train_labels.append(label)

            # Infer input width from the first graph's node features.
            in_dim = all_features[0][0].size(1) if len(all_features[0][0]) > 0 else 5
            self.model = self.EventDetectionModel(
                in_dim=in_dim,
                hidden_dim=self.hidden_dim,
                embedding_dim=self.embedding_dim,
                num_node_types=len(self.type_to_idx),
                num_edge_types=len(self.edge_type_to_idx)
            )

            # Simplified training loop; a real deployment would add batching,
            # validation and early stopping.
            optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
            criterion = nn.BCELoss()
            self.model.train()

            for epoch in range(50):
                epoch_loss = 0.0
                for i, g in enumerate(train_graphs):
                    node_feat, edge_feat, graph_feat, node_types, edge_types, _ = self._extract_features(g)

                    anomaly_score, _ = self.model(
                        node_feat, edge_feat, graph_feat, node_types, edge_types
                    )

                    # BUG FIX: the model outputs shape (1, 1); the original
                    # (1,)-shaped target made BCELoss broadcast and warn
                    # (an error in newer torch). Match shapes explicitly.
                    target = torch.FloatTensor([[train_labels[i]]])
                    loss = criterion(anomaly_score, target)

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    epoch_loss += loss.item()

                if (epoch + 1) % 10 == 0:
                    print(f"Epoch {epoch+1}/50, Loss: {epoch_loss/len(train_graphs):.4f}")

        return self.event_patterns
    
    def detect_events(self, new_graph, previous_graphs=None):
        """Detect events introduced by ``new_graph`` against recent history.

        When ``previous_graphs`` is None or empty, the internally tracked
        history (``self.previous_graphs``) is used and updated. Returns the
        list of detected events; each gains model-blended scores when a
        trained model is available.
        """
        # Fall back to (and lazily create) the internal history buffer.
        if not previous_graphs:
            if not hasattr(self, 'previous_graphs'):
                self.previous_graphs = []
            previous_graphs = self.previous_graphs

        # Not enough history for a full window yet: just record the graph.
        if len(previous_graphs) < self.window_size - 1:
            previous_graphs.append(new_graph)
            self.previous_graphs = previous_graphs
            return []

        # Assemble the current window: newest history + the incoming graph.
        current_window = previous_graphs[-(self.window_size - 1):] + [new_graph]
        current_features = [self._extract_features(g) for g in current_window]

        # Rule-based detection over the window.
        events = self._detect_window_events(current_window, current_features)

        # Blend in the learned score when a trained model exists.
        if self.model is not None:
            node_feat, edge_feat, graph_feat, node_types, edge_types, _ = self._extract_features(new_graph)

            with torch.no_grad():
                anomaly_score, _ = self.model(
                    node_feat, edge_feat, graph_feat, node_types, edge_types
                )

            models_score = anomaly_score.item()
            for event in events:
                # Weighted blend of rule-based and model scores.
                blended = 0.7 * event['score'] + 0.3 * models_score
                event['models_score'] = models_score
                event['combined_score'] = blended
                event['is_anomaly'] = blended > self.anomaly_threshold

        # Update the history buffer, keeping at most window_size graphs.
        previous_graphs.append(new_graph)
        if len(previous_graphs) > self.window_size:
            previous_graphs = previous_graphs[-self.window_size:]
        self.previous_graphs = previous_graphs

        return events
    
    def explain_event(self, event):
        """Build a human-readable explanation dict for one detected event.

        Expects the event dict shape produced by ``_extract_event_patterns``
        (keys: time_idx, score, affected_nodes, affected_edges, pattern).
        """
        # Render node changes as "<operation> <node type>" -> count.
        node_change_summary = {}
        for (operation, node_type), count in event['pattern']['node_changes'].items():
            node_change_summary[f"{operation} {node_type}"] = count

        # Render edge changes as "<op> <src>-[<rel>]-><dst>" -> count;
        # skip malformed keys that are not 4-tuples.
        edge_change_summary = {}
        for change_key, count in event['pattern']['edge_changes'].items():
            if len(change_key) == 4:
                operation, src_type, edge_type, tgt_type = change_key
                edge_change_summary[f"{operation} {src_type}-[{edge_type}]->{tgt_type}"] = count

        return {
            'summary': f"在时间点 {event['time_idx']} 检测到异常事件，异常分数为 {event['score']:.4f}",
            'affected_entities': {
                'nodes': len(event['affected_nodes']),
                'edges': len(event['affected_edges'])
            },
            'changes': {
                'node_changes': node_change_summary,
                'edge_changes': edge_change_summary
            }
        }