import networkx as nx
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.ensemble import IsolationForest
from collections import defaultdict
import random

class MultiViewHeterogeneousAnomalyNodeDetection:
    """Anomaly-node detection on heterogeneous graphs.

    Pipeline (see ``fit``): build three graph views, learn fused node
    embeddings with a small multi-view GNN (``MVGNNModel``), then score each
    node by blending the GNN anomaly score with a per-node-type
    IsolationForest score (weighted by ``alpha``).
    """

    def __init__(self, embedding_dim=64, hidden_dim=32, num_views=3, contamination=0.1, alpha=0.5):
        """
        Args:
            embedding_dim: size of the learned node embedding.
            hidden_dim: hidden width of the GNN / scorer MLPs.
            num_views: stored view count; note ``_create_views`` always
                builds 3 views and drives the actual model — TODO confirm
                whether this parameter should control that.
            contamination: expected anomaly fraction for IsolationForest.
            alpha: fusion weight between GNN and IsolationForest scores.
        """
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.num_views = num_views
        self.contamination = contamination
        self.alpha = alpha  # score-fusion weight (GNN vs IsolationForest)
        
        self.models = {}  # node type -> trained IsolationForest
        self.mvgnn_model = None  # multi-view GNN (MVGNNModel instance)
        self.node_embeddings = {}  # node -> learned embedding (np.ndarray)
        self.node_scores = {}  # node -> combined anomaly score
        
        self.temporal_scores = []  # one {node: score} dict per time step
        self.view_weights = []  # last learned per-node view attention weights
    
    class MVGNNModel(nn.Module):
        """Multi-view GNN: per-view MLP encoders, adjacency-masked
        self-attention message passing, attention-based view fusion, and an
        MLP anomaly scorer on the fused embedding.
        """

        def __init__(self, in_dim, hidden_dim, embedding_dim, num_node_types, num_views):
            super().__init__()
            
            # One MLP encoder per view.
            self.view_layers = nn.ModuleList([
                nn.Sequential(
                    nn.Linear(in_dim, hidden_dim),
                    nn.ReLU(),
                    nn.Linear(hidden_dim, embedding_dim)
                ) for _ in range(num_views)
            ])
            
            # Produces one attention logit per node for each view embedding.
            self.view_attention = nn.Sequential(
                nn.Linear(embedding_dim, hidden_dim),
                nn.Tanh(),
                nn.Linear(hidden_dim, 1)
            )
            
            # Learnable embedding per node type, added to each view encoding.
            self.type_embeddings = nn.Embedding(num_node_types, embedding_dim)
            
            # Self-attention used for message passing between nodes.
            self.self_attention = nn.MultiheadAttention(embedding_dim, 4, batch_first=True)
            
            # Anomaly scorer: fused embedding -> score in (0, 1).
            self.anomaly_scorer = nn.Sequential(
                nn.Linear(embedding_dim, hidden_dim),
                nn.ReLU(),
                nn.Dropout(0.2),
                nn.Linear(hidden_dim, 1),
                nn.Sigmoid()
            )
        
        def forward(self, features_list, adj_matrices_list, node_types):
            """
            Args:
                features_list: one (N, in_dim) float tensor per view.
                adj_matrices_list: per view, a list of (N, N) adjacency
                    tensors; entries > 0 mark edges (may be normalized).
                node_types: (N,) long tensor of node-type indices.

            Returns:
                (fused_embedding (N, embedding_dim),
                 anomaly_scores (N, 1),
                 view_weights (N, num_views); a single all-ones column
                 when there is only one view)
            """
            view_outputs = []
            
            for i, (features, adj_matrices) in enumerate(zip(features_list, adj_matrices_list)):
                # Encode this view's features.
                x = self.view_layers[i](features)
                
                # Inject node-type information.
                type_emb = self.type_embeddings(node_types)
                x = x + type_emb
                
                # Message passing restricted to graph edges.
                for adj in adj_matrices:
                    if adj is not None and adj.sum() > 0:
                        # Bug fix: the original passed a float (N, N) matrix as
                        # key_padding_mask, which expects a per-key vector and
                        # has inverted semantics (True = ignore). Use attn_mask
                        # instead: True marks pairs that may NOT attend
                        # (non-edges).
                        attn_mask = adj <= 0
                        # Keep the diagonal unmasked so isolated nodes attend
                        # to themselves (a fully masked row yields NaNs).
                        attn_mask.fill_diagonal_(False)
                        attended_x, _ = self.self_attention(x, x, x, attn_mask=attn_mask)
                        x = x + attended_x
                
                view_outputs.append(x)
            
            # Fuse the views with learned attention weights.
            if len(view_outputs) > 1:
                view_weights = [self.view_attention(view_emb) for view_emb in view_outputs]
                
                # Softmax-normalize the per-view logits across views.
                view_weights = torch.cat(view_weights, dim=1)
                view_weights = F.softmax(view_weights, dim=1)
                
                # Weighted sum of the view embeddings.
                fused_embedding = torch.zeros_like(view_outputs[0])
                for i, view_emb in enumerate(view_outputs):
                    fused_embedding += view_emb * view_weights[:, i:i+1]
            else:
                fused_embedding = view_outputs[0]
                # Bug fix: the original left view_weights undefined on this
                # path, raising NameError at the return below for one view.
                view_weights = torch.ones(fused_embedding.shape[0], 1,
                                          device=fused_embedding.device)
            
            # Per-node anomaly score from the fused embedding.
            anomaly_scores = self.anomaly_scorer(fused_embedding)
            
            return fused_embedding, anomaly_scores, view_weights
    
    def _create_views(self, graph):
        """Split a heterogeneous graph into three single-aspect views.

        View 1 keeps only edges joining nodes of the same 'type'; view 2
        keeps only edges carrying a non-default edge 'type'; view 3 keeps
        only edges whose 'weight' exceeds 0.5. Every view contains all
        nodes (with attributes) of the original graph.
        """
        def _node_copy():
            # Fresh view sharing the full node set (and node attributes).
            v = nx.Graph()
            v.add_nodes_from(graph.nodes(data=True))
            return v

        same_type_view = _node_copy()
        edge_type_view = _node_copy()
        weighted_view = _node_copy()

        weight_threshold = 0.5  # minimum edge weight for the weight-based view

        # One pass over the edges, routing each edge to the matching views.
        for u, v, data in graph.edges(data=True):
            if graph.nodes[u].get('type') == graph.nodes[v].get('type'):
                same_type_view.add_edge(u, v, **data)
            if data.get('type', 'default') != 'default':
                edge_type_view.add_edge(u, v, **data)
            if data.get('weight', 1.0) > weight_threshold:
                weighted_view.add_edge(u, v, **data)

        return [same_type_view, edge_type_view, weighted_view]
    
    def _extract_features(self, graph):
        """Build a fixed-length 5-dim numeric feature vector per node.

        Features: degree, clustering coefficient (0 for degree <= 1),
        neighbour count, then any numeric node attributes (excluding
        'type'), padded with zeros / truncated to exactly 5 entries.

        Returns:
            (features, node_types): node -> np.ndarray of length 5, and
            node -> type string ('unknown' when the attribute is missing).
        """
        FEATURE_DIM = 5
        features = {}
        node_types = {}

        for node, attrs in graph.nodes(data=True):
            degree = graph.degree(node)

            # Structural features.
            vec = [
                degree,
                nx.clustering(graph, node) if degree > 1 else 0,
                len(list(nx.neighbors(graph, node))),
            ]

            # Numeric node attributes (the categorical 'type' is excluded).
            vec.extend(v for k, v in attrs.items()
                       if k != 'type' and isinstance(v, (int, float)))

            # Zero-pad / truncate so every node has exactly FEATURE_DIM values.
            vec = (vec + [0] * FEATURE_DIM)[:FEATURE_DIM]

            features[node] = np.array(vec)
            node_types[node] = attrs.get('type', 'unknown')

        return features, node_types
    
    def _prepare_model_input(self, views, features, node_types):
        """准备模型输入数据"""
        # 节点到索引的映射
        nodes = list(views[0].nodes())
        node_to_idx = {node: i for i, node in enumerate(nodes)}
        
        # 收集所有节点类型
        unique_types = sorted(set(node_types.values()))
        type_to_idx = {t: i for i, t in enumerate(unique_types)}
        
        # 准备每个视图的特征和邻接矩阵
        features_list = []
        adj_matrices_list = []
        
        for view in views:
            # 特征矩阵
            feature_matrix = np.zeros((len(nodes), len(next(iter(features.values())))))
            for node, feat in features.items():
                if node in node_to_idx:
                    feature_matrix[node_to_idx[node]] = feat
            
            # 邻接矩阵
            adj_matrix = np.zeros((len(nodes), len(nodes)))
            for u, v in view.edges():
                if u in node_to_idx and v in node_to_idx:
                    u_idx = node_to_idx[u]
                    v_idx = node_to_idx[v]
                    adj_matrix[u_idx][v_idx] = 1
                    adj_matrix[v_idx][u_idx] = 1
            
            # 标准化特征矩阵和邻接矩阵
            feature_matrix = feature_matrix / (np.max(feature_matrix, axis=0) + 1e-10)
            rowsum = adj_matrix.sum(axis=1)
            degree_mat_inv_sqrt = np.diag(1.0 / np.sqrt(rowsum + 1e-10))
            adj_normalized = degree_mat_inv_sqrt.dot(adj_matrix).dot(degree_mat_inv_sqrt)
            
            features_list.append(torch.FloatTensor(feature_matrix))
            adj_matrices_list.append([torch.FloatTensor(adj_normalized)])
        
        # 节点类型索引
        type_indices = np.zeros(len(nodes), dtype=int)
        for node, t in node_types.items():
            if node in node_to_idx:
                type_indices[node_to_idx[node]] = type_to_idx[t]
        
        return (
            features_list,
            adj_matrices_list,
            torch.LongTensor(type_indices),
            node_to_idx,
            {i: n for n, i in node_to_idx.items()},
            len(unique_types)
        )
    
    def convert_dict_to_graph(self, graph_dict):
        """
        Convert dict-formatted graph data into a NetworkX graph object.

        Node dicts carry their identifier under 'id' or 'node_id'; edge
        dicts use 'source'/'target' or 'from'/'to'. All remaining keys
        become node/edge attributes.

        Bug fix: identifiers are read with .get() instead of .pop(), so
        the caller's input dicts are no longer mutated (the attribute
        filters below already exclude the identifier keys).

        Parameters:
        graph_dict: dict - dictionary containing 'nodes' and 'edges'

        Returns:
        G: nx.Graph - converted graph (empty on malformed input; errors
            are printed rather than raised, preserving best-effort behavior)
        """
        node_id_keys = ('id', 'node_id')
        edge_end_keys = ('source', 'target', 'from', 'to')
        try:
            G = nx.Graph()
            
            # Validate the input container type.
            if not isinstance(graph_dict, dict):
                print(f"输入不是字典，而是 {type(graph_dict)}")
                return G
            
            # Add nodes.
            if 'nodes' in graph_dict:
                for node_data in graph_dict['nodes']:
                    if not isinstance(node_data, dict):
                        continue
                    
                    node_id = node_data.get('id')
                    if node_id is None:
                        node_id = node_data.get('node_id')
                    
                    if node_id is not None:
                        G.add_node(node_id, **{k: v for k, v in node_data.items()
                                               if k not in node_id_keys})
            
            # Add edges.
            if 'edges' in graph_dict:
                for edge_data in graph_dict['edges']:
                    if not isinstance(edge_data, dict):
                        continue
                    
                    source = edge_data.get('source')
                    if source is None:
                        source = edge_data.get('from')
                    
                    target = edge_data.get('target')
                    if target is None:
                        target = edge_data.get('to')
                    
                    if source is not None and target is not None:
                        # Make sure both endpoints exist before adding the edge.
                        if source not in G:
                            G.add_node(source)
                        if target not in G:
                            G.add_node(target)
                        
                        G.add_edge(source, target, **{k: v for k, v in edge_data.items()
                                                      if k not in edge_end_keys})
            
            # Warn on an empty result — the input format was probably wrong.
            if len(G.nodes) == 0:
                print("警告：转换后的图没有节点！输入格式可能有问题")
                print(f"输入数据：{graph_dict.keys() if isinstance(graph_dict, dict) else type(graph_dict)}")
            
            return G
        except Exception as e:
            print(f"转换图时出错: {str(e)}")
            # Best effort: return an empty graph instead of raising.
            return nx.Graph()
    
    def _ensure_graph(self, input_data):
        """
        Coerce ``input_data`` into a NetworkX graph, trying several formats:
        an existing nx.Graph, a {'nodes':..., 'edges':...} dict (possibly
        nested under a 'data' key), an adjacency-list dict, a JSON string,
        an edge list, or a list of node dicts with 'id' fields.

        Bug fix: the bare ``except:`` clauses (which also swallowed
        KeyboardInterrupt/SystemExit) are narrowed — JSON parse failures to
        ``ValueError`` and the list heuristics to ``Exception``.

        Parameters:
        input_data: data in any of the supported formats.

        Returns:
        nx.Graph: converted graph (empty when conversion fails; failures
            are printed, not raised).
        """
        # Already a graph — nothing to do.
        if isinstance(input_data, nx.Graph):
            return input_data
        
        if isinstance(input_data, dict):
            # Standard {'nodes': ..., 'edges': ...} format.
            if 'nodes' in input_data and 'edges' in input_data:
                return self.convert_dict_to_graph(input_data)
            
            # Same format nested under a 'data' key.
            if 'data' in input_data and isinstance(input_data['data'], dict):
                if 'nodes' in input_data['data'] and 'edges' in input_data['data']:
                    return self.convert_dict_to_graph(input_data['data'])
                
            # Adjacency-list format: {node: [neighbors, ...], ...}.
            has_lists = any(isinstance(v, list) for v in input_data.values())
            if has_lists:
                G = nx.Graph()
                for node, neighbors in input_data.items():
                    if isinstance(neighbors, list):
                        G.add_node(node)
                        for neighbor in neighbors:
                            G.add_edge(node, neighbor)
                return G
            
            # Unrecognized dict shape: report and fall through.
            print(f"未知的字典格式: {list(input_data.keys())}")
        
        # A string may be JSON-encoded graph data.
        if isinstance(input_data, str):
            try:
                import json
                data = json.loads(input_data)
                return self._ensure_graph(data)
            except ValueError:
                # json.JSONDecodeError subclasses ValueError.
                pass
        
        if isinstance(input_data, list):
            # Try interpreting it as an edge list first.
            G = nx.Graph()
            try:
                for item in input_data:
                    if isinstance(item, (list, tuple)) and len(item) >= 2:
                        G.add_edge(item[0], item[1])
                if len(G.nodes) > 0:
                    return G
            except Exception:
                pass
            
            # Then as a list of node dicts carrying an 'id' field.
            try:
                G = nx.Graph()
                for item in input_data:
                    if isinstance(item, dict) and 'id' in item:
                        G.add_node(item['id'], **{k: v for k, v in item.items() if k != 'id'})
                if len(G.nodes) > 0:
                    return G
            except Exception:
                pass
        
        # Could not convert: report and fall back to an empty graph.
        print(f"无法将类型 {type(input_data)} 转换为图")
        return nx.Graph()
    
    def fit(self, graph):
        """Train the multi-view GNN and per-type Isolation Forests on ``graph``.

        Training runs 100 epochs of a self-supervised objective: a type-based
        contrastive loss (same-type nodes pulled together, different-type
        nodes pushed apart with a hinge at margin 1.0) plus a 0.5-weighted
        adjacency-reconstruction loss. Final per-node scores blend the GNN
        anomaly score with a per-type IsolationForest score via ``alpha``.

        Args:
            graph: nx.Graph or any format accepted by ``_ensure_graph``.

        Returns:
            dict mapping node -> combined anomaly score (also stored in
            ``self.node_scores``); empty dict if the graph has no nodes.
        """
        # Coerce the input into a NetworkX graph.
        graph = self._ensure_graph(graph)
        
        # Nothing to train on.
        if len(graph.nodes) == 0:
            print("错误：图没有节点")
            return {}
        
        # Build the multiple graph views.
        views = self._create_views(graph)
        
        # Extract per-node features.
        features, node_types = self._extract_features(graph)
        
        # Prepare model inputs (tensors + index mappings).
        features_list, adj_matrices_list, type_indices, node_to_idx, idx_to_node, num_node_types = self._prepare_model_input(
            views, features, node_types
        )
        
        # Initialize the GNN model.
        self.mvgnn_model = self.MVGNNModel(
            in_dim=features_list[0].shape[1],
            hidden_dim=self.hidden_dim,
            embedding_dim=self.embedding_dim,
            num_node_types=num_node_types,
            num_views=len(views)
        )
        
        # Train the model.
        optimizer = torch.optim.Adam(self.mvgnn_model.parameters(), lr=0.01)
        self.mvgnn_model.train()
        
        for epoch in range(100):
            # Forward pass.
            embeddings, anomaly_scores, view_weights = self.mvgnn_model(
                features_list, adj_matrices_list, type_indices
            )
            
            # Self-supervised contrastive pairs, resampled every epoch:
            # positives = nodes of the same type, negatives = nodes of a
            # different type.
            pos_pairs = []
            neg_pairs = []
            
            for i in range(len(type_indices)):
                # Indices of nodes sharing this node's type.
                same_type_indices = (type_indices == type_indices[i]).nonzero(as_tuple=True)[0]
                
                # Positive: a random same-type node other than i itself.
                if len(same_type_indices) > 1:
                    pos_idx = same_type_indices[random.randint(0, len(same_type_indices)-1)]
                    while pos_idx == i:
                        pos_idx = same_type_indices[random.randint(0, len(same_type_indices)-1)]
                    pos_pairs.append((i, pos_idx.item()))
                
                # Negative: a random different-type node (if any exists).
                diff_type_indices = (type_indices != type_indices[i]).nonzero(as_tuple=True)[0]
                if len(diff_type_indices) > 0:
                    neg_idx = diff_type_indices[random.randint(0, len(diff_type_indices)-1)]
                    neg_pairs.append((i, neg_idx.item()))
            
            # Contrastive loss: minimize positive-pair distance ...
            contrast_loss = 0
            for i, j in pos_pairs:
                contrast_loss += F.pairwise_distance(embeddings[i:i+1], embeddings[j:j+1])
            
            # ... and push negative pairs apart (hinge loss, margin 1.0).
            for i, j in neg_pairs:
                distance = F.pairwise_distance(embeddings[i:i+1], embeddings[j:j+1])
                contrast_loss += torch.max(torch.zeros_like(distance), 1.0 - distance)
            
            contrast_loss = contrast_loss / (len(pos_pairs) + len(neg_pairs))
            
            # Reconstruction loss: embedding inner products should reproduce
            # each view's (normalized) adjacency matrix.
            recon_loss = 0
            for i, adj_list in enumerate(adj_matrices_list):
                recon_adj = torch.mm(embeddings, embeddings.t())
                target_adj = adj_list[0] if adj_list else torch.zeros_like(recon_adj)
                recon_loss += F.binary_cross_entropy_with_logits(recon_adj, target_adj)
            
            recon_loss = recon_loss / len(adj_matrices_list)
            
            # Total loss: contrastive + 0.5 * reconstruction.
            loss = contrast_loss + 0.5 * recon_loss
            
            # Backpropagation.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            if (epoch + 1) % 20 == 0:
                print(f"Epoch {epoch+1}/100, Loss: {loss.item():.4f}")
        
        # Extract the learned embeddings and anomaly scores.
        self.mvgnn_model.eval()
        with torch.no_grad():
            embeddings, anomaly_scores, view_weights = self.mvgnn_model(
                features_list, adj_matrices_list, type_indices
            )
            
            # Move results to numpy for sklearn / storage.
            embeddings = embeddings.numpy()
            anomaly_scores = anomaly_scores.numpy().flatten()
            view_weights = view_weights.numpy()
            
            # Group nodes (and their embeddings) by node type.
            type_to_nodes = defaultdict(list)
            type_to_embeddings = defaultdict(list)
            
            for i, node in idx_to_node.items():
                self.node_embeddings[node] = embeddings[i]
                node_type = node_types[node]
                type_to_nodes[node_type].append(node)
                type_to_embeddings[node_type].append(embeddings[i])
            
            # Fit one IsolationForest per node type on that type's embeddings.
            for node_type, nodes in type_to_nodes.items():
                X = np.array([embeddings[node_to_idx[node]] for node in nodes])
                model = IsolationForest(contamination=self.contamination, random_state=42)
                model.fit(X)
                
                # Map decision_function (positive = normal) into [0, 1],
                # where higher means more anomalous.
                scores = (0.5 - model.decision_function(X) / 2).flatten()
                
                # Store the model and the fused per-node scores.
                self.models[node_type] = model
                for i, node in enumerate(nodes):
                    # Blend the GNN and IsolationForest scores with alpha.
                    gnn_score = anomaly_scores[node_to_idx[node]]
                    if_score = scores[i]
                    combined_score = self.alpha * gnn_score + (1 - self.alpha) * if_score
                    self.node_scores[node] = combined_score
        
        # Keep the last view-attention weights for inspection.
        self.view_weights = view_weights
        
        return self.node_scores
    
    def detect_anomalies(self, graph, threshold=None):
        """Score the nodes of ``graph`` and return the anomalous ones.

        Args:
            graph: nx.Graph or any format accepted by ``_ensure_graph``.
            threshold: anomaly cutoff; defaults to 0.7 when None.

        Returns:
            dict {node: combined score} for nodes scoring above the
            threshold; empty dict when the model is untrained or the
            graph has no nodes.
        """
        if not self.models:
            print("模型尚未训练")
            return {}

        # Coerce and validate the input graph.
        graph = self._ensure_graph(graph)
        if len(graph.nodes) == 0:
            print("错误：图没有节点")
            return {}

        threshold = 0.7 if threshold is None else threshold

        # Same preprocessing pipeline as fit().
        views = self._create_views(graph)
        features, node_types = self._extract_features(graph)
        (features_list, adj_matrices_list, type_indices,
         node_to_idx, idx_to_node, _) = self._prepare_model_input(views, features, node_types)

        anomalous_nodes = {}

        self.mvgnn_model.eval()
        with torch.no_grad():
            embeddings, gnn_scores, _ = self.mvgnn_model(
                features_list, adj_matrices_list, type_indices
            )
            embeddings = embeddings.numpy()
            gnn_scores = gnn_scores.numpy().flatten()

            for idx, node in idx_to_node.items():
                gnn_score = gnn_scores[idx]
                forest = self.models.get(node_types[node])

                if forest is not None:
                    # Map IsolationForest decision_function to [0, 1]
                    # (1 = anomalous) and blend with the GNN score.
                    emb_row = embeddings[idx].reshape(1, -1)
                    if_score = (0.5 - forest.decision_function(emb_row) / 2)[0]
                    combined = self.alpha * gnn_score + (1 - self.alpha) * if_score
                else:
                    # No model for this node type: GNN score alone.
                    combined = gnn_score

                # Strictly above the threshold counts as anomalous.
                if combined > threshold:
                    anomalous_nodes[node] = float(combined)

        return anomalous_nodes
    
    def update_with_temporal_data(self, graph_sequence):
        """Re-fit the model on each graph snapshot (one per time step).

        Each snapshot's score dict is recorded in ``self.temporal_scores``;
        empty snapshots are skipped. Returns the temporal anomalies found
        by ``_detect_temporal_anomalies``.
        """
        per_step_scores = []

        for step, snapshot in enumerate(graph_sequence, start=1):
            print(f"处理时间步 {step}/{len(graph_sequence)}")

            # Coerce and validate this snapshot.
            snapshot = self._ensure_graph(snapshot)
            if len(snapshot.nodes) == 0:
                print(f"警告：时间步 {step} 的图没有节点，跳过")
                continue

            # fit() retrains on this snapshot and returns its score dict.
            per_step_scores.append(self.fit(snapshot))

        self.temporal_scores = per_step_scores

        # Analyze score changes across the recorded time steps.
        return self._detect_temporal_anomalies()
    
    def _detect_temporal_anomalies(self, change_threshold=0.3):
        """Flag nodes whose score jumps sharply between consecutive steps.

        Bug fix: the original collected deltas in a compacted list and then
        used ``changes.index(max(changes)) + 1`` as the time step — when a
        node was missing at intermediate steps the reported time_step /
        score_before / score_after were misaligned (score_after could even
        be None). The actual time index is now tracked with each delta.

        Args:
            change_threshold: minimum absolute score change to report.

        Returns:
            dict node -> {'time_step', 'change', 'score_before',
            'score_after'} for the largest qualifying jump per node.
        """
        if len(self.temporal_scores) < 2:
            return {}
        
        temporal_anomalies = {}
        
        # Union of all nodes seen at any time step.
        all_nodes = set()
        for scores in self.temporal_scores:
            all_nodes.update(scores.keys())
        
        for node in all_nodes:
            # Score trajectory (None where the node is absent at a step).
            trajectory = [scores.get(node) for scores in self.temporal_scores]
            
            # (time_step, delta) for each consecutive pair of observed scores.
            changes = []
            for t in range(1, len(trajectory)):
                prev, curr = trajectory[t - 1], trajectory[t]
                if prev is not None and curr is not None:
                    changes.append((t, abs(curr - prev)))
            
            if not changes:
                continue
            
            # Largest jump (first occurrence on ties, as before).
            step, peak = max(changes, key=lambda tc: tc[1])
            if peak > change_threshold:
                temporal_anomalies[node] = {
                    'time_step': step,
                    'change': peak,
                    'score_before': trajectory[step - 1],
                    'score_after': trajectory[step]
                }
        
        return temporal_anomalies
    
    def explain_anomaly(self, node, graph=None):
        """Explain why ``node`` received its anomaly score.

        Returns a dict with the stored score, an ``is_anomalous`` flag
        (fixed 0.7 cutoff), and — when ``graph`` is supplied and contains
        the node — comparisons of its degree, clustering coefficient and
        neighbour-type distribution against same-type nodes. Temporal
        anomaly info is appended when temporal scores exist.
        """
        if node not in self.node_scores:
            return {"error": "未找到该节点"}
        
        # Coerce graph-like input if one was provided.
        if graph is not None:
            graph = self._ensure_graph(graph)
        
        # Basic information.
        explanation = {
            "node": node,
            "anomaly_score": float(self.node_scores[node]),
            "is_anomalous": self.node_scores[node] > 0.7
        }
        
        # With a usable graph, add structural analysis.
        if graph and len(graph.nodes) > 0 and node in graph.nodes:
            try:
                # Node-type information.
                node_type = graph.nodes[node].get('type', 'unknown')
                explanation["node_type"] = node_type
                
                # Feature-level anomaly analysis.
                node_features, _ = self._extract_features(graph)
                if node in node_features:
                    # Compare against the other nodes of the same type.
                    same_type_nodes = [n for n, attr in graph.nodes(data=True)
                                      if attr.get('type') == node_type and n != node]
                    
                    if same_type_nodes:
                        # Degree deviation relative to the same-type average.
                        node_degree = graph.degree(node)
                        avg_degree = sum(graph.degree(n) for n in same_type_nodes) / len(same_type_nodes)
                        degree_diff = abs(node_degree - avg_degree) / max(avg_degree, 1)
                        
                        # Clustering-coefficient deviation.
                        node_clustering = nx.clustering(graph, node) if graph.degree(node) > 1 else 0
                        clusterings = [nx.clustering(graph, n) if graph.degree(n) > 1 else 0 for n in same_type_nodes]
                        avg_clustering = sum(clusterings) / len(clusterings) if clusterings else 0
                        # The 0.01 floor avoids division by a near-zero average.
                        clustering_diff = abs(node_clustering - avg_clustering) / max(avg_clustering, 0.01)
                        
                        # This node's neighbour-type histogram.
                        node_neighbor_types = {}
                        for neighbor in graph.neighbors(node):
                            n_type = graph.nodes[neighbor].get('type', 'unknown')
                            node_neighbor_types[n_type] = node_neighbor_types.get(n_type, 0) + 1
                        
                        # Average neighbour-type histogram over same-type nodes.
                        avg_neighbor_types = defaultdict(float)
                        for n in same_type_nodes:
                            for neighbor in graph.neighbors(n):
                                n_type = graph.nodes[neighbor].get('type', 'unknown')
                                avg_neighbor_types[n_type] += 1
                        
                        for t in avg_neighbor_types:
                            avg_neighbor_types[t] /= len(same_type_nodes)
                        
                        # Per-type deviation from the average distribution.
                        type_diffs = {}
                        for t, count in node_neighbor_types.items():
                            if t in avg_neighbor_types:
                                diff = abs(count - avg_neighbor_types[t]) / max(avg_neighbor_types[t], 1)
                                type_diffs[t] = float(diff)
                        
                        # Attach the feature-level explanations.
                        explanation["feature_anomalies"] = {
                            "degree": {
                                "value": node_degree,
                                "avg_value": float(avg_degree),
                                "difference": float(degree_diff)
                            },
                            "clustering": {
                                "value": float(node_clustering),
                                "avg_value": float(avg_clustering),
                                "difference": float(clustering_diff)
                            },
                            "neighbor_type_distribution": {
                                "value": node_neighbor_types,
                                "avg_value": {k: float(v) for k, v in avg_neighbor_types.items()},
                                "differences": type_diffs
                            }
                        }
            except Exception as e:
                explanation["error_detail"] = f"分析图数据时出错: {str(e)}"
        
        # Append temporal anomaly information when available.
        if hasattr(self, 'temporal_scores') and self.temporal_scores:
            temporal_info = self._detect_temporal_anomalies().get(node)
            if temporal_info:
                explanation["temporal_anomaly"] = temporal_info
        
        return explanation 