import numpy as np
import networkx as nx
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics.pairwise import cosine_similarity
# Anomalous Edge Detection Service (AEDS)
# NOTE(review): RandomForestClassifier is imported above but never used in this
# file — presumably intended for a learned scoring model; confirm or remove.
class AnomalousEdgeDetectionService:
    """Anomalous edge detection service (AEDS).

    Scores every edge of a graph by combining three signals — feature-space
    similarity to other edges, structural imbalance, and edge weight — and
    reports the edges whose combined anomaly score exceeds a threshold.
    """

    def __init__(self):
        # Cache for per-edge feature vectors. Not populated by the current
        # methods; kept for interface compatibility with existing callers.
        self.edge_features = {}

    def extract_edge_features(self, graph):
        """Extract a normalized 5-dimensional feature vector for every edge.

        Parameters
        ----------
        graph : networkx.Graph
            Graph whose edges may carry an optional ``'weight'`` attribute
            (defaulting to 1.0 when absent).

        Returns
        -------
        tuple[numpy.ndarray, list[tuple]]
            A ``(num_edges, 5)`` feature matrix and the matching list of
            ``(u, v)`` edge identifiers. Both are empty when the graph has
            no edges.
        """
        features = []
        edge_ids = []

        # Guard: an empty graph would make the average-degree computation
        # below divide by zero. No nodes implies no edges.
        if graph.number_of_nodes() == 0:
            return np.empty((0, 5)), edge_ids

        # Graph-wide mean degree, used to normalize per-node degree features.
        # For a graph with nodes but no edges this is 0; fall back to 1.0 so
        # the divisions stay defined (the edge loop is never entered then).
        avg_degree = sum(dict(graph.degree()).values()) / graph.number_of_nodes()
        if avg_degree == 0:
            avg_degree = 1.0

        for u, v, data in graph.edges(data=True):
            # Per-endpoint structural features.
            u_degree = graph.degree(u)
            v_degree = graph.degree(v)
            common_neighbors = len(list(nx.common_neighbors(graph, u, v)))

            # Per-edge attributes, with a default for unweighted graphs.
            weight = data.get('weight', 1.0)
            degree_diff = abs(u_degree - v_degree)
            min_endpoint_degree = min(u_degree, v_degree)

            features.append([
                u_degree / avg_degree,  # relative degree of u
                v_degree / avg_degree,  # relative degree of v
                weight,                 # edge weight
                # Common-neighbor count relative to the smaller endpoint degree.
                common_neighbors / min_endpoint_degree if min_endpoint_degree > 0 else 0,
                degree_diff / avg_degree,  # relative degree difference
            ])
            edge_ids.append((u, v))

        return np.array(features), edge_ids

    def detect(self, graph, threshold=0.7):
        """Detect anomalous edges in *graph*.

        Parameters
        ----------
        graph : networkx.Graph
            Graph to analyze; edges may carry an optional ``'weight'``.
        threshold : float, optional
            Minimum combined anomaly score for an edge to be reported.

        Returns
        -------
        dict[tuple, float]
            Mapping of ``(u, v)`` edge identifiers to their anomaly score,
            for edges scoring strictly above ``threshold``. Empty for a
            graph with no edges.
        """
        X, edge_ids = self.extract_edge_features(graph)

        if len(X) == 0:
            return {}

        # Pairwise cosine similarity between all edge feature vectors.
        similarity_matrix = cosine_similarity(X)

        anomalous_edges = {}
        for i, (u, v) in enumerate(edge_ids):
            # Hoist degree lookups used by several scores below.
            u_degree = graph.degree(u)
            v_degree = graph.degree(v)
            weight = graph[u][v].get('weight', 1.0)
            degree_diff = abs(u_degree - v_degree)
            common_neighbors = len(list(nx.common_neighbors(graph, u, v)))

            # Similarity score: an edge that is dissimilar to every other
            # edge is more anomalous. Drop the self-similarity entry first.
            other_similarities = np.delete(similarity_matrix[i], i)
            avg_similarity = np.mean(other_similarities) if len(other_similarities) > 0 else 0

            # Structural score: large degree imbalance combined with few
            # common neighbors suggests an unusual connection. +1 terms
            # avoid division by zero.
            max_degree = max(u_degree, v_degree)
            structural_score = (degree_diff / (max_degree + 1)) * (1 / (common_neighbors + 1))

            # Weight score: lighter edges are treated as more anomalous.
            # NOTE(review): assumes weights lie in [0, 1]; a weight > 1
            # contributes negatively — confirm upstream normalization.
            weight_score = 1 - weight

            # Fixed weighted combination of the three signals.
            anomaly_score = (
                0.4 * (1 - avg_similarity)  # similarity anomaly
                + 0.4 * structural_score    # structural anomaly
                + 0.2 * weight_score        # weight anomaly
            )

            if anomaly_score > threshold:
                anomalous_edges[(u, v)] = anomaly_score

        return anomalous_edges