import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import networkx as nx
from sklearn.preprocessing import StandardScaler
# Heterogeneous graph convolutional network (HGCN) service
class HeterogeneousGCN:
    """Heterogeneous Graph Convolutional Network (HGCN) service.

    Learns node embeddings on a networkx graph whose nodes carry a
    ``'type'`` attribute and whose edges may carry a ``'type'`` attribute.
    One stack of linear layers is kept per relation (edge type); at every
    propagation step the messages from the different relations are
    averaged per node.
    """

    def __init__(self, hidden_dims=(64, 32), learning_rate=0.01, epochs=100):
        """
        Args:
            hidden_dims: sizes of the hidden GCN layers.
            learning_rate: Adam step size.
            epochs: number of full-graph training iterations.
        """
        # Copy so a caller-owned sequence (or the shared default) is never aliased.
        self.hidden_dims = list(hidden_dims)
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.model = None
        self.node_to_idx = {}
        self.idx_to_node = {}
        self.type_to_idx = {}
        self.idx_to_type = {}
        self.node_features = {}

    class HGCNModel(nn.Module):
        """Inner torch module: one Linear stack per relation type."""

        def __init__(self, num_node_types, in_dim, hidden_dims, out_dim):
            """
            Args:
                num_node_types: number of relation-specific layer stacks to
                    create (one per adjacency matrix passed to forward()).
                in_dim: input feature width.
                hidden_dims: hidden layer widths.
                out_dim: embedding / logit width.
            """
            super().__init__()

            # Full width sequence: input layer + middle layers + output layer.
            dims = [in_dim] + list(hidden_dims) + [out_dim]
            self.num_layers = len(dims) - 1

            # One stack of GCN (linear) layers per relation; nn.ModuleDict
            # requires string keys, so stacks are keyed by str(index).
            self.type_specific_layers = nn.ModuleDict()
            for t in range(num_node_types):
                self.type_specific_layers[str(t)] = nn.ModuleList(
                    [nn.Linear(dims[i], dims[i + 1]) for i in range(self.num_layers)]
                )

            # Output head for node classification.
            self.classifier = nn.Linear(out_dim, out_dim)

        def forward(self, features, adj_matrices, node_types):
            """Run message passing and return (embeddings, class logits).

            Args:
                features: (num_nodes, in_dim) float tensor.
                adj_matrices: list of (num_nodes, num_nodes) normalized
                    adjacency tensors, one per relation type.
                node_types: (num_nodes,) long tensor; currently unused in
                    the forward pass itself, kept for interface stability.

            Returns:
                Tuple of (node embeddings, classification logits).
            """
            num_nodes = features.shape[0]
            x = features

            # Message passing, one round per layer.
            for layer_idx in range(self.num_layers):
                out_features = self.type_specific_layers['0'][layer_idx].out_features
                # Accumulate per-relation outputs, then average over the
                # relations that actually delivered a message to each node.
                type_outputs = torch.zeros((num_nodes, out_features), device=x.device)
                type_counts = torch.zeros(num_nodes, device=x.device) + 1e-10  # avoid div-by-zero

                for t in range(len(adj_matrices)):
                    adj = adj_matrices[t]

                    # Skip relations with no edges at all.
                    if adj.sum() > 0:
                        h = torch.mm(adj, x)
                        h = self.type_specific_layers[str(t)][layer_idx](h)
                        h = F.relu(h)

                        # Only nodes with at least one edge of this relation
                        # receive this relation's message.
                        mask = (adj.sum(1) > 0).float().unsqueeze(1)
                        type_outputs += h * mask
                        type_counts += mask.squeeze()

                # Mean aggregation across relations.
                x = type_outputs / type_counts.unsqueeze(1)

                if layer_idx < self.num_layers - 1:
                    x = F.relu(x)
                    x = F.dropout(x, 0.5, training=self.training)

            # Node classification head.
            node_pred = self.classifier(x)
            return x, node_pred

    def _prepare_data(self, graph):
        """Build model inputs from a networkx graph.

        Returns:
            Tuple of (features, adj_matrices, node_type_indices): an
            (N, F) float tensor, a list of symmetrically normalized
            (N, N) float tensors (one per edge type), and an (N,) long
            tensor of node-type indices.
        """
        # Node <-> index maps.
        nodes = list(graph.nodes())
        self.node_to_idx = {node: i for i, node in enumerate(nodes)}
        self.idx_to_node = {i: node for node, i in self.node_to_idx.items()}

        # Node-type maps; sorted so the indexing is reproducible across runs.
        node_types = sorted({graph.nodes[node]['type'] for node in nodes}, key=str)
        self.type_to_idx = {t: i for i, t in enumerate(node_types)}
        self.idx_to_type = {i: t for t, i in self.type_to_idx.items()}

        node_type_indices = [self.type_to_idx[graph.nodes[node]['type']] for node in nodes]

        # Node features: every numeric attribute except 'type'; fall back to
        # the node degree when a node has no numeric attributes.
        feature_rows = []
        for node in nodes:
            feat = [v for k, v in graph.nodes[node].items()
                    if k != 'type' and isinstance(v, (int, float))]
            if not feat:
                feat = [graph.degree(node)]
            feature_rows.append(feat)

        if feature_rows:
            # BUGFIX: nodes may expose different numbers of numeric
            # attributes; zero-pad every row to a common width so the
            # matrix handed to StandardScaler is rectangular.
            width = max(len(row) for row in feature_rows)
            feature_rows = [row + [0.0] * (width - len(row)) for row in feature_rows]
            features = StandardScaler().fit_transform(feature_rows)
        else:
            # Empty graph: keep a well-formed (0, 1) feature matrix.
            features = np.zeros((0, 1))

        # One adjacency matrix per edge type; sorted for reproducibility.
        edge_types = sorted({data.get('type', 'default')
                             for _, _, data in graph.edges(data=True)}, key=str)

        adj_matrices = []
        for edge_type in edge_types:
            adj = np.zeros((len(nodes), len(nodes)))
            for u, v, data in graph.edges(data=True):
                if data.get('type', 'default') == edge_type:
                    u_idx = self.node_to_idx[u]
                    v_idx = self.node_to_idx[v]
                    adj[u_idx][v_idx] = 1
                    adj[v_idx][u_idx] = 1  # edges are treated as undirected

            # Symmetric normalization D^-1/2 A D^-1/2; isolated nodes get a
            # degree of 1 so the inverse square root stays finite.
            rowsum = adj.sum(1)
            rowsum[rowsum == 0] = 1.0
            degree_mat_inv_sqrt = np.diag(np.power(rowsum, -0.5))
            adj_normalized = adj.dot(degree_mat_inv_sqrt).transpose().dot(degree_mat_inv_sqrt)

            adj_matrices.append(torch.FloatTensor(adj_normalized))

        return (
            torch.FloatTensor(features),
            adj_matrices,
            torch.LongTensor(node_type_indices),
        )

    def fit(self, graph, labels=None, task='node_classification'):
        """Train the model full-batch on the whole graph.

        Args:
            graph: networkx graph; nodes must carry a 'type' attribute.
            labels: optional per-node integer labels ordered like
                graph.nodes(); defaults to the node-type indices.
            task: 'node_classification' (default) or 'community_detection';
                stored for predict(), training itself is identical.
        """
        features, adj_matrices, node_types = self._prepare_data(graph)

        # Cached for predict().
        self.features = features
        self.adj_matrices = adj_matrices
        self.node_types = node_types

        if labels is None:
            # Default supervision signal: predict each node's type.
            labels = node_types
        elif not isinstance(labels, torch.Tensor):
            labels = torch.LongTensor(labels)

        in_dim = features.shape[1]
        # BUGFIX: the output width must cover the supplied label space (the
        # original always used the node-type count, which breaks
        # cross_entropy when caller labels have more classes).
        out_dim = int(labels.max().item()) + 1
        # BUGFIX: forward() indexes the relation stacks by adjacency-matrix
        # position, so there must be a stack per edge type as well as per
        # node type.
        self.model = self.HGCNModel(
            num_node_types=max(len(self.type_to_idx), len(adj_matrices)),
            in_dim=in_dim,
            hidden_dims=self.hidden_dims,
            out_dim=out_dim,
        )

        optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)

        # Training loop.
        self.model.train()
        for epoch in range(self.epochs):
            optimizer.zero_grad()

            embeddings, node_pred = self.model(features, adj_matrices, node_types)

            # Only node classification is implemented; other tasks (e.g.
            # link prediction) currently fall back to the same objective.
            loss = F.cross_entropy(node_pred, labels)

            loss.backward()
            optimizer.step()

            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{self.epochs}, Loss: {loss.item():.4f}")

        # Cache the final embedding of every node, keyed by node id.
        self.model.eval()
        with torch.no_grad():
            embeddings, _ = self.model(features, adj_matrices, node_types)
            embeddings = embeddings.numpy()

            for i, node in self.idx_to_node.items():
                self.node_features[node] = embeddings[i]

        # Remembered so predict() knows which branch to take.
        self.task = task

    def get_embeddings(self):
        """Return the dict mapping node id -> learned embedding (numpy)."""
        return self.node_features

    def predict(self, node=None):
        """Predict labels (node classification) or communities for nodes.

        Args:
            node: optional node id. If given, return only that node's
                prediction; otherwise predict for every node.

        Returns:
            int for a single node, or Dict[node_id, int] for all nodes.

        Raises:
            ValueError: if the model has not been trained, or the requested
                node is not part of the training graph.
        """
        if self.model is None:
            raise ValueError("模型尚未训练，无法执行预测")

        self.model.eval()

        # Community-detection task: cluster the learned embeddings.
        if getattr(self, 'task', None) == 'community_detection':
            with torch.no_grad():
                embeddings, _ = self.model(self.features, self.adj_matrices, self.node_types)

            from sklearn.cluster import KMeans

            # Estimate the community count with the number of node types,
            # but never fewer than 2 clusters.
            n_clusters = max(len(self.type_to_idx), 2)
            kmeans = KMeans(n_clusters=n_clusters, random_state=0)
            cluster_labels = kmeans.fit_predict(embeddings.numpy())

            if node is not None:
                if node not in self.node_to_idx:
                    raise ValueError(f"节点 {node} 不在图中")
                # BUGFIX: return a plain int (the original leaked a numpy
                # scalar here, inconsistent with the all-nodes dict).
                return int(cluster_labels[self.node_to_idx[node]])

            return {node_id: int(cluster_labels[idx])
                    for node_id, idx in self.node_to_idx.items()}

        # Node-classification task.
        with torch.no_grad():
            _, logits = self.model(self.features, self.adj_matrices, self.node_types)
            predicted = torch.argmax(logits, dim=1)

        if node is not None:
            if node not in self.node_to_idx:
                raise ValueError(f"节点 {node} 不在图中")
            return predicted[self.node_to_idx[node]].item()

        return {node_id: predicted[idx].item()
                for node_id, idx in self.node_to_idx.items()}