import torch
import torch.nn as nn
import torch.nn.functional as F

class GraphConvLayer(nn.Module):
    """One message-passing graph convolution layer.

    Pipeline (see forward): node features are mapped to messages by a small
    FFN, messages are aggregated over the adjacency matrix, and the result is
    combined with the original node features via ``concat``, ``add``, or a
    GRU-based ``gated`` update.

    Args:
        in_features: dimensionality of the input node features.
        out_features: dimensionality of the produced node embeddings.
        aggregation_type: "sum" or "mean" neighbor aggregation.
        combination_type: "concat", "gated", or "add".
        normalize: if True, L2-normalize the output embeddings per node.
        dropout_rate: dropout rate passed to the internal FFNs.
    """

    def __init__(self, in_features, out_features, aggregation_type="sum",
                 combination_type="concat", normalize=False, dropout_rate=0.2):
        super(GraphConvLayer, self).__init__()
        self.aggregation_type = aggregation_type
        self.combination_type = combination_type
        self.normalize = normalize
        self.ffn_prepare = FFN(in_features, out_features, dropout_rate)
        # NOTE(review): this dropout is never applied in forward(); kept so
        # the module tree (and any existing checkpoints) stay compatible.
        self.dropout = nn.Dropout(dropout_rate)
        if self.combination_type == "gated":
            self.update_fn = nn.GRU(out_features, out_features, batch_first=True)
        else:
            # NOTE(review): "concat" joins raw node features (in_features)
            # with prepared messages (out_features), so this implicitly
            # assumes in_features == out_features — confirm at call sites.
            self.update_fn = FFN(out_features * 2, out_features, dropout_rate)

    def prepare(self, node_features):
        """Transform raw node features into messages: (N, in) -> (N, out)."""
        return self.ffn_prepare(node_features)

    def aggregate(self, messages, adj_matrix):
        """Aggregate neighbor messages: (N, out) -> (N, out).

        ``adj_matrix`` may be a sparse or dense (N, N) tensor.
        """
        if self.aggregation_type == "mean":
            aggregated_messages = torch.spmm(adj_matrix, messages)
            # BUG FIX: Tensor.sum(dim=..., keepdim=True) on a sparse
            # adjacency does not yield a dense (N, 1) degree column that can
            # divide a dense tensor; densify the degree first.
            if adj_matrix.is_sparse:
                degree = torch.sparse.sum(adj_matrix, dim=1).to_dense().unsqueeze(1)
            else:
                degree = adj_matrix.sum(dim=1, keepdim=True)
            aggregated_messages = aggregated_messages / degree
            # Isolated nodes have degree 0 -> division produced NaN/Inf;
            # zero them out.
            aggregated_messages = torch.nan_to_num(
                aggregated_messages, nan=0.0, posinf=0.0, neginf=0.0,
            )
        elif self.aggregation_type == "sum":
            aggregated_messages = torch.spmm(adj_matrix, messages)
        else:
            raise ValueError("Unsupported aggregation type")
        return aggregated_messages

    def combine(self, aggregated_messages, node_features):
        """Combine aggregated messages with node features -> (N, out)."""
        if self.combination_type == "concat":
            combined_features = torch.cat([node_features, aggregated_messages], dim=1)
        elif self.combination_type == "gated":
            # Length-2 "sequence" per node: [current features, messages].
            combined_features = torch.stack([node_features, aggregated_messages], dim=1)
        elif self.combination_type == "add":
            combined_features = node_features + aggregated_messages
        else:
            raise ValueError("Unsupported combination type")
        if self.combination_type == "gated":
            # BUG FIX: nn.GRU returns a (output, h_n) tuple; the original
            # called .squeeze(1) on the tuple itself (AttributeError), and
            # squeeze(1) would have been a no-op on the (N, 2, H) output
            # anyway. Take the final hidden state (1, N, H) -> (N, H).
            _, last_hidden = self.update_fn(combined_features)
            node_embeddings = last_hidden.squeeze(0)
        else:
            node_embeddings = self.update_fn(combined_features)
        if self.normalize:
            node_embeddings = F.normalize(node_embeddings, p=2, dim=1)
        return node_embeddings

    def forward(self, node_features, adj_matrix):
        """Run prepare -> aggregate -> combine; returns (N, out) embeddings."""
        messages = self.prepare(node_features)
        aggregated_messages = self.aggregate(messages, adj_matrix)
        return self.combine(aggregated_messages, node_features)
class FFN(nn.Module):
    """Feed-forward block: BatchNorm1d -> Dropout -> Linear -> GELU.

    Maps (N, in_features) to (N, out_features).

    Args:
        in_features: input feature dimension (normalized by BatchNorm1d).
        out_features: output feature dimension of the linear projection.
        dropout_rate: dropout probability applied before the projection.
    """

    def __init__(self, in_features, out_features, dropout_rate=0.2):
        super(FFN, self).__init__()
        self.batch_norm = nn.BatchNorm1d(in_features)
        self.dropout = nn.Dropout(dropout_rate)
        self.linear = nn.Linear(in_features, out_features)
        self.gelu = nn.GELU()

    def forward(self, x):
        """Apply normalization, dropout, projection, and GELU in order."""
        projected = self.linear(self.dropout(self.batch_norm(x)))
        return self.gelu(projected)
class GCN(nn.Module):
    """Two-layer graph convolutional network for node classification.

    Pipeline: FFN preprocess -> two GraphConvLayers -> FFN postprocess ->
    linear scoring head. Returns raw logits (no softmax), suitable for
    ``nn.CrossEntropyLoss``.
    """

    def __init__(self, in_features, hidden_features, num_classes, dropout_rate=0.2):
        super(GCN, self).__init__()
        self.preprocess = FFN(in_features, hidden_features)
        self.conv1 = GraphConvLayer(hidden_features, hidden_features)
        self.conv2 = GraphConvLayer(hidden_features, hidden_features)
        # NOTE(review): "postprecess" is a typo for "postprocess"; kept
        # as-is because renaming would change state_dict keys.
        self.postprecess = FFN(hidden_features, hidden_features)
        self.score = nn.Linear(hidden_features, num_classes)

    def forward(self, node_features, adj_matrix):
        """Compute per-node class logits: (N, in) -> (N, num_classes).

        The trailing squeeze(1) collapses the class dimension only when
        num_classes == 1 (binary / regression-style head).
        """
        hidden = self.preprocess(node_features)
        for conv in (self.conv1, self.conv2):
            hidden = conv(hidden, adj_matrix)
        logits = self.score(self.postprecess(hidden))
        return logits.squeeze(1)
