import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv

from model.prediction import Linear


class DynamicGraphConvModel(nn.Module):
    """GCN model for dynamic graphs that propagates embedding *changes*.

    Pipeline (see ``forward``):
      1. ``gcn_forward``       — run the GCN stack over the delta adjacency
         to get updated node embeddings.
      2. ``gcn_2hop_forward``  — take the embedding delta (updated minus
         original) and propagate it over the current full adjacency with
         the same GCN stack.
      3. ``self.mlp``          — prediction head on the final embedding.
    """

    def __init__(self, hidden_size, output_dim, param):
        """
        Args:
            hidden_size: width of node features; every GCN layer maps
                hidden_size -> hidden_size.
            output_dim: stored but not used by the layers built here
                (the MLP head is constructed from hidden_size only);
                kept for interface compatibility.
            param: dict with keys "num_gcn_layers" (int) and
                "dropout_rate" (float in [0, 1)).
        """
        super(DynamicGraphConvModel, self).__init__()
        self.hidden_size = hidden_size
        self.output_dim = output_dim
        self.num_gcn_layers = param["num_gcn_layers"]
        self.dropout_rate = param["dropout_rate"]
        self.dropout = nn.Dropout(p=self.dropout_rate)
        # One shared stack of GCN layers, reused by both propagation passes.
        self.gcn_layers = nn.ModuleList(
            [GCNConv(hidden_size, hidden_size) for _ in range(self.num_gcn_layers)]
        )
        self.mlp = Linear(hidden_size)

    def forward(self, delta_adj, adj_now, feature_h0):
        """Run both propagation passes and the prediction head.

        Args:
            delta_adj: connectivity of the *changed* part of the graph
                (passed straight to GCNConv — presumably an edge_index /
                sparse adjacency; confirm against the caller).
            adj_now: connectivity of the whole graph at the current time.
            feature_h0: initial node feature matrix.

        Returns:
            (pred, final_embedding): MLP output and the final node embedding.
        """
        feature_h1 = self.gcn_forward(delta_adj, feature_h0)
        feature_h2 = self.gcn_2hop_forward(adj_now, feature_h1, feature_h0)
        final_embedding = feature_h2
        pred = self.mlp(final_embedding)
        return pred, final_embedding

    def gcn_forward(self, adj, feature):
        """Apply the GCN stack (ReLU + dropout per layer) to ``feature``.

        Here ``adj`` is the delta adjacency when called from ``forward``.
        """
        h = feature
        for gcn_layer in self.gcn_layers:
            # Fix: self.dropout was built in __init__ but never applied
            # (the commented-out Sequential attempt showed the intent).
            # Dropout after each activation; a no-op in eval mode.
            h = self.dropout(F.relu(gcn_layer(h, adj)))
        return h

    def gcn_2hop_forward(self, adj, then_embedding_x, original_embedding_x):
        """Propagate the embedding delta over the full current graph.

        The delta (updated minus original embedding) is the representation
        that must be pushed out to neighbouring nodes; the propagation is
        the same shared GCN stack used in ``gcn_forward``.
        """
        delta_h = then_embedding_x - original_embedding_x
        return self.gcn_forward(adj, delta_h)

