import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GINConv


class GraphMAE2Model(nn.Module):
    """GIN-based graph encoder with gated residual connections and an MLP head.

    Stacks ``num_layers`` GIN convolutions (each followed by BatchNorm + ReLU),
    optionally blending each layer's output with its input through a learned
    per-layer gate ``alpha`` (sigmoid of a scalar parameter, initialized to 0,
    i.e. an even 0.5/0.5 mix at the start of training). A final two-layer MLP
    maps the last hidden representation to ``out_dim`` logits per node.

    Args:
        in_dim: Dimensionality of input node features.
        hidden_dim: Hidden feature size used by every GIN layer.
        out_dim: Number of output classes / logits per node.
        num_layers: Total number of GIN layers. NOTE: values < 2 still build
            two conv layers (first + last) but only the first is used in
            ``forward``; callers should pass num_layers >= 2.
        dropout: Base dropout probability (see NOTE in ``forward`` — the
            effective rate during training is doubled, capped at 0.8).
        residual: If True, apply the gated residual mix after every layer.
        use_nfn: If True, normalize input features with a non-affine LayerNorm
            before the first conv.
    """

    def __init__(self,
                 in_dim,
                 hidden_dim=128,
                 out_dim=2,
                 num_layers=6,
                 dropout=0.3,
                 residual=True,
                 use_nfn=True):
        super().__init__()
        self.residual = residual
        self.use_nfn = use_nfn
        self.dropout = dropout
        self.num_layers = num_layers

        if self.use_nfn:
            # Non-affine: pure feature normalization, no learned scale/shift.
            self.nfn = nn.LayerNorm(in_dim, elementwise_affine=False)
        # One scalar gate per layer; sigmoid(0) = 0.5 -> even residual mix.
        self.alphas = nn.ParameterList([
            nn.Parameter(torch.zeros(1)) for _ in range(num_layers)
        ])

        self.convs = nn.ModuleList()
        self.bns = nn.ModuleList()
        # First layer maps raw features into the hidden space.
        self.convs.append(self.build_gin_layer(in_dim, hidden_dim))
        self.bns.append(nn.BatchNorm1d(hidden_dim))

        # Middle layers: hidden -> hidden.
        for _ in range(1, num_layers - 1):
            self.convs.append(self.build_gin_layer(hidden_dim, hidden_dim))
            self.bns.append(nn.BatchNorm1d(hidden_dim))

        # Last layer (kept separate for symmetry with the first).
        self.convs.append(self.build_gin_layer(hidden_dim, hidden_dim))
        self.bns.append(nn.BatchNorm1d(hidden_dim))

        # Classification head: hidden -> hidden/2 -> out_dim.
        self.fc = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(p=dropout),
            nn.Linear(hidden_dim // 2, out_dim)
        )

    def build_gin_layer(self, in_dim, out_dim):
        """Return a GINConv whose update MLP is Linear-BN-ReLU-Linear.

        ``train_eps=True`` makes the GIN epsilon (self-loop weight) learnable.
        """
        return GINConv(
            nn.Sequential(
                nn.Linear(in_dim, out_dim),
                nn.BatchNorm1d(out_dim),
                nn.ReLU(),
                nn.Linear(out_dim, out_dim)
            ), train_eps=True
        )

    def forward(self, x, edge_index):
        """Encode node features and return per-node logits.

        Args:
            x: Node feature matrix, shape ``(num_nodes, in_dim)``.
            edge_index: COO edge index tensor, shape ``(2, num_edges)``.

        Returns:
            Logits of shape ``(num_nodes, out_dim)``.
        """
        if self.use_nfn:
            x = self.nfn(x)

        h = x
        for i in range(self.num_layers):
            h_prev = h
            h = self.convs[i](h, edge_index)
            h = self.bns[i](h)
            h = F.relu(h)

            if self.residual:
                alpha = torch.sigmoid(self.alphas[i])
                if h_prev.shape[-1] != h.shape[-1]:
                    # Match dimensions by truncating or zero-padding h_prev.
                    # This is exactly what the previous implementation computed
                    # via F.linear with an identity matrix, but without
                    # materializing a fresh eye tensor on every forward pass.
                    d_out = h.shape[-1]
                    d_in = h_prev.shape[-1]
                    if d_in >= d_out:
                        h_prev = h_prev[..., :d_out]
                    else:
                        h_prev = F.pad(h_prev, (0, d_out - d_in))
                # Gated residual: learned convex combination of new and old h.
                h = alpha * h + (1 - alpha) * h_prev

            # NOTE(review): during training the dropout rate is deliberately
            # doubled (capped at 0.8) relative to self.dropout; F.dropout is
            # already a no-op in eval mode, so this only changes train-time
            # regularization strength. Preserved as-is — confirm intent.
            dropout_rate = self.dropout
            if self.training:
                dropout_rate = min(0.8, self.dropout * 2)
            h = F.dropout(h, p=dropout_rate, training=self.training)
        return self.fc(h)