import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import (
    add_self_loops,
    remove_self_loops,
)

from gat_rse_conv import GATwithRSE
from gatv2_rse_conv import GATv2withRSE


class ArchiteCADNet(torch.nn.Module):
    """Graph attention network producing semantic and instance outputs for CAD graphs.

    Pipeline: node/edge features are embedded by small MLPs, a stack of
    GAT (v1/v2) layers augmented with an RSE term processes the graph,
    then two heads produce per-node semantic logits and a dense
    per-node-pair instance affinity map.
    """

    def __init__(self,
                 *,
                 node_features_dim=128,
                 edge_features_dim=128,
                 gat_stages=4,
                 heads=8,
                 out_channels=9,
                 dropout_rate=0.2,
                 gat_version='v2',
                 use_cuda=True):
        '''
        Initialize ArchiteCADNet architecture.

        Args:
            node_features_dim (int): Embedded node feature dimensions
            edge_features_dim (int): Embedded edge feature dimensions
            gat_stages (int): Number of GAT (with RSE) layers; must be >= 2
                because the first and last layers are configured specially
            heads (int): Number of attention heads
            out_channels (int): Output channel dimension (semantic classes)
            dropout_rate (float): Dropout rate
            gat_version (str): GAT version ('v1' or 'v2')
            use_cuda (bool): Enable GPU usage

        Raises:
            ValueError: If ``gat_version`` is unknown or ``gat_stages < 2``.
        '''
        super().__init__()
        self.use_cuda = use_cuda
        self.dropout_rate = dropout_rate

        self.node_features_dim = node_features_dim
        self.edge_features_dim = edge_features_dim
        self.gat_stages = gat_stages
        self.heads = heads

        # Embedding MLPs: hidden layer is twice the output width.
        # LazyLinear infers the raw input feature width on first call.
        self.node_mlp = nn.Sequential(
            nn.LazyLinear(self.node_features_dim * 2),
            nn.ReLU(),
            nn.Dropout(p=self.dropout_rate),
            nn.LazyLinear(self.node_features_dim)
        )

        self.edge_mlp = nn.Sequential(
            nn.LazyLinear(self.edge_features_dim * 2),
            nn.ReLU(),
            nn.Dropout(p=self.dropout_rate),
            nn.LazyLinear(self.edge_features_dim)
        )

        # RSE embedding: one scalar per attention head for each edge
        # (including the self-loops added in forward()).
        self.rse_mlp = nn.Sequential(
            nn.LazyLinear(self.edge_features_dim * 2),
            nn.ReLU(),
            nn.Dropout(p=self.dropout_rate),
            nn.LazyLinear(heads)
        )

        self.gat_layers = self.get_gat_layers(gat_version)

        # Semantic head: per-node class logits.
        self.out_sem_mlp = nn.Sequential(
            nn.LazyLinear(self.node_features_dim),
            nn.ReLU(),
            nn.Dropout(p=self.dropout_rate),
            nn.LazyLinear(out_channels)
        )

        # Instance head: per-edge scalar affinity.
        self.out_ins_mlp = nn.Sequential(
            nn.LazyLinear(self.edge_features_dim),
            nn.ReLU(),
            nn.Dropout(p=self.dropout_rate),
            nn.LazyLinear(1)
        )

    def get_gat_layers(self, version):
        """Build the GAT stack for the requested version.

        The first layer maps the embedded node width up; the middle layers
        keep the (width * heads) concatenated size; the last layer averages
        the heads instead of concatenating (``concat=False``).

        Args:
            version (str): 'v1' for GATwithRSE, 'v2' for GATv2withRSE.

        Returns:
            nn.ModuleList: ``self.gat_stages`` configured layers.

        Raises:
            ValueError: If ``version`` is unknown or ``self.gat_stages < 2``.
        """
        type_map = {
            'v1': GATwithRSE,
            'v2': GATv2withRSE
        }
        try:
            conv_cls = type_map[version]
        except KeyError:
            raise ValueError(
                f"Unknown gat_version {version!r}; expected one of {sorted(type_map)}"
            ) from None

        # The construction below hard-codes a distinct first and last layer,
        # so fewer than 2 stages cannot be honored (the old code silently
        # built 2 layers anyway).
        if self.gat_stages < 2:
            raise ValueError(f"gat_stages must be >= 2, got {self.gat_stages}")

        gat_layers = nn.ModuleList()

        gat_layers.append(
            conv_cls(
                self.node_features_dim,
                self.node_features_dim * 2,
                self.heads,
                # cached=True,
                edge_dim=self.edge_features_dim
            )
        )

        for _ in range(self.gat_stages - 2):
            gat_layer = conv_cls(
                self.node_features_dim * 2 * self.heads,
                self.node_features_dim * 2,
                self.heads,
                # cached=True,
                edge_dim=self.edge_features_dim
            )
            gat_layers.append(gat_layer)

        # The multi-head attentions are AVERAGED in the LAST layer of GAT stage
        gat_layers.append(
            conv_cls(
                self.node_features_dim * 2 * self.heads,
                self.node_features_dim * 2,
                self.heads,
                # cached=True,
                edge_dim=self.edge_features_dim,
                concat=False
            )
        )

        if self.use_cuda:
            for i in range(len(gat_layers)):
                gat_layers[i] = gat_layers[i].cuda()

        return gat_layers

    def gat_stage(self, x, edge_attr, edge_index, rse):
        """Run the node features through the full GAT stack.

        Each layer is followed by ReLU and dropout. The same embedded edge
        attributes and RSE term are fed to every layer.

        Returns:
            tuple: ``(x, adj, alpha)`` where ``adj``/``alpha`` are the edge
            index and attention weights reported by the LAST layer only.
        """
        # NOTE: the previous implementation also accumulated a running
        # ``sum_alpha`` across layers but never returned or used it (and
        # mixed detached and non-detached tensors while doing so); that
        # dead computation has been removed.
        for gat_layer in self.gat_layers:
            x, (adj, alpha) = gat_layer(
                x, edge_index, edge_attr, rse=rse, return_attention_weights=True)
            x = x.relu()
            x = F.dropout(x, p=self.dropout_rate, training=self.training)

        return x, adj, alpha

    def forward(self, data):
        """Forward pass.

        Args:
            data: A graph batch exposing ``x`` (node features), ``edge_attr``
                and ``edge_index`` (PyG conventions).

        Returns:
            tuple: ``(out_sem, full_out_ins)`` — per-node semantic logits of
            shape (N, out_channels) and a dense instance-affinity tensor of
            shape (N * N, 1) scattered from the non-self-loop edges.
        """
        x = data.x.to(torch.float32)
        edge_attr = data.edge_attr.to(torch.float32)
        edge_index = data.edge_index.to(torch.int64)
        if self.use_cuda:
            x = x.cuda()
            edge_attr = edge_attr.cuda()
            edge_index = edge_index.cuda()

        x = self.node_mlp(x)
        edge_attr1 = self.edge_mlp(edge_attr)

        # Normalize the self-loop set (strip any present, re-add with mean
        # fill) so the RSE term lines up with the loops the GAT layers see.
        # Note the RSE is computed from the RAW edge attributes, not the
        # embedded ones.
        _edge_index, _edge_attr = remove_self_loops(
            edge_index, edge_attr)
        _edge_index, _edge_attr = add_self_loops(
            _edge_index, _edge_attr, fill_value='mean',
            num_nodes=x.shape[0])
        rse = self.rse_mlp(_edge_attr)

        x, adj, alpha = self.gat_stage(x, edge_attr1, edge_index, rse)

        N = x.shape[0]

        # Pairwise node features for the original (loop-free) edge list.
        row, col = edge_index
        x_i = x[row]
        x_j = x[col]
        x_i_j_cat = torch.cat((x_i, x_j), dim=1)

        # Drop the self-loops the GAT layer added so ``alpha`` aligns with
        # the original edges again.
        # NOTE(review): this assumes the last layer's ``adj`` enumerates the
        # non-self-loop edges in the same order as ``edge_index`` — verify
        # against the GAT*withRSE implementations.
        row, col = adj
        non_self_loops_mask = (row != col)
        alpha1 = alpha[non_self_loops_mask]
        adj1 = adj[:, non_self_loops_mask]
        row1, col1 = adj1

        edge_attr2 = torch.cat((alpha1, x_i_j_cat), dim=1)

        out_sem = self.out_sem_mlp(x)
        out_ins = self.out_ins_mlp(edge_attr2)

        # Scatter per-edge affinities into a dense N x N map, then flatten.
        full_out_ins = torch.sparse_coo_tensor(
            indices=torch.stack((row1, col1)),
            values=out_ins,
            size=(N, N, 1),
            device=alpha.device
        ).to_dense().reshape((N * N, 1))

        return out_sem, full_out_ins
