from loguru import logger
import torch
import torch.nn as nn
from torch_geometric.nn import GCN2Conv, GCNConv
from model.net import MLP

# Vocabulary of relay-style operator names. A node's class id is its index in
# this list, so the order must never change.
atom_list = [
    'divide', 'abs', 'expand_dims', 'negative', 'upsampling', 'batch_matmul',
    'global_max_pool2d', 'subtract', 'minimum', 'concatenate', 'add', 'relu',
    'bias_add', 'global_avg_pool2d', 'exp', 'cast', 'multiply', 'clip',
    'upsampling3d', 'tanh', 'max_pool3d', 'log', 'strided_slice', 'pad',
    'avg_pool3d', 'max_pool2d', 'maximum', 'dense', 'transpose', 'sqrt',
    'sigmoid', 'adaptive_avg_pool3d', 'batch_flatten', 'conv3d', 'avg_pool2d',
    'softmax', 'greater', 'leaky_relu', 'adaptive_max_pool3d', 'conv2d',
]

# Two ids beyond the operator vocabulary (presumably mask/unknown tokens --
# verify against the dataset encoding).
CLASS_NUM = len(atom_list) + 2


class CorrectGNN(nn.Module):
    """Node-type corrector built on a GCNII (``GCN2Conv``) backbone.

    Node categories are embedded, propagated through the graph, and a small
    MLP produces a 2-way score per node.
    """

    def __init__(self, embed_dim=200, alpha=0.1, theta=0.5, gcn_layer_num=3):
        super().__init__()
        # Map each categorical node type to a dense vector.
        self.embed_layer = nn.Embedding(num_embeddings=CLASS_NUM, embedding_dim=embed_dim)
        self.gcn2conv = GCN2Conv(embed_dim, alpha, theta, shared_weights=False, layer=gcn_layer_num)
        # Binary-classification head (hidden width 200).
        self.mlp = MLP(in_c=embed_dim, out_c=2, mid_c=200)

    def forward(self, x, adj):
        """Infer the type prediction of masked nodes.

        :param x: node-type ids, e.g. ``tensor([1, 2, 3, 2])``
        :type x: Tensor
        :param adj: edge list, accepted as ``(2, E)`` or ``(E, 2)``
        :type adj: Tensor
        :return: per-node 2-class scores
        :rtype: Tensor
        """
        embedded = self.embed_layer(x)
        logger.debug(embedded.shape)
        logger.debug(adj.shape)
        # Normalize the edge list to the (2, E) layout the conv expects.
        edge_index = adj.T if adj.shape[0] != 2 and adj.shape[1] == 2 else adj
        hidden = self.gcn2conv(embedded, embedded, edge_index)
        return self.mlp(hidden)


class CorrectGCN(nn.Module):
    """Node-type corrector using a single plain ``GCNConv`` layer."""

    def __init__(self, embed_dim=200, alpha=0., theta=0.5, gcn_layer_num=1):
        # alpha / theta / gcn_layer_num are accepted for interface parity
        # with CorrectGNN; the plain GCNConv layer does not use them.
        super().__init__()
        # Map each categorical node type to a dense vector.
        self.embed_layer = nn.Embedding(num_embeddings=CLASS_NUM, embedding_dim=embed_dim)
        self.gcn = GCNConv(embed_dim, embed_dim, normalize=True)
        # Binary-classification head (hidden width 200).
        self.mlp = MLP(in_c=embed_dim, out_c=2, mid_c=200)

    def forward(self, x, adj):
        """Infer the type prediction of masked nodes.

        :param x: node-type ids, e.g. ``tensor([1, 2, 3, 2])``
        :type x: Tensor
        :param adj: edge list, accepted as ``(2, E)`` or ``(E, 2)``
        :type adj: Tensor
        :return: per-node 2-class scores
        :rtype: Tensor
        """
        embedded = self.embed_layer(x)
        logger.debug(embedded.dtype)
        logger.debug(adj.dtype)
        # Normalize the edge list to the (2, E) layout the conv expects.
        if adj.shape[0] != 2 and adj.shape[1] == 2:
            adj = adj.T
        return self.mlp(self.gcn(embedded, adj))


class CorrectGNNWithShapeDim(nn.Module):
    """Node-type corrector that augments type embeddings with a weight-dim embedding.

    The per-node type embedding and the embedding of the node's max weight
    dimensionality are concatenated before graph propagation.
    """

    def __init__(self, embed_dim=200, alpha=0.1, theta=0.5, gcn_layer_num=1):
        super().__init__()
        # Map each categorical node type to a dense vector.
        self.embed_layer = nn.Embedding(num_embeddings=CLASS_NUM, embedding_dim=embed_dim)
        # Weight dims take values in [0, 6).
        self.dim_embed_layer = nn.Embedding(num_embeddings=6, embedding_dim=embed_dim)
        # Graph conv operates on the concatenated (2 * embed_dim) features.
        self.gcn2conv = GCN2Conv(2 * embed_dim, alpha, theta, shared_weights=False, layer=gcn_layer_num)
        # Binary-classification head (hidden width 200).
        self.mlp = MLP(in_c=2 * embed_dim, out_c=2, mid_c=200)

    def forward(self, x, adj, weight_dim):
        """Infer the type prediction of masked nodes.

        :param x: node-type ids, e.g. ``tensor([1, 2, 3, 2])``
        :type x: Tensor
        :param adj: edge list, accepted as ``(2, E)`` or ``(E, 2)``
        :type adj: Tensor
        :param weight_dim: max weight-dim id per node
        :type weight_dim: Tensor
        :return: per-node 2-class scores
        :rtype: Tensor
        """
        type_emb = self.embed_layer(x)
        dim_emb = self.dim_embed_layer(weight_dim)
        combined = torch.cat([type_emb, dim_emb], dim=1)
        logger.debug(combined.shape)
        logger.debug(adj.shape)
        # Normalize the edge list to the (2, E) layout the conv expects.
        edge_index = adj.T if adj.shape[0] != 2 and adj.shape[1] == 2 else adj
        hidden = self.gcn2conv(combined, combined, edge_index)
        return self.mlp(hidden)


class CorrectGCNWithBB(nn.Module):
    """Node-type corrector that fuses the operator graph with a basic-block graph.

    Each node's GCNII embedding is concatenated with the pooled embedding of
    its associated basic-block interval before classification.
    """

    def __init__(self, embed_dim=200, alpha=0.1, theta=0.5, gcn_layer_num=3):
        super().__init__()
        # Kept so forward() can build zero vectors of the right width instead
        # of hard-coding 200 (which broke non-default embed_dim).
        self.embed_dim = embed_dim
        # Embedding layer [convert the category to a vector].
        self.embed_layer = nn.Embedding(num_embeddings=CLASS_NUM, embedding_dim=embed_dim)

        self.gcn2conv = GCN2Conv(embed_dim, alpha, theta, shared_weights=False, layer=gcn_layer_num)

        # MLP for the binary classification over [node emb | basic-block emb].
        self.mlp = MLP(in_c=embed_dim * 2, out_c=2, mid_c=200)

        # NOTE(review): unused by forward() (the mean-pooling path is used
        # instead); kept with its original fixed sizes for checkpoint
        # compatibility.
        self.bilstm_bb = torch.nn.LSTM(
            input_size=200,
            hidden_size=200,
            num_layers=2,
            bidirectional=True,
            batch_first=True,
        )
        self.gcn2conv_bb = GCN2Conv(embed_dim, alpha, theta, shared_weights=False, layer=gcn_layer_num)

    def forward(self, x, adj, x_bb, adj_bb, intervals_bb):
        """Infer the type prediction of masked nodes.

        :param x: node-type ids, e.g. ``tensor([1, 2, 3, 2])``
        :type x: Tensor
        :param adj: edge list, accepted as ``(2, E)`` or ``(E, 2)``
        :type adj: Tensor
        :param x_bb: per-basic-block feature tensors; each assumed
            ``(1, T, embed_dim)`` -- TODO confirm against the data loader
        :param adj_bb: basic-block edge list with a leading batch dim
        :param intervals_bb: per-node ``[start, end)`` ranges into the
            basic-block graph; ``start == -1`` marks "no basic block"
        :return: per-node 2-class scores
        :rtype: Tensor
        """
        features = self.embed_layer(x)
        # Follow the module's device instead of hard-coding .cuda(), so the
        # model also runs on CPU; on a GPU-resident model this is equivalent.
        device = features.device
        logger.debug(features.shape)
        logger.debug(adj.shape)
        if adj.shape[0] != 2 and adj.shape[1] == 2:
            adj = adj.T
        node_embeddings = self.gcn2conv(features, features, adj)

        # Encode each basic block by mean-pooling its instruction features.
        # BUGFIX: the original squeezed twice (squeeze(0) then mean of
        # bb.squeeze(0)), so a single-row block (1, D) collapsed to (D,) and
        # the mean produced a scalar instead of a D-vector.
        bb_embeddings = []
        for bb in x_bb:
            bb = bb.squeeze(0).to(device)  # (1, T, D) -> (T, D)
            bb_embeddings.append(torch.mean(bb, dim=0))
        bb_embeddings = torch.stack(bb_embeddings)
        gcn_embeddings_bb = self.gcn2conv_bb(bb_embeddings, bb_embeddings, adj_bb.squeeze(0).T)

        # Map each graph node to the mean embedding of its basic-block
        # interval; nodes without a block get a zero vector.
        bb_node_embeddings = []
        for interval in intervals_bb:
            if interval[0] == -1:
                # BUGFIX: use embed_dim (was a hard-coded 200).
                bb_node_embeddings.append(torch.zeros(self.embed_dim, device=device))
                continue
            interval_emb = gcn_embeddings_bb[interval[0]:interval[1]]
            bb_node_embeddings.append(torch.mean(interval_emb, dim=0))
        node_embeddings = torch.cat([node_embeddings, torch.stack(bb_node_embeddings)], dim=1)

        return self.mlp(node_embeddings)


if __name__ == "__main__":
    # Smoke test: three nodes connected in a ring.
    model = CorrectGNN()
    node_types = torch.tensor([1, 2, 3])
    edge_index = torch.LongTensor([[0, 1], [1, 2], [2, 0]]).T
    print(model(node_types, edge_index).shape)