import torch
from torch.nn import Linear
import torch.nn.functional as F
from torch_geometric.nn import GCN2Conv
from .net import MLP
from torch_geometric.data import Data
import torch_geometric.transforms as T


class GCNCLSNet(torch.nn.Module):
    """Graph-level classifier.

    Each node of the input graph carries two sequences (a "block" sequence and
    a "value" sequence). Both are embedded with bidirectional LSTMs, refined
    by two separate GCNII (:class:`GCN2Conv`) stacks over the same adjacency,
    mean-pooled into two graph embeddings, concatenated, and classified by an
    MLP head.

    NOTE(review): ``has_shape``, ``has_value`` and ``has_weight_info`` are
    accepted but never read in the code visible here, and the ``shape_info``
    forward argument is only referenced in commented-out code — presumably
    kept for interface compatibility with callers; confirm before removing.
    """

    def __init__(self,
                 in_d,
                 hidden_channels,
                 num_layers,
                 num_layers_gcn,
                 alpha=0.1,
                 theta=0.5,
                 shared_weights=True,
                 has_shape=False,
                 has_value=False,
                 has_weight_info=False,
                 dropout=0.0,
                 num_classes=10):
        """Build the sub-modules.

        Args:
            in_d: Per-timestep feature size of both input sequences.
            hidden_channels: LSTM hidden size; embeddings are 2x this
                (bidirectional concatenation).
            num_layers: Number of stacked LSTM layers.
            num_layers_gcn: Number of GCN2Conv layers in each GCNII stack.
            alpha, theta, shared_weights: GCNII hyper-parameters, passed
                through to :class:`GCN2ConvNet`.
            has_shape, has_value, has_weight_info: Currently unused (see
                class docstring).
            dropout: Dropout probability used inside the GCNII stacks.
            num_classes: Output dimension of the MLP classifier head.
        """
        super(GCNCLSNet, self).__init__()

        num_features = in_d
        # shape_features = 120

        # Bi-LSTM over the per-node "block" sequence; output feature size is
        # 2 * hidden_channels because of bidirectionality.
        self.lstm_layer = torch.nn.LSTM(
            input_size=num_features,
            hidden_size=hidden_channels,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.block_linear = Linear(2 * hidden_channels, 2 * hidden_channels)
        # Parallel Bi-LSTM + projection for the per-node "value" sequence.
        self.value_lstm_layer = torch.nn.LSTM(
            input_size=num_features,
            hidden_size=hidden_channels,
            num_layers=num_layers,
            bidirectional=True,
            batch_first=True,
        )
        self.value_linear = Linear(2 * hidden_channels, 2 * hidden_channels)

        # Two independent GCNII stacks sharing the same graph structure but
        # operating on the block and value embeddings respectively.
        self.gcn2convnet_block = GCN2ConvNet(hidden_channels,
                                             num_layers_gcn,
                                             alpha,
                                             theta,
                                             shared_weights=shared_weights,
                                             dropout=dropout)
        self.gcn2convnet_value = GCN2ConvNet(hidden_channels,
                                             num_layers_gcn,
                                             alpha,
                                             theta,
                                             shared_weights=shared_weights,
                                             dropout=dropout)

        # Classifier over the concatenated block/value graph embeddings
        # (each stack outputs hidden_channels features -> 2 * hidden_channels).
        self.mlp = MLP(in_c=2 * hidden_channels, out_c=num_classes)

    def forward(self, blocks_input, adj_t, device, shape_info=None, value_data=None):
        """Classify one graph.

        Args:
            blocks_input: Nested container of per-node block sequences;
                only element ``[0]`` is used — presumably a batch of size 1,
                verify against the caller.
            adj_t: Edge list as ``[[src, dst], ...]`` pairs — TODO confirm
                the caller's orientation matches what ToSparseTensor expects.
            device: Target device for all intermediate tensors.
            shape_info: Unused (only referenced in commented-out code).
            value_data: Nested container of per-node value sequences,
                unwrapped the same way as ``blocks_input``.

        Returns:
            Class logits of shape ``(1, num_classes)``.
        """

        # A graph with no edges would break the sparse transform below, so
        # substitute a single self-loop on node 0 and remember to bypass
        # message passing in the GCNII stacks.
        is_one_node = False
        if len(adj_t) == 0:
            adj_t = [[0, 0]]
            is_one_node = True

        adj_t = torch.LongTensor(adj_t).to(device)

        blocks = blocks_input[0]
        value_seq_arr = value_data[0]

        # Embed each node's two sequences independently; one embedding of
        # shape (1, 2 * hidden_channels) per node per modality.
        block_embeddings = []
        value_seq_embeddings = []
        for block, value_seq in zip(blocks, value_seq_arr):
            block = block.squeeze(0)
            value_seq = value_seq.squeeze(0)
            # logger.debug(block.shape)
            o, (_, _) = self.lstm_layer(block.to(device))
            v_o, (_, _) = self.value_lstm_layer(value_seq.to(device))
            # NOTE(review): o[-1][-1] assumes a 3-D (batched) LSTM output and
            # picks the final timestep of the last batch entry — confirm this
            # is the intended sequence summary for the caller's input shape.
            pred = self.block_linear(o[-1][-1].unsqueeze(0))
            v_pred = self.value_linear(v_o[-1][-1].unsqueeze(0))
            block_embeddings.append(pred)
            value_seq_embeddings.append(v_pred)

        # Stack per-node embeddings into (num_nodes, 2 * hidden_channels).
        block_embeddings = torch.stack(block_embeddings, dim=1).squeeze(0)
        value_seq_embeddings = torch.stack(value_seq_embeddings, dim=1).squeeze(0)

        # Convert the edge list into the SparseTensor adjacency expected by
        # GCN2Conv; for the single-node fallback keep the raw tensor (the
        # GCNII stacks skip message passing entirely in that case).
        block_data = Data(block_embeddings, adj_t)
        transform = T.Compose([T.ToSparseTensor()])
        if not is_one_node:
            block_data = transform(block_data)
        else:
            block_data.adj_t = adj_t

        blocks_graph_embedding = self.gcn2convnet_block(block_embeddings, block_data.adj_t, is_one_node)
        value_seqs_graph_embedding = self.gcn2convnet_value(value_seq_embeddings, block_data.adj_t, is_one_node)

        # in_out_shape_info = shape_info[:, :12 * 5 * 2]
        # out = torch.cat([, in_out_shape_info.squeeze(0).float().to(device)])
        # Concatenate the two pooled graph embeddings and classify.
        out = torch.cat([blocks_graph_embedding, value_seqs_graph_embedding])
        pred = self.mlp(out.unsqueeze(0))
        return pred


class GCN2ConvNet(torch.nn.Module):
    """GCNII stack: input projection, ``num_layers`` GCN2Conv layers with an
    initial-residual connection, output projection, and mean pooling over
    nodes into a single (hidden_channels,) embedding."""

    def __init__(self, hidden_channels, num_layers, alpha, theta, shared_weights=True, dropout=0.0):
        super().__init__()

        # lins[0]: input projection (2*hidden -> hidden);
        # lins[1]: output projection (hidden -> hidden).
        self.lins = torch.nn.ModuleList([
            Linear(2 * hidden_channels, hidden_channels),
            Linear(hidden_channels, hidden_channels),
        ])

        # One GCN2Conv per depth; the 1-based depth index drives the
        # identity-mapping decay via theta.
        self.convs = torch.nn.ModuleList(
            GCN2Conv(hidden_channels, alpha, theta, depth, shared_weights, normalize=False)
            for depth in range(1, num_layers + 1)
        )

        self.dropout = dropout

    def forward(self, blocks_input, adj_t, is_one_node):
        """Pool node features into one embedding of shape (hidden_channels,).

        Message passing is skipped entirely when ``is_one_node`` is true,
        since a single-node graph has nothing to propagate.
        """
        h = F.dropout(blocks_input, self.dropout, training=self.training)
        h = self.lins[0](h).relu()
        h0 = h  # initial residual fed to every GCN2Conv layer

        if not is_one_node:
            for conv in self.convs:
                h = F.dropout(h, self.dropout, training=self.training)
                h = conv(h, h0, adj_t).relu()

        h = F.dropout(h, self.dropout, training=self.training)
        return self.lins[1](h).mean(dim=0)
