import torch.nn as nn
import torch.nn.functional as functional
import torch.utils.data as tud

import model_utils
from gdn_util.env import *
from gdn_util.graph_layer import GraphLayer
from gdn_util.time import *
from model_utils import device, AnomalyConfusionMatrix, train_and_test_model, create_dataloader


def get_batch_edge_index(org_edge_index, batch_num, node_num):
    """Tile an edge index across a batched (disjoint-union) graph.

    Repeats `org_edge_index` (shape (2, edge_num)) `batch_num` times and
    offsets the i-th copy by `i * node_num`, so each graph in the batch
    addresses its own contiguous block of node ids.

    Returns a long tensor of shape (2, edge_num * batch_num).
    """
    edge_num = org_edge_index.shape[1]
    # repeat() already allocates a fresh contiguous tensor, so the original
    # clone().detach() was redundant.
    batch_edge_index = org_edge_index.repeat(1, batch_num)
    # Vectorized per-copy offset; replaces the original Python loop over batches.
    offsets = torch.arange(batch_num, device=batch_edge_index.device) * node_num
    batch_edge_index = batch_edge_index + offsets.repeat_interleave(edge_num)
    return batch_edge_index.long()


class OutLayer(nn.Module):
    """Per-node MLP output head.

    Stacks (Linear -> BatchNorm1d -> ReLU) blocks `layer_num - 1` times and
    finishes with a Linear projection down to a single value per node.
    """

    def __init__(self, in_num, node_num, layer_num, inter_num=512):
        super(OutLayer, self).__init__()

        self.node_num = node_num

        layers = []
        for idx in range(layer_num):
            width_in = in_num if idx == 0 else inter_num
            if idx == layer_num - 1:
                # Final projection: one scalar output per node.
                layers.append(nn.Linear(width_in, 1))
            else:
                layers.append(nn.Linear(width_in, inter_num))
                layers.append(nn.BatchNorm1d(inter_num))
                layers.append(nn.ReLU())

        self.mlp = nn.ModuleList(layers)

    def forward(self, x):
        out = x
        for layer in self.mlp:
            if not isinstance(layer, nn.BatchNorm1d):
                out = layer(out)
            else:
                # BatchNorm1d normalizes over dim 1, so swap the feature
                # axis in and back out around the norm.
                out = layer(out.permute(0, 2, 1)).permute(0, 2, 1)
        return out


class GNNLayer(nn.Module):
    """Single graph-attention layer: GraphLayer -> BatchNorm1d -> ReLU.

    The attention weights and the (possibly modified) edge index from the
    most recent forward pass are cached on the instance for inspection.
    """

    def __init__(self, in_channel, out_channel, inter_dim=0, heads=1, node_num=100):
        super(GNNLayer, self).__init__()

        self.node_num = node_num
        # Populated on every forward() call.
        self.edge_index_1 = None
        self.att_weight_1 = None

        self.gnn = GraphLayer(in_channel, out_channel, inter_dim=inter_dim,
                              heads=heads, concat=False)
        self.bn = nn.BatchNorm1d(out_channel)
        self.relu = nn.ReLU()
        # Kept for state parity with the original module; not used in forward.
        self.leaky_relu = nn.LeakyReLU()

    def forward(self, x, edge_index, embedding=None):
        out, (new_edge_index, att_weight) = self.gnn(
            x, edge_index, embedding, return_attention_weights=True)

        # Stash attention details so callers can inspect the learned graph.
        self.att_weight_1 = att_weight
        self.edge_index_1 = new_edge_index

        return self.relu(self.bn(out))


class GDN(nn.Module):
    """Graph Deviation Network.

    Learns one embedding per node (sensor), builds a top-k cosine-similarity
    graph over those embeddings on every forward pass, runs graph attention
    over the learned graph, and regresses one scalar value per node.
    """

    def __init__(self, edge_index_sets, node_num, dim=64, out_layer_inter_dim=256, input_dim=10, out_layer_num=1,
                 top_k=20):
        # Plain string attribute; set before Module.__init__ is safe because
        # it is neither a Parameter nor a sub-Module.
        self.name = 'GDN'

        super(GDN, self).__init__()

        # List of (2, edge_num) edge indices, one per graph "view".
        self.edge_index_sets = edge_index_sets

        # Node embeddings double as GNN side-input and as the basis for the
        # learned top-k similarity graph.
        embed_dim = dim
        self.embedding = nn.Embedding(node_num, embed_dim)
        self.bn_out_layer_in = nn.BatchNorm1d(embed_dim)

        edge_set_num = len(edge_index_sets)
        self.gnn_layers = nn.ModuleList([
            GNNLayer(input_dim, dim, inter_dim=dim + embed_dim, heads=1) for _ in range(edge_set_num)
        ])

        self.node_embedding = None
        self.top_k = top_k
        # Top-k neighbor indices from the latest forward pass (for inspection).
        self.learned_graph = None

        self.out_layer = OutLayer(
            dim * edge_set_num, node_num, out_layer_num, inter_num=out_layer_inter_dim)

        # Cached batched edge indices, rebuilt when the batch size changes.
        self.cache_edge_index_sets = [None] * edge_set_num
        self.cache_embed_index = None

        self.dp = nn.Dropout(0.2)

        self.init_params()

    def init_params(self):
        # Kaiming-uniform init of the node embedding table (`math` comes in
        # via the `gdn_util.env` wildcard import).
        nn.init.kaiming_uniform_(self.embedding.weight, a=math.sqrt(5))

    def forward(self, data):
        """Forecast one value per node.

        Returns (gnn_features, forecast): gnn_features has shape
        (batch, node_num, dim * edge_set_num); forecast is viewed to
        (-1, node_num), i.e. one scalar per node per batch element.
        """
        x = data.clone().detach()
        # Reshape to (batch, node_num, all_feature); assumes the LAST axis of
        # `data` is the node axis — NOTE(review): confirm input layout.
        x = x.view(x.shape[0], x.shape[-1], -1).contiguous()
        edge_index_sets = self.edge_index_sets

        batch_num, node_num, all_feature = x.shape
        # Flatten batch and node dims into one node table for batched GNN ops.
        x = x.view(-1, all_feature).contiguous()

        gcn_outs = []
        for i, edge_index in enumerate(edge_index_sets):
            edge_num = edge_index.shape[1]
            cache_edge_index = self.cache_edge_index_sets[i]

            # NOTE(review): this cache is refreshed but never read below —
            # the GNN consumes `batch_gated_edge_index` built from the
            # learned graph instead. Possibly dead code inherited upstream.
            if cache_edge_index is None or cache_edge_index.shape[1] != edge_num * batch_num:
                self.cache_edge_index_sets[i] = get_batch_edge_index(
                    edge_index, batch_num, node_num).to(device)

            all_embeddings = self.embedding(torch.arange(node_num).to(device))

            # Detached copy: gradients do not flow through graph construction.
            weights_arr = all_embeddings.detach().clone()
            all_embeddings = all_embeddings.repeat(batch_num, 1)

            weights = weights_arr.view(node_num, -1)

            # Cosine similarity matrix between all node embeddings.
            cos_ji_mat = torch.matmul(weights, weights.T)
            normed_mat = torch.matmul(weights.norm(
                dim=-1).view(-1, 1), weights.norm(dim=-1).view(1, -1))
            cos_ji_mat = cos_ji_mat / normed_mat

            top_k_num = self.top_k

            # For each node, indices of its top-k most similar nodes
            # (includes the node itself since cos(i, i) = 1).
            top_k_indices_ji = torch.topk(cos_ji_mat, top_k_num, dim=-1)[1]

            self.learned_graph = top_k_indices_ji

            # Build a (source=j, target=i) edge list for the learned graph.
            gated_i = torch.arange(0, node_num).T.unsqueeze(1).repeat(
                1, top_k_num).flatten().to(device).unsqueeze(0)
            gated_j = top_k_indices_ji.flatten().unsqueeze(0)
            gated_edge_index = torch.cat((gated_j, gated_i), dim=0)

            # Tile the learned graph across the batch with per-graph offsets.
            batch_gated_edge_index = get_batch_edge_index(
                gated_edge_index, batch_num, node_num).to(device)
            gcn_out = self.gnn_layers[i](
                x, batch_gated_edge_index, embedding=all_embeddings)

            gcn_outs.append(gcn_out)

        x = torch.cat(gcn_outs, dim=1)
        x = x.view(batch_num, node_num, -1)

        # Gate the GNN features elementwise by each node's embedding.
        indexes = torch.arange(0, node_num).to(device)
        out = torch.mul(x, self.embedding(indexes))

        # BatchNorm1d expects the feature axis in dim 1; permute around it.
        out = out.permute(0, 2, 1)
        out = functional.relu(self.bn_out_layer_in(out))
        out = out.permute(0, 2, 1)

        out = self.dp(out)
        out = self.out_layer(out)
        # One scalar forecast per node, flattened over the batch.
        out = out.view(-1, node_num)

        return x, out


def compute_gdn_loss(model, x, y, **kwargs):
    """MSE training loss between the model's per-node forecast and `y`.

    Uses `functional.mse_loss` instead of building a fresh
    `nn.MSELoss().to(device)` module on every call — the loss has no
    parameters, so the module construction and device move were pure
    per-step overhead. Result is numerically identical.
    """
    _, out = model(x)
    return functional.mse_loss(out.squeeze(), y.squeeze())


def y_pred_func(model, data, **kwargs):
    """Compute per-sample anomaly scores for evaluation.

    `data` is an (input, label, target) triple; the score is the pairwise
    distance between the model's forecast and the ground-truth target.
    Returns (scores, int_labels) with scores shaped (..., 1).
    """
    features = data[0].to(device)
    labels = data[1].int().to(device)
    targets = data[2].to(device)

    _, prediction = model(features)
    prediction = prediction.squeeze()
    targets = targets.squeeze()

    scores = torch.pairwise_distance(prediction, targets).unsqueeze(-1)
    return scores, labels


def get_optimizer(model):
    """Build the Adam optimizer used for GDN training (lr=1e-4)."""
    return torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.99))


def build_gdn_model(ds_path, **kwargs):
    """Construct a GDN model and its optimizer for the dataset at `ds_path`.

    The fully-connected edge index and feature map come from
    `model_utils.get_edge_index`; the node count is the feature-map size.
    """
    fc_edge_index, feature_map = model_utils.get_edge_index(ds_path)
    model = GDN(edge_index_sets=[fc_edge_index],
                node_num=len(feature_map),
                input_dim=model_utils.INPUT_SIZE).to(device)
    return model, get_optimizer(model)


def train_and_test_gdn_model(normal_dataloader: tud.DataLoader,
                             attack_dataloader: tud.DataLoader,
                             epochs: int,
                             continue_train: bool,
                             dataset_name: str,
                             data_dir: str) -> AnomalyConfusionMatrix:
    """Train GDN on the normal split and evaluate it on the attack split.

    Thin wrapper that wires the GDN-specific hooks (loss, builder,
    prediction function) into the shared training harness.
    """
    return train_and_test_model(
        normal_dl=normal_dataloader,
        attack_dl=attack_dataloader,
        epochs=epochs,
        continue_train=continue_train,
        dataset=dataset_name,
        data_dir=data_dir,
        model_name='GDN',
        is_recur=True,
        build_model=build_gdn_model,
        compute_loss=compute_gdn_loss,
        test_pred_func=y_pred_func,
    )


if __name__ == "__main__":
    normal_swat_dl, attack_swat_dl = create_dataloader('./data/SWAT',
                                                       timemode=True,
                                                       reverse_label=True)
    # BUG FIX: the original call omitted the required `continue_train` and
    # `data_dir` arguments (neither has a default), so the script raised
    # TypeError before training started. `data_dir` mirrors the dataloader
    # path above — TODO confirm the intended value against train_and_test_model.
    confusion_matrix = train_and_test_gdn_model(normal_dataloader=normal_swat_dl,
                                                attack_dataloader=attack_swat_dl,
                                                epochs=model_utils.EPOCHS,
                                                continue_train=False,
                                                dataset_name='SWAT',
                                                data_dir='./data/SWAT')