from typing import Any

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as tud
from torch.autograd import Variable

import model_utils
from model_utils import AnomalyConfusionMatrix, device, create_dataloader

# Layer widths shared by the encoder/decoder and the estimation network.
layer1size = 256
layer2size = 128
layer3size = 64
z_dim = 3   # dimensionality of the compressed latent code
n_gmm = 2   # number of Gaussian mixture components in the estimation net


class DAGMM(nn.Module):
    """Deep Autoencoding Gaussian Mixture Model (network for KDDCup99).

    An autoencoder compresses the input to a low-dimensional code ``z_c``;
    two reconstruction features (relative Euclidean distance and cosine
    similarity) are appended to form ``z``, which the estimation network
    maps to soft GMM membership probabilities ``gamma``.
    """

    def __init__(self, input_size):
        """Build the encoder, decoder and estimation sub-networks.

        Args:
            input_size: number of features in one (flattened) input sample.
        """
        super().__init__()
        self.name = 'DAGMM'
        # Encoder network: input_size -> 256 -> 128 -> 64 -> z_dim
        self.fc1 = nn.Linear(input_size, layer1size)
        self.fc2 = nn.Linear(layer1size, layer2size)
        self.fc3 = nn.Linear(layer2size, layer3size)
        self.fc4 = nn.Linear(layer3size, z_dim)

        # Decoder network (mirror of the encoder)
        self.fc5 = nn.Linear(z_dim, layer3size)
        self.fc6 = nn.Linear(layer3size, layer2size)
        self.fc7 = nn.Linear(layer2size, layer1size)
        self.fc8 = nn.Linear(layer1size, input_size)

        # Estimation network: latent code + 2 reconstruction features -> n_gmm
        self.fc9 = nn.Linear(z_dim + 2, layer3size)
        self.fc10 = nn.Linear(layer3size, n_gmm)

    def encode(self, x):
        """Map a flattened input batch to latent codes (no final activation)."""
        h = torch.tanh(self.fc1(x))
        h = torch.tanh(self.fc2(h))
        h = torch.tanh(self.fc3(h))
        return self.fc4(h)

    def decode(self, x):
        """Reconstruct the input from a batch of latent codes."""
        h = torch.tanh(self.fc5(x))
        h = torch.tanh(self.fc6(h))
        h = torch.tanh(self.fc7(h))
        return self.fc8(h)

    def estimate(self, z):
        """Return soft GMM membership probabilities for each sample.

        BUG FIX: ``F.dropout`` defaults to ``training=True``, so dropout was
        previously applied at inference time as well, making evaluation
        nondeterministic. Tie it to the module's training flag instead.
        """
        h = F.dropout(torch.tanh(self.fc9(z)), 0.5, training=self.training)
        return F.softmax(self.fc10(h), dim=1)

    @staticmethod
    def compute_reconstruction(x, x_hat):
        """Per-sample reconstruction features: relative L2 error and cosine similarity.

        NOTE(review): the relative distance divides by ``x.norm(...)`` — an
        all-zero input row would produce NaN/inf; presumably inputs are
        never all-zero — confirm against the data pipeline.
        """
        relative_euclidean_distance = (
            x - x_hat).norm(2, dim=1) / x.norm(2, dim=1)
        cosine_similarity = F.cosine_similarity(x, x_hat, dim=1)
        return relative_euclidean_distance, cosine_similarity

    def forward(self, x):
        """Return ``(z_c, z, gamma)`` and the reconstruction of ``x``.

        z_c: N x z_dim latent codes; z: N x (z_dim + 2) codes with the two
        reconstruction features appended; gamma: N x n_gmm memberships.
        """
        x = x.view(x.shape[0], -1)  # flatten everything but the batch dim
        z_c = self.encode(x)
        decoded = self.decode(z_c)
        rec_1, rec_2 = self.compute_reconstruction(x, decoded)
        # z = [latent code | relative euclidean distance | cosine similarity]
        z = torch.cat([z_c, rec_1.unsqueeze(-1), rec_2.unsqueeze(-1)], dim=1)
        gamma = self.estimate(z)
        return (z_c, z, gamma), decoded


class ComputeLoss:
    """DAGMM objective: reconstruction error + GMM sample energy + covariance penalty."""

    def __init__(self, model, lambda_energy, lambda_cov):
        """
        Args:
            model: the DAGMM network (kept for reference; not used directly here).
            lambda_energy: weight of the sample-energy term.
            lambda_cov: weight of the covariance-diagonal penalty.
        """
        self.model = model
        self.lambda_energy = lambda_energy
        self.lambda_cov = lambda_cov
        self.n_gmm = n_gmm  # module-level constant: number of mixture components

    def forward(self, x, decoded, z, gamma):
        """Computing the loss function for DAGMM.

        BUG FIX: the loss used to be re-wrapped in
        ``Variable(loss, requires_grad=True)``, which creates a *new* leaf
        tensor detached from the computation graph, so no gradient ever
        reached the model parameters. Returning the loss directly restores
        training. (``Variable`` is deprecated since PyTorch 0.4 anyway.)
        """
        x = x.view(x.shape[0], -1)
        reconst_loss = F.mse_loss(decoded, x)

        sample_energy, cov_diag = self.compute_energy(z, gamma)

        return (reconst_loss
                + self.lambda_energy * sample_energy
                + self.lambda_cov * cov_diag)

    # noinspection PyTypeChecker
    def compute_energy(self, z, gamma, phi=None, mu=None, cov=None, sample_mean=True):
        """Computing the sample energy function (GMM negative log-likelihood).

        Args:
            z: N x D latent vectors.
            gamma: N x K soft membership probabilities.
            phi, mu, cov: optional precomputed mixture parameters; estimated
                from (z, gamma) when any of them is None.
            sample_mean: if True, reduce per-sample energies to their mean.

        Returns:
            (energy, cov_diag) where cov_diag penalizes degenerate covariances.
        """
        if (phi is None) or (mu is None) or (cov is None):
            phi, mu, cov = self.compute_params(z, gamma)

        z_mu = (z.unsqueeze(1) - mu.unsqueeze(0))  # N x K x D

        eps = 1e-12
        cov_inverse = []
        det_cov = []
        cov_diag = 0
        for k in range(self.n_gmm):
            # Jitter the diagonal so inversion and determinant stay stable.
            cov_k = cov[k] + torch.eye(cov[k].size(-1), device=z.device) * eps
            cov_inverse.append(torch.inverse(cov_k).unsqueeze(0))
            # det(2*pi*Sigma_k); computed in torch (was numpy) so gradients
            # flow through the determinant and no host round-trip is needed.
            det_cov.append(torch.det(cov_k * (2 * np.pi)).unsqueeze(0))
            cov_diag = cov_diag + torch.sum(1 / cov_k.diag())

        cov_inverse = torch.cat(cov_inverse, dim=0)  # K x D x D
        det_cov = torch.cat(det_cov, dim=0)          # K

        # Mahalanobis exponent, then weighted sum over mixture components.
        e_z = -0.5 * torch.sum(torch.sum(z_mu.unsqueeze(-1)
                               * cov_inverse.unsqueeze(0), dim=-2) * z_mu, dim=-1)
        e_z = torch.exp(e_z)
        e_z = -torch.log(torch.sum(phi.unsqueeze(0) * e_z /
                         (torch.sqrt(det_cov)).unsqueeze(0), dim=1) + eps)
        if sample_mean:
            e_z = torch.mean(e_z)
        return e_z, cov_diag

    @staticmethod
    def compute_params(z, gamma):
        """Computing the parameters phi, mu and cov for the sample energy function.

        # K: number of Gaussian mixture components
        # N: Number of samples
        # D: Latent dimension
        # z = NxD
        # gamma = NxK
        """

        # phi = K (mixture weights; mean responsibility per component)
        phi = torch.sum(gamma, dim=0) / gamma.size(0)

        # mu = KxD (responsibility-weighted mean of z)
        mu = torch.sum(z.unsqueeze(1) * gamma.unsqueeze(-1), dim=0)
        mu /= torch.sum(gamma, dim=0).unsqueeze(-1)

        z_mu = (z.unsqueeze(1) - mu.unsqueeze(0))
        z_mu_z_mu_t = z_mu.unsqueeze(-1) * z_mu.unsqueeze(-2)

        # cov = K x D x D (responsibility-weighted outer products)
        cov = torch.sum(gamma.unsqueeze(-1).unsqueeze(-1) * z_mu_z_mu_t, dim=0)
        cov /= torch.sum(gamma, dim=0).unsqueeze(-1).unsqueeze(-1)

        return phi, mu, cov


def compute_dagmm_loss(model, x, **kwargs):
    """Run *model* on batch *x* and return the DAGMM objective value."""
    (_, z, gamma), decoded = model(x)
    loss_fn = ComputeLoss(model=model, lambda_energy=0.1, lambda_cov=0.005)
    return loss_fn.forward(x, decoded, z, gamma)


def build_dagmm_model(input_size, **kwargs):
    """Instantiate a DAGMM network on the configured device with its Adam optimizer."""
    model = DAGMM(input_size=input_size).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.99))
    return model, optimizer


def train_and_test_dagmm_model(normal_dataloader: tud.DataLoader,
                               attack_dataloader: tud.DataLoader,
                               epochs: int,
                               continue_train: bool,
                               dataset_name: str,
                               data_dir: str) -> AnomalyConfusionMatrix:
    """Delegate training and evaluation of a DAGMM model to the shared driver."""
    run_config = dict(
        normal_dl=normal_dataloader,
        attack_dl=attack_dataloader,
        epochs=epochs,
        model_name='DAGMM',
        is_recur=False,
        compute_loss=compute_dagmm_loss,
        build_model=build_dagmm_model,
        continue_train=continue_train,
        dataset=dataset_name,
        data_dir=data_dir,
    )
    return model_utils.train_and_test_model(**run_config)


if __name__ == '__main__':
    # Demo entry point: train and evaluate DAGMM on the SWAT sample data.
    ds_name = 'SWAT'
    data_path = f'./data_demo/{ds_name}'
    normal_dl, attack_dl = create_dataloader(
        data_path, timemode=False, reverse_label=False, batch_size=64)

    train_and_test_dagmm_model(normal_dataloader=normal_dl,
                               attack_dataloader=attack_dl,
                               epochs=50,
                               continue_train=True,
                               dataset_name=ds_name,
                               data_dir=data_path)
