import torch
import torch.utils.data as tud
from torch import nn
from torch.autograd import Variable

from model_utils import device, AnomalyConfusionMatrix, train_and_test_model, create_dataloader, EPOCHS


class NormalizingFlow(nn.Module):
    """A stack of K planar-flow transforms: z <- z + u * tanh(z @ w.T + b).

    NOTE(review): only the transformed sample is returned; the
    log-det-Jacobian correction of a full planar flow is not computed here,
    matching the original implementation.
    """

    def __init__(self, latent_dim, k):
        super(NormalizingFlow, self).__init__()
        self.latent_dim = latent_dim
        self.K = k
        # BUG FIX: the original kept nn.Parameter tensors in plain Python
        # lists, so nn.Module never registered them — they were invisible
        # to .parameters() (hence never trained) and to .to(device).
        # ParameterList registers them properly; they now move with the
        # parent module instead of needing per-tensor .to(device) calls.
        self.w = nn.ParameterList(
            nn.Parameter(torch.rand(1, self.latent_dim)) for _ in range(self.K))
        self.b = nn.ParameterList(
            nn.Parameter(torch.rand(1)) for _ in range(self.K))
        self.u = nn.ParameterList(
            nn.Parameter(torch.rand(1, self.latent_dim)) for _ in range(self.K))

    def forward(self, z):
        """Apply the K planar transforms in sequence.

        z: tensor of shape (batch, latent_dim); returns the same shape.
        """
        for i in range(self.K):
            z = z + self.u[i] * torch.tanh(torch.mm(z, self.w[i].T) + self.b[i])
        return z


class Encoder(nn.Module):
    """GRU-based recurrent variational encoder.

    At each time step the GRU hidden state is concatenated with the
    previous latent sample, projected through a dense layer, and mapped to
    the mean (Sigmoid) and std (Softplus) of a diagonal Gaussian from
    which z is drawn with the reparameterization trick.  Optionally each
    step's sample is refined by a per-step planar normalizing flow.
    """

    def __init__(self, x_dim, h_dim, dense_dim, z_dim, rolling_size, use_pnf, pnf_layers):
        super(Encoder, self).__init__()
        self.x_dim = x_dim
        self.h_dim = h_dim
        self.dense_dim = dense_dim
        self.z_dim = z_dim
        self.rolling_size = rolling_size

        self.gru_cell = nn.GRUCell(
            input_size=self.x_dim, hidden_size=self.h_dim)
        self.use_PNF = use_pnf
        self.PNF_layers = pnf_layers

        if self.use_PNF:
            # One flow per time step; ModuleList registers them so they
            # move with the parent module on .to(device).
            self.PNF = nn.ModuleList(NormalizingFlow(latent_dim=self.z_dim, k=self.PNF_layers)
                                     for _ in range(self.rolling_size))

        self.phi_enc = nn.Sequential(
            nn.Linear(self.h_dim + self.z_dim, self.dense_dim),
            nn.ReLU())

        self.enc_means = nn.Sequential(
            nn.Linear(self.dense_dim, self.z_dim),
            nn.Sigmoid())

        self.enc_stds = nn.Sequential(
            nn.Linear(self.dense_dim, self.z_dim),
            nn.Softplus())

    def reparameterized_sample(self, mean, std):
        """Draw z = mean + std * eps with eps ~ N(0, I) (reparameterization trick)."""
        # randn_like allocates eps on std's device/dtype, replacing the
        # deprecated Variable/FloatTensor CPU-then-move pattern.
        eps = torch.randn_like(std)
        return eps * std + mean

    def forward(self, x):
        """Encode x of shape (batch, seq_len, x_dim).

        Returns (z, z_mean, z_std), each of shape (batch, seq_len, z_dim).
        """
        h_i = torch.zeros(x.shape[0], self.h_dim, device=x.device)
        z_i = torch.zeros(x.shape[0], self.z_dim, device=x.device)
        z_means = []
        z_stds = []
        zs = []
        for i in range(x.shape[1]):
            h_i = self.gru_cell(x[:, i], h_i)
            h_z = torch.cat([h_i, z_i], dim=1)
            phi_z = self.phi_enc(h_z)
            z_mean = self.enc_means(phi_z)
            z_std = self.enc_stds(phi_z)
            z = self.reparameterized_sample(mean=z_mean, std=z_std)
            if self.use_PNF:
                z = self.PNF[i](z)
            # BUG FIX: the original never updated z_i, so the previous
            # latent fed into h_z was always zero and the recurrent latent
            # feedback was dead.  Carry the current sample forward so step
            # t conditions on z_{t-1} as the concatenation intends.
            z_i = z
            z_means.append(z_mean)
            z_stds.append(z_std)
            zs.append(z)
        # Stack along time, then move batch back to dim 0.
        return (torch.stack(zs).permute(1, 0, 2),
                torch.stack(z_means).permute(1, 0, 2),
                torch.stack(z_stds).permute(1, 0, 2))


class Decoder(nn.Module):
    """GRU-based recurrent decoder: maps a latent sequence back to x-space.

    Each step's hidden state is projected to the mean (Sigmoid) and std
    (Softplus) of a diagonal Gaussian over x, from which a reconstruction
    is sampled with the reparameterization trick.
    """

    def __init__(self, x_dim, h_dim, dense_dim, z_dim, rolling_size):
        super(Decoder, self).__init__()
        self.x_dim = x_dim
        self.h_dim = h_dim
        self.dense_dim = dense_dim
        self.z_dim = z_dim
        self.rolling_size = rolling_size

        self.gru_cell = nn.GRUCell(
            input_size=self.z_dim, hidden_size=self.h_dim)

        self.phi_dec = nn.Sequential(
            nn.Linear(self.h_dim, self.dense_dim),
            nn.ReLU())

        self.dec_means = nn.Sequential(
            nn.Linear(self.dense_dim, self.x_dim),
            nn.Sigmoid())

        self.dec_stds = nn.Sequential(
            nn.Linear(self.dense_dim, self.x_dim),
            nn.Softplus())

    def reparameterized_sample(self, mean, std):
        """Draw x = mean + std * eps with eps ~ N(0, I) (reparameterization trick)."""
        # randn_like allocates eps on std's device/dtype, replacing the
        # deprecated Variable/FloatTensor CPU-then-move pattern.
        eps = torch.randn_like(std)
        return eps * std + mean

    def forward(self, input):
        """Decode a latent sequence of shape (batch, seq_len, z_dim).

        Returns (x, x_mean, x_std), each of shape (batch, seq_len, x_dim).
        """
        h_i = torch.zeros(input.shape[0], self.h_dim, device=input.device)
        x_means = []
        x_stds = []
        xs = []
        for i in range(input.shape[1]):
            h_i = self.gru_cell(input[:, i], h_i)
            phi_x = self.phi_dec(h_i)
            x_mean = self.dec_means(phi_x)
            x_std = self.dec_stds(phi_x)
            xs.append(self.reparameterized_sample(mean=x_mean, std=x_std))
            x_means.append(x_mean)
            x_stds.append(x_std)
        # Stack along time, then move batch back to dim 0.
        return (torch.stack(xs).permute(1, 0, 2),
                torch.stack(x_means).permute(1, 0, 2),
                torch.stack(x_stds).permute(1, 0, 2))


# Shared model hyperparameters: width of the GRU/dense hidden layers and
# dimensionality of the latent space used by OmniAnomaly below.
hidden_size = 16
z_dim = 16


class OmniAnomaly(nn.Module):
    """Stochastic recurrent VAE for multivariate anomaly detection.

    The Encoder maps a rolling window to per-step latent Gaussians
    (optionally refined by planar normalizing flows); the Decoder
    reconstructs the window from the latent samples.  forward() returns
    the loss terms plus the intermediate distributions so the training
    driver can assemble the annealed ELBO.
    """

    def __init__(self, input_size):
        super(OmniAnomaly, self).__init__()
        self.name = 'OmniAnomaly'

        # dim info
        self.x_dim = input_size
        self.h_dim = hidden_size
        self.dense_dim = hidden_size
        self.z_dim = z_dim

        # sequence info: length of the rolling window fed to the model
        self.rolling_size = 64

        # optimization info: base KL weight (annealed by epoch in the loss fn)
        self.lmbda = 1e-4

        # layers: enable planar normalizing flows with this many transforms
        self.use_PNF = True
        self.PNF_layers = 10

        self.encoder = Encoder(x_dim=self.x_dim, h_dim=self.h_dim, dense_dim=self.dense_dim, z_dim=self.z_dim,
                               rolling_size=self.rolling_size, use_pnf=self.use_PNF, pnf_layers=self.PNF_layers)
        self.decoder = Decoder(x_dim=self.x_dim, h_dim=self.h_dim, dense_dim=self.dense_dim, z_dim=self.z_dim,
                               rolling_size=self.rolling_size, )

    def nll_bernoulli(self, theta, x):
        """Bernoulli negative log-likelihood (unused by forward())."""
        return - torch.sum(x * torch.log(theta) + (1 - x) * torch.log(1 - theta))

    def nll_gaussian_1(self, mean, std, x):
        """Gaussian NLL treating `std` as log-variance (unused by forward())."""
        return 0.5 * (torch.sum(std) + torch.sum(((x - mean) / std.mul(0.5).exp_()) ** 2))

    def nll_gaussian_2(self, mean, std, x):
        """Gaussian NLL treating `std` as a standard deviation (unused by forward())."""
        return torch.sum(torch.log(std) + (x - mean).pow(2) / (2 * std.pow(2)))

    def mse(self, mean, x):
        """Mean reconstruction error; the likelihood term used by forward()."""
        # Functional form; the parameterless MSELoss module needed no
        # .to(device) move in the first place.
        return nn.functional.mse_loss(mean, x)

    def kld_gaussian(self, mean_1, std_1, mean_2, std_2):
        """KL( N(mean_1, std_1^2) || N(mean_2, std_2^2) ).

        When the second distribution is omitted (None), it defaults to the
        standard normal N(0, I).  All `std` arguments are standard
        deviations (the encoder/decoder emit Softplus outputs), NOT
        log-variances.
        """
        if mean_2 is not None and std_2 is not None:
            return 0.5 * torch.sum(
                2 * torch.log(std_2) - 2 * torch.log(std_1)
                + (std_1.pow(2) + (mean_1 - mean_2).pow(2)) / std_2.pow(2) - 1)
        # BUG FIX: the original used the log-variance VAE formula
        # (1 + std - mean^2 - exp(std)) even though every caller passes a
        # Softplus std, which mis-weighted the KL regularizer.  This is
        # the same KL written in terms of std.
        return -0.5 * torch.sum(1 + 2 * torch.log(std_1) - mean_1.pow(2) - std_1.pow(2))

    def kld_gaussian_1(self, mean, std):
        """KL( N(mean, std^2) || N(0, I) ) with `std` a standard deviation.

        Aligned with kld_gaussian: the original variant treated `std` as a
        variance (1 + log(std) - mean^2 - std), inconsistent with the
        Softplus std produced elsewhere in this file.  (Unused by forward().)
        """
        return -0.5 * torch.sum(1 + 2 * torch.log(std) - mean.pow(2) - std.pow(2))

    def kld_gaussian_2(self, mean_1, std_1, mean_2, std_2):
        """KL between two diagonal Gaussians given their stds (unused by forward())."""
        kld_element = (
            2 * torch.log(std_2) - 2 * torch.log(std_1) + (std_1.pow(2) +
                                                           (mean_1 - mean_2).pow(2)) / std_2.pow(2) - 1)
        return 0.5 * torch.sum(kld_element)

    def forward(self, input_x):
        """Run one encode/decode pass over a (batch, seq_len, x_dim) window.

        Returns ((nll_loss, kld_loss, z, z_mean, z_std, x_mean, x_std), x_reconstruct).
        """
        kld_loss = 0  # KL term of the ELBO
        nll_loss = 0  # negative log-likelihood term of the ELBO (MSE surrogate)
        z, z_mean, z_std = self.encoder(input_x)
        x, x_mean, x_std = self.decoder(z)
        nll_loss += self.mse(x=input_x, mean=x_mean)
        # NOTE(review): the KL is applied to the decoder's output
        # distribution as well as the encoder's — unusual for a VAE, but
        # preserved from the original implementation.
        kld_loss += self.kld_gaussian(mean_1=z_mean, std_1=z_std, mean_2=None, std_2=None) + self.kld_gaussian(
            mean_1=x_mean, std_1=x_std, mean_2=None, std_2=None)
        return (nll_loss, kld_loss, z, z_mean, z_std, x_mean, x_std), x


def compute_omni_anomaly_loss(model, x, **kwargs):
    """Annealed ELBO loss for one batch: NLL + lmbda * (epoch + 1) * KL.

    The KL weight grows linearly with the (1-based) epoch number taken
    from kwargs, so regularization ramps up over training.
    """
    anneal = kwargs.get('epoch', 0) + 1
    (nll, kld, *_rest), _reconstruction = model(x)
    return nll + model.lmbda * anneal * kld


def get_optimizer(model):
    """Build the project-standard Adam optimizer over the model's parameters."""
    return torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-8)


def build_omni_anomaly_model(input_size, **kwargs):
    """Instantiate OmniAnomaly on the target device along with its optimizer."""
    net = OmniAnomaly(input_size=input_size).to(device)
    return net, get_optimizer(net)


def train_and_test_omni_anomaly_model(normal_dataloader: tud.DataLoader,
                                      attack_dataloader: tud.DataLoader,
                                      epochs: int,
                                      dataset_name: str,
                                      data_dir: str,
                                      continue_train: bool) -> AnomalyConfusionMatrix:
    """Train (or resume) OmniAnomaly and evaluate it on the attack set.

    Thin wrapper wiring this module's builder and loss functions into the
    shared train_and_test_model driver; returns its confusion matrix.
    """
    return train_and_test_model(
        build_model=build_omni_anomaly_model,
        compute_loss=compute_omni_anomaly_loss,
        model_name='OmniAnomaly',
        normal_dl=normal_dataloader,
        attack_dl=attack_dataloader,
        epochs=epochs,
        dataset=dataset_name,
        data_dir=data_dir,
        continue_train=continue_train,
        is_recur=True)


if __name__ == "__main__":
    # Demo run on the MSL C-1 channel.
    data_path = './data_demo/MSL/C-1'
    normal_dl, attack_dl = create_dataloader(
        ds_path=data_path, timemode=False, reverse_label=False)

    # BUG FIX: the original call omitted the required `data_dir` and
    # `continue_train` arguments of train_and_test_omni_anomaly_model,
    # raising TypeError before training could start.
    # NOTE(review): data_dir is assumed to be the dataset path — confirm
    # against train_and_test_model's expectations.
    train_and_test_omni_anomaly_model(normal_dataloader=normal_dl,
                                      attack_dataloader=attack_dl,
                                      epochs=EPOCHS,
                                      dataset_name='MSL_C-1',
                                      data_dir=data_path,
                                      continue_train=False)
