import os.path

import torch
import torch.utils.data as tud
from torch import nn

from model_utils import EPOCHS, AnomalyConfusionMatrix, create_all_dataloader, device, train_and_test_model

# Shared hyperparameters for the encoder/decoder LSTMs and the
# variational bottleneck defined below.
lstm_hidden_size = 256  # hidden units per LSTM layer (encoder and decoder)
fc1_size = 64  # latent dimension: size of the mu / log_var projections

lstm_num_layers = 8  # stacked layers in each LSTM


def reparameterize(mu, log_var):
    """Draw a latent sample z ~ N(mu, sigma^2) via the reparameterization trick.

    The standard deviation is recovered from the log-variance as
    exp(log_var / 2); the stochastic part is an independent standard-normal
    sample, so gradients flow through mu and log_var only.
    """
    sigma = torch.exp(0.5 * log_var)
    noise = torch.randn_like(sigma)
    return noise * sigma + mu


def compute_lstm_vae_loss(x: torch.Tensor, model: nn.Module, **kwargs):
    """Forward batch *x* through *model* and return its VAE training loss.

    The model is expected to return ``((mu, log_var), reconstruction)``;
    extra keyword arguments are accepted for interface compatibility with
    the shared training driver and ignored.
    """
    encoded, reconstruction = model(x)
    mu, log_var = encoded
    return vae_loss(pred=reconstruction, target=x, mu=mu, log_var=log_var)


def vae_loss(pred: torch.Tensor, target: torch.Tensor, mu, log_var):
    """Standard VAE objective: reconstruction error plus KL divergence.

    The reconstruction term is the mean squared error between the decoder
    output and the original input; the KL term is the closed-form divergence
    between N(mu, exp(log_var)) and a standard normal, summed over all
    elements of mu / log_var.

    NOTE(review): the MSE is averaged over elements while the KL term is
    summed, so their relative weighting depends on batch/sequence size —
    this matches the original implementation and is preserved here.

    :param pred: decoder reconstruction of the input batch
    :param target: the original input batch
    :param mu: latent means produced by the encoder
    :param log_var: latent log-variances produced by the encoder
    :return: scalar loss tensor (reconstruction + KL)
    """
    # F.mse_loss avoids building (and device-moving) a fresh nn.MSELoss
    # module on every call; its default 'mean' reduction matches
    # nn.MSELoss() exactly, and a parameter-free loss needs no .to(device).
    reconstruct_loss = nn.functional.mse_loss(pred, target)
    kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
    return reconstruct_loss + kl_div


def init_hidden(batch_size: int):
    """Build a fresh (h_0, c_0) state pair for an LSTM forward pass.

    Both tensors have shape (lstm_num_layers, batch_size, lstm_hidden_size)
    and are drawn uniformly from [0, 1). NOTE(review): random (rather than
    zero) initial states are unusual — confirm this is intentional.
    """
    shape = (lstm_num_layers, batch_size, lstm_hidden_size)
    h0 = torch.rand(*shape).to(device)
    c0 = torch.rand(*shape).to(device)
    return h0, c0


class LSTMVAE(torch.nn.Module):
    """Sequence autoencoder: LSTM encoder -> Gaussian latent -> LSTM decoder.

    ``encode`` maps every timestep of the input to a (mu, log_var) pair of
    size ``fc1_size``; ``forward`` samples the latent sequence with the
    reparameterization trick and reconstructs the input with the decoder.
    """

    def __init__(self, input_size: int):
        super().__init__()
        self.name = 'LSTM-VAE'
        # Encoder: LSTM over the input sequence followed by two parallel
        # linear heads producing the per-timestep mean and log-variance.
        self.encoder_lstm = nn.LSTM(input_size=input_size,
                                    hidden_size=lstm_hidden_size,
                                    num_layers=lstm_num_layers,
                                    batch_first=True)
        self.encoder_relu = nn.ReLU()
        self.encoder_fc11 = nn.Linear(lstm_hidden_size, fc1_size)
        self.encoder_fc12 = nn.Linear(lstm_hidden_size, fc1_size)

        # Decoder: LSTM over the latent sequence, projected back to the
        # original feature size.
        self.decoder_lstm = nn.LSTM(input_size=fc1_size,
                                    hidden_size=lstm_hidden_size,
                                    num_layers=lstm_num_layers,
                                    batch_first=True)
        self.decoder_relu = nn.ReLU()
        self.decoder_fc21 = nn.Linear(lstm_hidden_size, input_size)

    def encode(self, x):
        """Return per-timestep (mu, log_var) tensors for input batch *x*."""
        outputs, _ = self.encoder_lstm(x, init_hidden(batch_size=x.shape[0]))
        activated = self.encoder_relu(outputs)
        return self.encoder_fc11(activated), self.encoder_fc12(activated)

    def decode(self, z):
        """Reconstruct the input sequence from latent sequence *z*."""
        outputs, _ = self.decoder_lstm(z, init_hidden(batch_size=z.shape[0]))
        return self.decoder_fc21(self.decoder_relu(outputs))

    def forward(self, x):
        """Encode *x*, sample the latent, decode; returns ((mu, log_var), decoded)."""
        mu, log_var = self.encode(x)
        z = reparameterize(mu, log_var)
        return (mu, log_var), self.decode(z)


def build_lstm_vae_model(input_size, **kwargs):
    """Instantiate an LSTM-VAE on the target device along with its optimizer.

    Extra keyword arguments are accepted for interface compatibility with
    the shared training driver and ignored.

    :param input_size: number of features per timestep
    :return: (model, Adam optimizer) pair
    """
    model = LSTMVAE(input_size=input_size).to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=1e-4,
                                 betas=(0.9, 0.99))
    return model, optimizer


def train_and_test_lstm_vae_model(normal_dataloader: tud.DataLoader,
                                  attack_dataloader: tud.DataLoader,
                                  epochs: int,
                                  continue_train: bool,
                                  dataset_name: str,
                                  data_dir: str) -> AnomalyConfusionMatrix:
    """Train and evaluate the LSTM-VAE through the shared model_utils driver.

    Wires this module's loss and builder functions into
    ``train_and_test_model`` and returns the resulting confusion matrix.
    """
    run_config = dict(model_name='LSTM-VAE',
                      normal_dl=normal_dataloader,
                      attack_dl=attack_dataloader,
                      epochs=epochs,
                      is_recur=True,
                      compute_loss=compute_lstm_vae_loss,
                      build_model=build_lstm_vae_model,
                      continue_train=continue_train,
                      dataset=dataset_name,
                      data_dir=data_dir)
    return train_and_test_model(**run_config)
