from typing import Tuple
import torch
import torch.nn as nn
import torch.utils.data as tud

from convolution_lstm import ConvLSTM
from model_utils import AnomalyConfusionMatrix, create_dataloader, device, train_and_test_model


def attention(conv_lstm_out):
    """Temporal attention over a stack of ConvLSTM step outputs.

    Scores every time step against the last step (dot product over the whole
    feature map, scaled by the number of steps), softmax-normalizes the
    scores, and returns the score-weighted sum of all steps shaped like one
    single step (C, H, W).

    NOTE(review): assumes `conv_lstm_out` is 4-D (steps, C, H, W) — the final
    reshape hard-codes three trailing dimensions.
    """
    num_steps = conv_lstm_out.shape[0]
    last_step = conv_lstm_out[-1]
    # Scaled similarity of each step with the final step.
    scores = torch.stack(
        [torch.sum(step * last_step) / num_steps for step in conv_lstm_out])
    weights = torch.softmax(scores, dim=0).reshape(-1, num_steps)
    flattened = conv_lstm_out.reshape(num_steps, -1)
    blended = torch.matmul(weights, flattened)
    shape = conv_lstm_out.shape
    return torch.reshape(blended, (shape[1], shape[2], shape[3]))


class CnnEncoder(nn.Module):
    """Four-stage convolutional encoder for signature matrices.

    Each stage is a 2-D convolution followed by SELU; stages 2-4 downsample
    with stride 2. Channel widths grow 32 -> 64 -> 128 -> ``out_channels``.
    The per-stage hyper-parameters are kept in ``convN_config`` dicts so the
    decoder can mirror them.
    """

    def __init__(self, input_size: int, in_channels: int, out_channels: int):
        super(CnnEncoder, self).__init__()
        self.input_size = input_size
        self.conv1_config = dict(kernel_size=(3, 3), stride=(1, 1), padding=1)
        self.conv2_config = dict(kernel_size=(3, 3), stride=(2, 2), padding=1)
        self.conv3_config = dict(kernel_size=(2, 2), stride=(2, 2), padding=1)
        self.conv4_config = dict(kernel_size=(2, 2), stride=(2, 2), padding=1)
        # Registration order (conv1..conv4) is kept for state-dict and
        # parameter-iteration compatibility.
        self.conv1 = self._build_stage(in_channels, 32, self.conv1_config)
        self.conv2 = self._build_stage(32, 64, self.conv2_config)
        self.conv3 = self._build_stage(64, 128, self.conv3_config)
        self.conv4 = self._build_stage(128, out_channels, self.conv4_config)

    @staticmethod
    def _build_stage(in_channels, out_channels, config):
        """One encoder stage: Conv2d configured from `config`, then SELU."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=config['kernel_size'],
                      stride=config['stride'],
                      padding=config['padding']),
            nn.SELU()
        )

    def forward(self, x):
        """Return the feature maps of all four stages, finest to coarsest."""
        stage1 = self.conv1(x)
        stage2 = self.conv2(stage1)
        stage3 = self.conv3(stage2)
        stage4 = self.conv4(stage3)
        return stage1, stage2, stage3, stage4


class ConvLSTMUnit(nn.Module):
    """One ConvLSTM per encoder scale, each followed by temporal attention.

    Hidden-channel counts (32/64/128/256) mirror the CnnEncoder stage widths.
    NOTE(review): `step`/`effective_step` semantics come from the project's
    ConvLSTM implementation — confirm there.
    """

    def __init__(self):
        super(ConvLSTMUnit, self).__init__()
        self.conv1_lstm = ConvLSTM(input_channels=32, hidden_channels=[32],
                                   kernel_size=3, step=5, effective_step=[4])
        self.conv2_lstm = ConvLSTM(input_channels=64, hidden_channels=[64],
                                   kernel_size=3, step=5, effective_step=[4])
        self.conv3_lstm = ConvLSTM(input_channels=128, hidden_channels=[128],
                                   kernel_size=3, step=5, effective_step=[4])
        self.conv4_lstm = ConvLSTM(input_channels=256, hidden_channels=[256],
                                   kernel_size=3, step=5, effective_step=[4])

    def forward(self, conv1_out, conv2_out,
                conv3_out, conv4_out):
        """Return one attention-pooled map per scale, each with a leading
        singleton batch dimension."""
        pooled = []
        for lstm, feature_map in ((self.conv1_lstm, conv1_out),
                                  (self.conv2_lstm, conv2_out),
                                  (self.conv3_lstm, conv3_out),
                                  (self.conv4_lstm, conv4_out)):
            lstm_out = lstm(feature_map)
            # lstm returns ((outputs, ...), ...); attend over outputs[0].
            pooled.append(attention(lstm_out[0][0]).unsqueeze(0))
        return tuple(pooled)


class CnnDecoder(nn.Module):
    """U-Net-style decoder that mirrors CnnEncoder with transposed convs.

    The constructor first replays the encoder's size arithmetic (the
    ``'size'`` entries) to know the feature-map side length each encoder
    stage produced, then computes the side length each transposed conv would
    emit (``'up_size'``); wherever the two disagree, ``output_padding`` is
    bumped to (1, 1) so that skip-connection concatenation lines up exactly.
    """

    def __init__(self, input_size: int, in_channels: int, out_channels: int):
        super(CnnDecoder, self).__init__()
        self.input_size = input_size
        self.deconv1_config = {
            'kernel_size': (3, 3),
            'stride': (1, 1),
            'padding': (1, 1),
        }
        self.deconv2_config = {
            'kernel_size': (3, 3),
            'stride': (2, 2),
            'padding': (1, 1),
            'output_padding': (0, 0)
        }
        self.deconv3_config = {
            'kernel_size': (2, 2),
            'stride': (2, 2),
            'padding': (1, 1),
            'output_padding': (0, 0)
        }
        self.deconv4_config = {
            'kernel_size': (2, 2),
            'stride': (2, 2),
            'padding': (1, 1),
            'output_padding': (0, 0)
        }
        configs = [self.deconv1_config, self.deconv2_config,
                   self.deconv3_config, self.deconv4_config]
        # Replay the encoder's downsampling chain: side length after each stage.
        side = input_size
        for cfg in configs:
            side = int((side + 2 * cfg['padding'][0] - cfg['kernel_size'][0])
                       / cfg['stride'][0] + 1)
            cfg['size'] = side
        # Side length a transposed conv with these settings would produce.
        for cfg in configs:
            cfg['up_size'] = ((cfg['size'] - 1) * cfg['stride'][0]
                              - 2 * cfg['padding'][0] + cfg['kernel_size'][0])
        # Pad the output wherever upsampling would undershoot the skip map.
        for deeper, shallower in ((self.deconv4_config, self.deconv3_config),
                                  (self.deconv3_config, self.deconv2_config),
                                  (self.deconv2_config, self.deconv1_config)):
            if deeper['up_size'] != shallower['size']:
                deeper['output_padding'] = (1, 1)
        # Registration order (deconv4..deconv1) kept for state-dict parity.
        self.deconv4 = self._up_stage(in_channels, 128, self.deconv4_config,
                                      self.deconv4_config['output_padding'])
        self.deconv3 = self._up_stage(256, 64, self.deconv3_config,
                                      self.deconv3_config['output_padding'])
        self.deconv2 = self._up_stage(128, 32, self.deconv2_config,
                                      self.deconv2_config['output_padding'])
        self.deconv1 = self._up_stage(64, out_channels, self.deconv1_config,
                                      (0, 0))

    @staticmethod
    def _up_stage(in_channels, out_channels, config, output_padding):
        """One decoder stage: ConvTranspose2d from `config`, then SELU."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=config['kernel_size'],
                               stride=config['stride'],
                               padding=config['padding'],
                               output_padding=output_padding),
            nn.SELU()
        )

    def forward(self, conv1_lstm_out, conv2_lstm_out, conv3_lstm_out, conv4_lstm_out):
        """Upsample the coarsest map, concatenating the matching encoder-scale
        map (skip connection) before each of the next stages."""
        up4 = self.deconv4(conv4_lstm_out)
        up3 = self.deconv3(torch.cat((up4, conv3_lstm_out), dim=1))
        up2 = self.deconv2(torch.cat((up3, conv2_lstm_out), dim=1))
        return self.deconv1(torch.cat((up2, conv1_lstm_out), dim=1))


class MSCRED(nn.Module):
    """Multi-Scale Convolutional Recurrent Encoder-Decoder.

    Pipeline: raw windows -> signature matrices -> CnnEncoder ->
    per-scale ConvLSTM + attention -> CnnDecoder reconstruction.
    """

    def __init__(self, input_size, in_channels_encoder, in_channels_decoder):
        super(MSCRED, self).__init__()
        self.input_size = input_size
        self.name = 'MSCRED'
        self.cnn_encoder = CnnEncoder(input_size=input_size,
                                      in_channels=in_channels_encoder,
                                      out_channels=in_channels_decoder)
        self.conv_lstm = ConvLSTMUnit()
        self.cnn_decoder = CnnDecoder(input_size=input_size,
                                      in_channels=in_channels_decoder,
                                      out_channels=in_channels_encoder)

    def forward(self, x):
        """Return ('nothing', reconstruction); callers index [1] for the
        reconstructed signature matrix (placeholder kept for compatibility)."""
        signatures = generate_signature_matrix_node(x)
        encoded = self.cnn_encoder(signatures)
        attended = self.conv_lstm(*encoded)
        reconstruction = self.cnn_decoder(*attended)
        # just for fun :-)
        return 'nothing', reconstruction


def compute_mscred_loss(model, x, **kwargs):
    """Mean squared error between the model's reconstruction and the most
    recent signature matrix of the batch.

    The model's forward returns ('nothing', reconstruction), so only the
    second element is compared, and only against the last matrix.
    """
    target = generate_signature_matrix_node(x)[-1].unsqueeze(0)
    _, reconstruction = model(x)
    return torch.mean((reconstruction - target) ** 2)


def mscred_y_pred_func(model: nn.Module, data, **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
    """Anomaly score for one batch: pairwise distance between the flattened
    reconstruction and the last flattened signature matrix, together with
    the ground-truth label of the window's last element.
    """
    x = data[0].to(device)
    y_true = data[1].int().to(device)[-1]
    _, reconstruction = model(x)

    signatures = generate_signature_matrix_node(x)
    flat_rec = reconstruction.view(reconstruction.shape[0], -1)
    flat_sig = signatures.view(signatures.shape[0], -1)
    score = torch.pairwise_distance(flat_rec, flat_sig[-1])
    return score.unsqueeze(dim=-1), y_true


def get_optimizer(model):
    """Adam optimizer over all model parameters at the MSCRED rate (2e-4)."""
    return torch.optim.Adam(model.parameters(), lr=2e-4)


def build_mscred_model(input_size, **kwargs):
    """Construct a fresh MSCRED (single-channel signatures, 256 bottleneck
    channels) on the default device and pair it with its optimizer."""
    model = MSCRED(input_size=input_size,
                   in_channels_encoder=1,
                   in_channels_decoder=256).to(device)
    return model, get_optimizer(model)


def train_and_test_mscred_model(normal_dataloader: tud.DataLoader,
                                attack_dataloader: tud.DataLoader,
                                epochs: int,
                                dataset_name: str,
                                data_dir: str,
                                continue_train: bool) -> AnomalyConfusionMatrix:
    """Train MSCRED on normal data and evaluate on attack data.

    Thin wrapper around the shared ``train_and_test_model`` harness that
    wires in the MSCRED builder, loss, and prediction functions.
    """
    return train_and_test_model(
        model_name='MSCRED',
        build_model=build_mscred_model,
        compute_loss=compute_mscred_loss,
        test_pred_func=mscred_y_pred_func,
        normal_dl=normal_dataloader,
        attack_dl=attack_dataloader,
        epochs=epochs,
        dataset=dataset_name,
        data_dir=data_dir,
        continue_train=continue_train,
        is_recur=True)


def generate_signature_matrix_node(data: torch.Tensor) -> torch.Tensor:
    """Build per-batch signature (inner-product) matrices from sensor windows.

    `data` is (batch, time_len, feature_size) where feature_size is the
    number of sensors; entry (i, j) is the time-averaged product of sensors
    i and j. Returns shape (batch, 1, feature_size, feature_size), with a
    channel dimension inserted for the CNN encoder.
    """
    time_len = data.shape[1]
    # For each batch element: data^T @ data over the time axis, averaged.
    signature = torch.bmm(data.transpose(1, 2), data) / time_len
    return signature.unsqueeze(1)


if __name__ == "__main__":
    # Bug fix: train_and_test_mscred_model requires `data_dir` (it has no
    # default), but the original call omitted it, so running this script
    # raised TypeError before training started. Reuse the dataset path.
    data_path = './data/SMD/machine-1-1/'
    normal_dl, attack_dl = create_dataloader(ds_path=data_path,
                                             timemode=False,
                                             batch_size=64,
                                             reverse_label=False)

    train_and_test_mscred_model(normal_dataloader=normal_dl,
                                attack_dataloader=attack_dl,
                                epochs=1,
                                dataset_name='SMD/machine-1-1',
                                data_dir=data_path,
                                continue_train=False)
