#
# Created on March 2022
#
# Copyright (c) 2022 Meitar Ronen
#

import torch
import torch.nn as nn
from collections import OrderedDict
import math


class AutoEncoder(nn.Module):
    """Symmetric fully-connected autoencoder.

    The encoder maps ``input_dim`` through ``args.hidden_dims`` down to
    ``args.latent_dim``; the decoder mirrors the hidden layers back up to
    ``input_dim``. ``args`` must provide ``latent_dim``, ``hidden_dims``
    (list of ints) and ``n_clusters``.
    """

    def __init__(self, args, input_dim):
        super(AutoEncoder, self).__init__()
        self.args = args
        self.input_dim = input_dim
        self.output_dim = input_dim
        self.latent_dim = args.latent_dim
        # Bug fix: copy args.hidden_dims instead of appending to it in place.
        # The original mutated the caller's list, so constructing a second
        # AutoEncoder from the same args appended latent_dim a second time.
        self.hidden_dims = list(args.hidden_dims) + [self.latent_dim]
        self.dims_list = (
            self.hidden_dims + self.hidden_dims[:-1][::-1]
        )  # mirrored structure, e.g. [500, 200, 10, 200, 500]
        self.n_layers = len(self.dims_list)
        self.n_clusters = args.n_clusters

        # Sanity checks: odd layer count with the latent dim at the center.
        assert self.n_layers % 2 > 0
        assert self.dims_list[self.n_layers // 2] == args.latent_dim

        # Encoder network: Linear -> ReLU per layer; BatchNorm after every
        # layer except the first (matches the original architecture).
        layers = OrderedDict()
        for idx, hidden_dim in enumerate(self.hidden_dims):
            if idx == 0:
                layers.update(
                    {
                        "linear0": nn.Linear(self.input_dim, hidden_dim),
                        "activation0": nn.ReLU(),
                    }
                )
            else:
                layers.update(
                    {
                        "linear{}".format(idx): nn.Linear(
                            self.hidden_dims[idx - 1], hidden_dim
                        ),
                        "activation{}".format(idx): nn.ReLU(),
                        "bn{}".format(idx): nn.BatchNorm1d(self.hidden_dims[idx]),
                    }
                )
        self.encoder = nn.Sequential(layers)

        # Decoder network: mirror of the encoder; the final layer is purely
        # linear (no activation) so reconstructions are unbounded.
        layers = OrderedDict()
        tmp_hidden_dims = self.hidden_dims[::-1]
        for idx, hidden_dim in enumerate(tmp_hidden_dims):
            if idx == len(tmp_hidden_dims) - 1:
                layers.update(
                    {
                        "linear{}".format(idx): nn.Linear(hidden_dim, self.output_dim),
                    }
                )
            else:
                layers.update(
                    {
                        "linear{}".format(idx): nn.Linear(
                            hidden_dim, tmp_hidden_dims[idx + 1]
                        ),
                        "activation{}".format(idx): nn.ReLU(),
                        "bn{}".format(idx): nn.BatchNorm1d(tmp_hidden_dims[idx + 1]),
                    }
                )
        self.decoder = nn.Sequential(layers)

    def __repr__(self):
        dims = [self.input_dim] + self.dims_list + [self.output_dim]
        repr_str = "[Structure]: " + "-".join(str(d) for d in dims) + "\n"
        repr_str += "[n_layers]: {}".format(self.n_layers) + "\n"
        repr_str += "[n_clusters]: {}".format(self.n_clusters) + "\n"
        repr_str += "[input_dims]: {}".format(self.input_dim)
        return repr_str

    def __str__(self):
        return self.__repr__()

    def forward(self, X, latent=False):
        """Encode ``X``; return the latent code if ``latent`` is True,
        otherwise the full reconstruction."""
        output = self.encoder(X)
        if latent:
            return output
        return self.decoder(output)

    def decode(self, latent_X):
        """Run only the decoder on a batch of latent vectors."""
        return self.decoder(latent_X)


class Flatten(torch.nn.Module):
    """Collapse every non-batch dimension into one and cast to float32."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1).float()


class UnFlatten(torch.nn.Module):
    """Reshape a flat batch into (batch, channel, width, width) images."""

    def __init__(self, channel, width) -> None:
        super().__init__()
        self.channel = channel
        self.width = width

    def forward(self, x):
        target_shape = (-1, self.channel, self.width, self.width)
        return x.reshape(target_shape)


class ConvAutoEncoder(nn.Module):
    """Convolutional autoencoder for flattened 16x16 single-channel inputs.

    ``encode`` stores the max-pooling indices on the instance so that
    ``decode`` can unpool them; ``decode`` is therefore only valid after a
    preceding ``encode`` call on the same instance.
    """

    def __init__(self, args, input_dim):
        super(ConvAutoEncoder, self).__init__()
        self.args = args
        self.input_dim = input_dim
        self.output_dim = self.input_dim
        self.latent_dim = args.latent_dim

        # --- encoder ---
        self.encoder_conv = nn.Sequential(
            UnFlatten(channel=1, width=16),   # [batch, 1, 16, 16]
            nn.Conv2d(1, 32, 5, stride=1),    # [batch, 32, 12, 12]
            nn.BatchNorm2d(32),
            nn.ReLU(),
        )
        # return_indices=True so the decoder can invert the pooling
        self.encoder_maxPool = nn.MaxPool2d(2, stride=2, return_indices=True)  # [batch, 32, 6, 6]
        self.encoder_linear = nn.Sequential(
            Flatten(),                        # [batch, 1152]
            nn.Linear(32 * 6 * 6, self.latent_dim),
        )

        # --- decoder (mirror of the encoder) ---
        self.decoder_linear = nn.Sequential(
            nn.Linear(self.latent_dim, 32 * 6 * 6),
            UnFlatten(channel=32, width=6),   # [batch, 32, 6, 6]
        )
        self.decoder_maxPool = nn.MaxUnpool2d(2, stride=2)  # [batch, 32, 12, 12]
        self.decoder_conv = nn.Sequential(
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.ConvTranspose2d(32, 1, 5, stride=1),  # [batch, 1, 16, 16]
            Flatten(),
        )

    def forward(self, X, latent=False):
        code = self.encode(X)
        return code if latent else self.decode(code)

    def encode(self, X):
        features = self.encoder_conv(X)
        pooled, self.ind = self.encoder_maxPool(features)
        return self.encoder_linear(pooled)

    def decode(self, X):
        unflattened = self.decoder_linear(X)
        unpooled = self.decoder_maxPool(unflattened, self.ind)
        return self.decoder_conv(unpooled)

class DFConvAE(nn.Module):
    """1-D convolutional autoencoder for fixed-length signals.

    The encoder is three Conv1d+BatchNorm+ELU stages, each followed by
    max-pooling (with indices kept for unpooling) and dropout, then a linear
    projection to ``latent_dim``. The decoder mirrors the encoder with
    MaxUnpool1d / ConvTranspose1d stages and restores the original length.
    ``decoder`` relies on the pooling indices stored by the most recent
    ``encoder`` call, so it is only valid after encoding on the same
    instance.

    Args:
        single_data_length: length of each input signal (last dimension).
        latent_dim: size of the latent code.
        kernel_size: kernel size shared by all conv and pool layers.
        alpha_: ELU alpha parameter.
        dropout: dropout probability used throughout.
    """

    def __init__(self, single_data_length=5000, latent_dim=10, kernel_size=8, alpha_=1.0, dropout=0.1):
        super(DFConvAE, self).__init__()

        self.single_data_length = single_data_length
        self.in_channels = 1
        self.latent_dim = latent_dim

        self.kernel_size = kernel_size
        self.conv_stride = 1
        self.pool_stride = 4
        self.padding = 1
        self.alpha_ = alpha_
        self.dropout = dropout

        # Channel widths per stage (filter[3] is currently unused).
        self.filter = [32, 64, 128, 256]
        # Flattened size after the third pooling stage (128 channels x 75
        # steps for the defaults). This value would change if the input size
        # or the kernel/stride settings differ from the defaults.
        self.flatten_dim = 128 * 75

        # Encoder
        self.Conv1 = nn.Sequential(
            nn.Conv1d(in_channels=self.in_channels, out_channels=self.filter[0],
                      kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
            nn.BatchNorm1d(self.filter[0]),
            nn.ELU(alpha=self.alpha_)
        )

        self.Maxpool1 = nn.MaxPool1d(kernel_size=self.kernel_size, stride=self.pool_stride, return_indices=True)
        self.Dropout1 = nn.Dropout(p=self.dropout)

        self.Conv2 = nn.Sequential(
            nn.Conv1d(in_channels=self.filter[0], out_channels=self.filter[1],
                      kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
            nn.BatchNorm1d(self.filter[1]),
            nn.ELU(alpha=self.alpha_)
        )

        self.Maxpool2 = nn.MaxPool1d(kernel_size=self.kernel_size, stride=self.pool_stride, return_indices=True)
        self.Dropout2 = nn.Dropout(p=self.dropout)

        self.Conv3 = nn.Sequential(
            nn.Conv1d(in_channels=self.filter[1], out_channels=self.filter[2],
                      kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
            nn.BatchNorm1d(self.filter[2]),
            nn.ELU(alpha=self.alpha_)
        )

        self.Maxpool3 = nn.MaxPool1d(kernel_size=self.kernel_size, stride=self.pool_stride, return_indices=True)
        self.Dropout3 = nn.Dropout(p=self.dropout)

        # Kept for state_dict/attribute compatibility; not used in the
        # current encode/decode path.
        self.Dropout4 = nn.Dropout(p=self.dropout)

        # Embedding & Dense
        self.Linear_encode = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=self.flatten_dim, out_features=self.latent_dim),
            nn.Dropout(p=self.dropout)
        )

        self.Linear_decode = nn.Sequential(
            nn.Linear(in_features=self.latent_dim, out_features=self.flatten_dim),
            nn.Unflatten(dim=1, unflattened_size=(self.filter[2], self.flatten_dim//self.filter[2]))
        )

        # Decoder
        self.UnMaxpool3 = nn.MaxUnpool1d(kernel_size=self.kernel_size, stride=self.pool_stride)

        self.UnConv3 = nn.Sequential(
            nn.ELU(alpha=self.alpha_),
            nn.BatchNorm1d(self.filter[2]),
            nn.ConvTranspose1d(in_channels=self.filter[2], out_channels=self.filter[1],
                               kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
        )

        self.UnMaxpool2 = nn.MaxUnpool1d(kernel_size=self.kernel_size, stride=self.pool_stride)

        self.UnConv2 = nn.Sequential(
            nn.ELU(alpha=self.alpha_),
            nn.BatchNorm1d(self.filter[1]),
            nn.ConvTranspose1d(in_channels=self.filter[1], out_channels=self.filter[0],
                               kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
        )

        self.UnMaxpool1 = nn.MaxUnpool1d(kernel_size=self.kernel_size, stride=self.pool_stride)

        self.UnConv1 = nn.Sequential(
            nn.ELU(alpha=self.alpha_),
            nn.BatchNorm1d(self.filter[0]),
            nn.ConvTranspose1d(in_channels=self.filter[0], out_channels=self.in_channels,
                               kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
        )

    def forward(self, x, latent=False):
        """Encode ``x``; return the latent code if ``latent`` is True,
        otherwise the full reconstruction."""
        # Bug fix: the original called self.Encoder / self.Decoder, which do
        # not exist (the methods are named ``encoder`` / ``decoder``), so
        # every forward pass raised AttributeError.
        latent_x = self.encoder(x)
        if latent:
            return latent_x
        return self.decoder(latent_x)

    def encoder(self, x):
        """Map a (batch, 1, L) input to a (batch, latent_dim) code.

        Side effect: stores the pooling indices (``self.indices1..3``)
        required by ``decoder``.
        """
        x = self.Conv1(x)
        x, self.indices1 = self.Maxpool1(x)
        x = self.Dropout1(x)

        x = self.Conv2(x)
        x, self.indices2 = self.Maxpool2(x)
        x = self.Dropout2(x)

        x = self.Conv3(x)
        x, self.indices3 = self.Maxpool3(x)
        x = self.Dropout3(x)

        return self.Linear_encode(x)

    def decoder(self, x):
        """Reconstruct a (batch, 1, single_data_length) signal from a code.

        Must be called after ``encoder`` (uses the stored pooling indices).
        """
        x = self.Linear_decode(x)

        # Pooling lengths are floored, so a transposed stage can come back
        # shorter than the corresponding index map; right-pad with zeros to
        # match before unpooling.
        if x.shape[2] < self.indices3.shape[2]:
            x = nn.functional.pad(x, (0, self.indices3.shape[2] - x.shape[2]))
        x = self.UnMaxpool3(x, self.indices3)
        x = self.UnConv3(x)

        # Pad to the same length as indices2 before unpooling.
        if x.shape[2] < self.indices2.shape[2]:
            x = nn.functional.pad(x, (0, self.indices2.shape[2] - x.shape[2]))
        x = self.UnMaxpool2(x, self.indices2)
        x = self.UnConv2(x)

        if x.shape[2] < self.indices1.shape[2]:
            x = nn.functional.pad(x, (0, self.indices1.shape[2] - x.shape[2]))
        x = self.UnMaxpool1(x, self.indices1)
        x = self.UnConv1(x)

        # F.pad with a negative amount truncates, so this trims (or
        # zero-pads) the reconstruction to exactly single_data_length.
        x = nn.functional.pad(x, (0, self.single_data_length - x.shape[2]))
        return x