import torch
import torch.nn as nn
from collections import OrderedDict
import numpy as np

class DFConvAE(nn.Module):
    """Symmetric 1-D convolutional autoencoder.

    The encoder stacks three Conv1d -> BatchNorm1d -> ELU -> MaxPool1d stages
    and projects the flattened feature map down to a ``latent_dim`` vector.
    The decoder mirrors it with MaxUnpool1d (reusing the pooling indices the
    encoder saved) followed by ConvTranspose1d stages, right-padding with
    zeros where pooling floored a few samples away, so the reconstruction
    comes back at exactly ``single_data_length`` samples.
    """

    def __init__(self, single_data_length=5000, latent_dim=10, kernel_size=8, alpha_=1.0, dropout=0.1):
        """
        Args:
            single_data_length: length of each single-channel input sequence.
            latent_dim: size of the bottleneck embedding.
            kernel_size: kernel size shared by every conv, pool and unpool layer.
            alpha_: alpha for all ELU activations.
            dropout: dropout probability used throughout.
        """
        super(DFConvAE, self).__init__()

        self.single_data_length = single_data_length
        self.in_channels = 1
        self.latent_dim = latent_dim

        self.kernel_size = kernel_size
        self.conv_stride = 1
        self.pool_stride = 4
        self.padding = 1
        self.alpha_ = alpha_
        self.dropout = dropout

        self.filter = [32, 64, 128, 256]
        # Size of the encoder output once flattened.  This was previously
        # hard-coded to 128 * 75, which is only correct for
        # single_data_length=5000; it is now derived from the actual input
        # length so other lengths work too (identical value for 5000).
        self.flatten_dim = self.filter[2] * self._encoded_length(single_data_length)

        # Encoder
        self.Conv1 = nn.Sequential(
            nn.Conv1d(in_channels=self.in_channels, out_channels=self.filter[0],
                      kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
            nn.BatchNorm1d(self.filter[0]),
            nn.ELU(alpha=self.alpha_)
        )

        # return_indices=True: the decoder's MaxUnpool1d layers need them.
        self.Maxpool1 = nn.MaxPool1d(kernel_size=self.kernel_size, stride=self.pool_stride, return_indices=True)
        self.Dropout1 = nn.Dropout(p=self.dropout)

        self.Conv2 = nn.Sequential(
            nn.Conv1d(in_channels=self.filter[0], out_channels=self.filter[1],
                      kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
            nn.BatchNorm1d(self.filter[1]),
            nn.ELU(alpha=self.alpha_)
        )

        self.Maxpool2 = nn.MaxPool1d(kernel_size=self.kernel_size, stride=self.pool_stride, return_indices=True)
        self.Dropout2 = nn.Dropout(p=self.dropout)

        self.Conv3 = nn.Sequential(
            nn.Conv1d(in_channels=self.filter[1], out_channels=self.filter[2],
                      kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
            nn.BatchNorm1d(self.filter[2]),
            nn.ELU(alpha=self.alpha_)
        )

        self.Maxpool3 = nn.MaxPool1d(kernel_size=self.kernel_size, stride=self.pool_stride, return_indices=True)
        self.Dropout3 = nn.Dropout(p=self.dropout)

        # NOTE(review): Dropout4 is never used in Encoder/Decoder; kept so
        # existing code referencing the attribute keeps working.
        self.Dropout4 = nn.Dropout(p=self.dropout)

        # Embedding & Dense
        self.Linear_encode = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=self.flatten_dim, out_features=self.latent_dim),
            nn.Dropout(p=self.dropout)
        )

        self.Linear_decode = nn.Sequential(
            nn.Linear(in_features=self.latent_dim, out_features=self.flatten_dim),
            nn.Unflatten(dim=1, unflattened_size=(self.filter[2], self.flatten_dim//self.filter[2]))
        )

        # Decoder
        self.UnMaxpool3 = nn.MaxUnpool1d(kernel_size=self.kernel_size, stride=self.pool_stride)

        self.UnConv3 = nn.Sequential(
            nn.ELU(alpha=self.alpha_),
            nn.BatchNorm1d(self.filter[2]),
            nn.ConvTranspose1d(in_channels=self.filter[2], out_channels=self.filter[1],
                               kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
        )

        self.UnMaxpool2 = nn.MaxUnpool1d(kernel_size=self.kernel_size, stride=self.pool_stride)

        self.UnConv2 = nn.Sequential(
            nn.ELU(alpha=self.alpha_),
            nn.BatchNorm1d(self.filter[1]),
            nn.ConvTranspose1d(in_channels=self.filter[1], out_channels=self.filter[0],
                               kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
        )

        self.UnMaxpool1 = nn.MaxUnpool1d(kernel_size=self.kernel_size, stride=self.pool_stride)

        self.UnConv1 = nn.Sequential(
            nn.ELU(alpha=self.alpha_),
            nn.BatchNorm1d(self.filter[0]),
            nn.ConvTranspose1d(in_channels=self.filter[0], out_channels=self.in_channels,
                               kernel_size=self.kernel_size, stride=self.conv_stride, padding=self.padding),
        )

    def _encoded_length(self, length):
        """Temporal length remaining after the three conv+pool encoder stages."""
        for _ in range(3):
            # Conv1d (stride 1): L -> L - kernel_size + 2*padding + 1
            length = length - self.kernel_size + 2 * self.padding + 1
            # MaxPool1d:         L -> (L - kernel_size) // pool_stride + 1
            length = (length - self.kernel_size) // self.pool_stride + 1
        return length

    def forward(self, x, latent=False):
        """Return the latent embedding if ``latent`` is True, else the reconstruction."""
        latent_x = self.Encoder(x)
        if latent:
            return latent_x

        return self.Decoder(latent_x)

    def Encoder(self, x):
        """Encode a (N, 1, L) batch to a (N, latent_dim) embedding.

        Side effect: stores the max-pooling indices (self.indices1..3) for
        the decoder's unpooling stages, so Decoder must be called after
        Encoder on the same batch.
        """
        x = self.Conv1(x)
        x, self.indices1 = self.Maxpool1(x)
        x = self.Dropout1(x)

        x = self.Conv2(x)
        x, self.indices2 = self.Maxpool2(x)
        x = self.Dropout2(x)

        x = self.Conv3(x)
        x, self.indices3 = self.Maxpool3(x)
        x = self.Dropout3(x)

        return self.Linear_encode(x)

    def Decoder(self, x):
        """Decode a (N, latent_dim) embedding back to (N, 1, single_data_length)."""
        x = self.Linear_decode(x)

        # Pooling floors away a few trailing samples at each stage; right-pad
        # with zeros so each unpool sees the length its saved indices expect.
        if x.shape[2] < self.indices3.shape[2]:
            x = nn.functional.pad(x, (0, self.indices3.shape[2] - x.shape[2]))

        x = self.UnMaxpool3(x, self.indices3)
        x = self.UnConv3(x)

        if x.shape[2] < self.indices2.shape[2]:
            # Pad to the same length as indices2.
            x = nn.functional.pad(x, (0, self.indices2.shape[2] - x.shape[2]))

        x = self.UnMaxpool2(x, self.indices2)
        x = self.UnConv2(x)

        if x.shape[2] < self.indices1.shape[2]:
            x = nn.functional.pad(x, (0, self.indices1.shape[2] - x.shape[2]))

        x = self.UnMaxpool1(x, self.indices1)
        x = self.UnConv1(x)

        # Final zero-pad so the reconstruction exactly matches the input length.
        x = nn.functional.pad(x, (0, self.single_data_length - x.shape[2]))
        return x
            

class LinearAE(nn.Module):
    """Fully-connected autoencoder with mirrored encoder/decoder MLPs."""

    def __init__(self, input_dims=4096, latent_dim=128, alpha_=1.0):
        """
        Args:
            input_dims: size of each flat input vector.
            latent_dim: size of the bottleneck embedding.
            alpha_: alpha for all ELU activations.
        """
        super(LinearAE, self).__init__()

        self.input_dim = input_dims
        self.latent_dim = latent_dim
        self.alpha_ = alpha_

        self.hidden_dims = [1024, 512, 256, self.latent_dim]

        # Encoder: the first stage is Linear + ELU only; every later stage
        # follows the activation with BatchNorm and Dropout.
        enc_layers = OrderedDict()
        prev_dim = self.input_dim
        for idx, dim in enumerate(self.hidden_dims):
            enc_layers["Linear_{}".format(idx)] = nn.Linear(in_features=prev_dim, out_features=dim)
            enc_layers["Activation_{}".format(idx)] = nn.ELU(alpha=self.alpha_)
            if idx > 0:
                enc_layers["BatchNorm_{}".format(idx)] = nn.BatchNorm1d(dim)
                enc_layers["Dropout_{}".format(idx)] = nn.Dropout(p=0.2)
            prev_dim = dim

        self.Encoder = nn.Sequential(enc_layers)

        # Decoder walks the hidden sizes in reverse: middle stages are
        # Linear + BatchNorm + ELU, the second-to-last skips the BatchNorm,
        # and the final stage is a bare Linear back to the input size.
        dec_layers = OrderedDict()
        reversed_dims = self.hidden_dims[::-1]
        last = len(reversed_dims) - 1
        for idx, dim in enumerate(reversed_dims):
            if idx == last:
                dec_layers["Linear_{}".format(idx)] = nn.Linear(in_features=dim, out_features=self.input_dim)
                continue
            next_dim = reversed_dims[idx + 1]
            dec_layers["Linear_{}".format(idx)] = nn.Linear(in_features=dim, out_features=next_dim)
            if idx != last - 1:
                dec_layers["BatchNorm_{}".format(idx)] = nn.BatchNorm1d(next_dim)
            dec_layers["Activation_{}".format(idx)] = nn.ELU(alpha=self.alpha_)

        self.Decoder = nn.Sequential(dec_layers)

    def forward(self, x, latent=False):
        """Return the latent code if ``latent`` is True, else the reconstruction."""
        encoded = self.Encoder(x)
        if latent:
            return encoded

        return self.Decoder(encoded)


def AE_test(test_obj="DF"):
    """Smoke-test an autoencoder: print the latent and reconstruction shapes.

    Args:
        test_obj: "DF" for the convolutional DFConvAE, "Linear" for LinearAE.

    Raises:
        ValueError: if ``test_obj`` is not a recognized model name.
        (Previously an unknown value fell through and crashed with a
        NameError on ``AE``.)
    """
    sample_tensor = torch.randn(5, 5000)

    if test_obj == "DF":
        # Conv1d expects (batch, channels, length): add the channel axis.
        sample_tensor = sample_tensor.unsqueeze(1)

        print(sample_tensor.shape[1])
        AE = DFConvAE(single_data_length=5000, latent_dim=10, kernel_size=8, alpha_=1.0)

    elif test_obj == "Linear":
        AE = LinearAE(input_dims=5000, latent_dim=128, alpha_=1.0)

    else:
        raise ValueError("test_obj must be 'DF' or 'Linear', got {!r}".format(test_obj))

    # Running place
    latent = AE(sample_tensor, latent=True)

    print(latent.shape)

    print("=============================")
    output = AE(sample_tensor, latent=False)

    print(output.shape)
        
        
if __name__ == "__main__":
    # Run the convolutional-autoencoder smoke test when executed as a script.
    AE_test(test_obj="DF")