from model.meta import nn
from torch.nn import functional as F
import torch
class ResNetVAE(nn.Module):
    """Convolutional VAE over windowed multi-channel sequences, with a
    classifier head on the latent code.

    ``forward`` takes input of shape ``(batch, window_len, num_channels)``,
    reshapes it to ``(batch, num_channels, window_len, 1)`` and runs three
    Conv2d encoder stages (valid convs, stride 1, kernel heights 6,3 / 6,3 /
    3,3).  The latent code is sampled via the reparameterization trick, fed
    to a linear classifier (returned), and decoded back through mirrored
    ConvTranspose2d stages; the reconstruction and the VAE statistics are
    cached on the instance for :meth:`loss_function`.

    NOTE(review): ``nn`` is imported from ``model.meta``, not ``torch.nn`` —
    its ``Sequential`` is called with an extra ``meta_loss`` argument in
    :meth:`encode`, which plain ``torch.nn.Sequential`` would reject.
    Confirm against that module.
    """

    def __init__(self, window_len, num_channels, num_classes, out_channels=64, latent_dim=128):
        """
        :param window_len: temporal length of one input window
        :param num_channels: number of input channels (Conv2d ``in_channels``)
        :param num_classes: size of the classifier output
        :param out_channels: base channel width of the first encoder stage
        :param latent_dim: dimensionality of the latent code ``z``
        """
        super(ResNetVAE, self).__init__()
        # Exposed via get_feature(); never assigned elsewhere in this class.
        self.feature = None

        self.latent_dim = latent_dim

        # Encoder stage 1: num_channels -> out_channels, kernels (6,1) then (3,1).
        self.encoder1 = nn.Sequential(
            nn.Conv2d(in_channels=num_channels, out_channels=out_channels, kernel_size=(6, 1), stride=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True),
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True)
        )

        # Encoder stage 2: out_channels -> out_channels*2, kernels (6,1) then (3,1).
        self.encoder2 = nn.Sequential(
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels*2, kernel_size=(6, 1), stride=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(out_channels*2),
            nn.ReLU(True),
            nn.Conv2d(in_channels=out_channels*2, out_channels=out_channels*2, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(out_channels*2),
            nn.ReLU(True)
        )

        # Encoder stage 3: out_channels*2 -> out_channels*4, kernels (3,1) then (3,1).
        self.encoder3 = nn.Sequential(
            nn.Conv2d(in_channels=out_channels*2, out_channels=out_channels*4, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(out_channels*4),
            nn.ReLU(True),
            nn.Conv2d(in_channels=out_channels*4, out_channels=out_channels*4, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(out_channels*4),
            nn.ReLU(True)
        )
        # Track the temporal size through the six valid (no-padding, stride-1)
        # convs above: L_out = (L_in - kernel) // stride + 1.
        window_len = (window_len - 6) // 1 + 1  # encoder1 conv (6,1)
        window_len = (window_len - 3) // 1 + 1  # encoder1 conv (3,1)
        window_len = (window_len - 6) // 1 + 1  # encoder2 conv (6,1)
        window_len = (window_len - 3) // 1 + 1  # encoder2 conv (3,1)
        window_len = (window_len - 3) // 1 + 1  # encoder3 conv (3,1)
        window_len = (window_len - 3) // 1 + 1  # encoder3 conv (3,1)

        # Reparameterization heads.  Despite its name, fc_std produces the
        # LOG-VARIANCE: reparameterize() and loss_function() both apply exp()
        # to its output.
        self.fc_mean = nn.Linear(out_channels*4*window_len, self.latent_dim)
        self.fc_std = nn.Linear(out_channels*4*window_len, self.latent_dim)
        self.num_channels = num_channels
        self.out_channels = out_channels
        # Projects z back to the flattened encoder-3 feature size for decoding.
        self.decoder_input = nn.Linear(self.latent_dim, out_channels*4*window_len)

        # Decoder stage 3 (mirror of encoder3): kernels (3,1) then (3,1).
        self.decoder3 = nn.Sequential(
            nn.ConvTranspose2d(out_channels*4, out_channels*4, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0), output_padding=(0, 0)),
            nn.BatchNorm2d(out_channels*4),
            nn.ReLU(),
            nn.ConvTranspose2d(out_channels*4, out_channels*2, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0), output_padding=(0, 0)),
            nn.BatchNorm2d(out_channels*2),
            nn.ReLU()
        )

        # Decoder stage 2 (mirror of encoder2): kernels (3,1) then (6,1).
        self.decoder2 = nn.Sequential(
            nn.ConvTranspose2d(out_channels*2, out_channels*2, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0), output_padding=(0, 0)),
            nn.BatchNorm2d(out_channels*2),
            nn.ReLU(),
            nn.ConvTranspose2d(out_channels*2, out_channels, kernel_size=(6, 1), stride=(1, 1), padding=(0, 0), output_padding=(0, 0)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU()
        )

        # Decoder stage 1 (mirror of encoder1): kernels (3,1) then (6,1).
        # NOTE(review): the final stage outputs 1 channel while the encoder
        # input has num_channels; loss_function() relies on broadcasting in
        # F.mse_loss to compare them — confirm this is intended.
        self.decoder1 = nn.Sequential(
            nn.ConvTranspose2d(out_channels, out_channels, kernel_size=(3, 1), stride=(1, 1), padding=(0, 0), output_padding=(0, 0)),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.ConvTranspose2d(out_channels, 1, kernel_size=(6, 1), stride=(1, 1), padding=(0, 0), output_padding=(0, 0)),
            nn.BatchNorm2d(1),
            nn.ReLU()
        )
        # Classifier head on the sampled latent code.
        self.fc = nn.Sequential(
            nn.Linear(self.latent_dim, num_classes)
        )

        # Tensors cached by forward() and consumed by loss_function():
        # input, reconstruction, latent mean, latent log-variance.
        self.x, self.re_x, self.mean, self.std = 0, 0, 0, 0

    def get_feature(self):
        """Return self.feature (initialized to None; not set in this class)."""
        return self.feature

    def encode(self, x, meta_loss):
        """Run the three encoder stages and project to latent statistics.

        :param x: input of shape (B, num_channels, window_len, 1)
        :param meta_loss: forwarded to each (model.meta) Sequential stage
        :return: (mean, log_var) — both (B, latent_dim); the second value is
            a log-variance despite the ``fc_std`` name (see reparameterize).
        """
        x = self.encoder1(x, meta_loss)  # temporal dim: W -> W-7
        x = self.encoder2(x, meta_loss)  # -> W-14
        x = self.encoder3(x, meta_loss)  # -> W-18
        x = torch.flatten(x, start_dim=1)
        mean = self.fc_mean(x)
        std = self.fc_std(x)
        return mean, std

    def decode(self, z):
        """
        Maps the given latent codes
        onto the image space.
        :param z: (Tensor) [B x D]
        :return: (Tensor) [B x C x H x W]
        """
        z = self.decoder_input(z)
        # Un-flatten to (B, out_channels*4, reduced_window_len, 1).
        z = z.view(z.shape[0], self.out_channels*4, -1, 1)
        z = self.decoder3(z)
        z = self.decoder2(z)
        re_x = self.decoder1(z)
        return re_x

    def reparameterize(self, mean, std):
        """
        Reparameterization trick: sample from N(mean, sigma^2) using N(0, 1).

        :param mean: (Tensor) latent mean [B x D]
        :param std: (Tensor) latent LOG-VARIANCE [B x D] — exp(0.5 * std)
            converts it to a standard deviation below.
        :return: (Tensor) [B x D]
        """
        std = torch.exp(0.5 * std)
        standard_distribution = torch.randn_like(std)  # eps ~ N(0, 1)
        return standard_distribution * std + mean

    def loss_function(self) -> tuple:
        """Return (reconstruction_loss, kld_loss) computed from the tensors
        cached by the most recent forward() call.
        """
        recons, input, mu, log_var = self.re_x, self.x, self.mean, self.std

        recons_loss = F.mse_loss(recons, input)
        # KL(N(mu, sigma^2) || N(0, 1)): summed over latent dims, mean over batch.
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)

        return recons_loss, kld_loss

    def forward(self, x, meta_loss=None):
        """Classify x and cache VAE tensors for loss_function().

        :param x: (B, window_len, num_channels)
        :param meta_loss: optional; when given, decoding runs under no_grad
        :return: class logits of shape (B, num_classes)
        """
        # (B, W, C) -> (B, 1, W, C) -> (B, C, W, 1)
        x = x.unsqueeze(1)
        x = x.permute(0, 3, 2, 1)
        self.x = x
        self.mean, self.std = self.encode(x, meta_loss)
        z = self.reparameterize(self.mean, self.std)

        out = z.view(z.size(0), -1)
        out = self.fc(out)
        # Normalize over the full tensor (including the batch dim), exactly as
        # the previous per-call ``nn.LayerNorm(out.size())`` did: a fresh
        # LayerNorm has weight=1 and bias=0, so F.layer_norm is identical.
        # This avoids allocating a module every forward pass and removes the
        # hard-coded .cpu()/.cuda() round trip, so the model now runs on the
        # device its inputs live on.
        out = F.layer_norm(out, out.size())
        if meta_loss is not None:
            # Reconstruction is detached from the meta-loss graph.
            with torch.no_grad():
                re_block_feature = self.decode(z)
        else:
            re_block_feature = self.decode(z)
        self.re_x = re_block_feature
        return out
if __name__ == '__main__':
    # Smoke test.  forward() returns ONLY the classifier logits; the VAE
    # tensors (x, re_x, mean, std) are cached on the model instance, and
    # loss_function() takes no arguments — it reads those cached attributes.
    # (The previous code unpacked a 2-tuple from forward() and passed four
    # arguments to loss_function(), both of which raised at runtime.)
    x = torch.rand(8, 64, 113)       # (batch, window_len, num_channels)
    model = ResNetVAE(64, 113, 18)
    out = model(x)                   # (8, 18) class logits
    print(out.shape)
    print(model.loss_function())     # (reconstruction_loss, kld_loss)