"""
Written by 谦谦君
使用卷积神经网络的变形自动编码器体系结构
"""

import torch
from autoencoder.parameter import Parameter
import torch.nn as nn
from autoencoder.utils import Mixer,Warper,GridSpatialIntegral

######### 编码器体系结构  ###################
class Encoders(nn.Module):
    def __init__(self, parm: Parameter):
        """
        Convolutional encoder stack: one shared encoder followed by two mixers
        that project the shared latent code into a texture code and a warp code.
        :param parm: hyper-parameter collection
        """
        super(Encoders, self).__init__()
        self.encoder = Encoder(parm, channel=parm.channel, ndf=parm.ndf, ndim=parm.zdim)
        self.zImixer = Mixer(parm, in_channel=parm.zdim, out_channel=parm.idim)
        self.zWmixer = Mixer(parm, in_channel=parm.zdim, out_channel=parm.wdim)

    def forward(self, x: torch.Tensor):
        """
        :param x: input image batch
        :return: tuple (z, zImg, zWarp) — shared latent code, texture code, warp code
        """
        self.z = self.encoder(x)
        # BUG FIX: removed leftover debug statements (`print(self.z.shape)` and
        # `exit(0)`) that terminated the whole process on the first forward pass
        # and made the mixer outputs and the return unreachable.
        self.zImg = self.zImixer(self.z)
        self.zWarp = self.zWmixer(self.z)

        return self.z, self.zImg, self.zWarp

class Encoder(nn.Module):
    def __init__(self, parm: Parameter, channel: int, ndf: int, ndim: int):
        """
        Convolutional encoder: five conv stages that downsample the input image
        to a single `ndim`-dimensional code per sample.
        :param parm: hyper-parameter collection (not read here; kept so all
                     modules share a uniform constructor signature)
        :param channel: number of channels of the input image
        :param ndf: base width of the convolutional feature maps
        :param ndim: dimensionality of the output code
        """
        super(Encoder, self).__init__()
        self.ndim = ndim

        # First stage: stride-2 conv without normalisation.
        layers = [
            nn.Conv2d(channel, ndf, kernel_size=4, stride=2, padding=1, bias=False),
            nn.LeakyReLU(0.2, False),
        ]

        # Three stride-2 stages, each doubling the feature width.
        width = ndf
        for _ in range(3):
            layers += [
                nn.Conv2d(width, width * 2, kernel_size=4, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(width * 2),
                nn.LeakyReLU(0.2, False),
            ]
            width *= 2

        # Final projection to the code dimension, squashed into (0, 1).
        layers += [
            nn.Conv2d(width, ndim, kernel_size=4, stride=4, padding=0, bias=False),
            nn.Sigmoid(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor):
        """
        Forward pass.
        :param x: input image tensor
        :return: code tensor flattened to shape (batch, ndim)
        """
        return self.net(x).view(-1, self.ndim)

####################### 解码器体系结构  ######################
class Decoders(nn.Module):
    def __init__(self, parm: Parameter):
        """
        Decoder stack built from convolutional networks: one branch decodes the
        texture image, the other decodes a differential warp field that is
        integrated, clipped and used to resample the texture.
        :param parm: hyper-parameter collection
        """
        super(Decoders, self).__init__()
        self.imageDimension = parm.imgSize
        self.idim = parm.idim
        self.wdim = parm.wdim
        # Texture branch outputs values in [0, 1]; warp branch outputs small
        # non-negative increments in [0, 0.1] that are integrated in forward().
        self.decoderI = Decoder(parm=parm, in_dim=parm.idim, out_channel=parm.channel, ngf=parm.ngf, lb=0, ub=1)
        self.decoderW = Decoder(parm=parm, in_dim=parm.wdim, out_channel=2, ngf=parm.ngf, lb=0, ub=0.1)
        self.warper = Warper(parm)
        self.integrator = GridSpatialIntegral(parm)
        self.cutter = nn.Hardtanh(-1, 1)

    def forward(self, zI: torch.Tensor, zW: torch.Tensor, baseGrid: torch.Tensor):
        """
        :param zI: texture code tensor
        :param zW: warp-field code tensor
        :param baseGrid: base sampling grid
        :return: tuple (texture, resWarping, output, warping)
        """
        texture = self.decoderI(zI.view(-1, self.idim, 1, 1))
        diff_warp = self.decoderW(zW.view(-1, self.wdim, 1, 1)) * (5.0 / self.imageDimension)

        # Integrate the differential warp into an absolute grid, recentre it,
        # then clamp into the valid sampling range [-1, 1].
        warping = self.cutter(self.integrator(diff_warp) - 1.2)

        self.texture = texture
        self.diff_warp = diff_warp
        self.warping = warping
        self.resWarping = warping - baseGrid
        self.output = self.warper(texture, warping)

        return self.texture, self.resWarping, self.output, self.warping

class Decoder(nn.Module):
    def __init__(self, parm: Parameter, in_dim: int, out_channel: int, ngf: int, lb: float = 0, ub: float = 1):
        """
        Convolutional decoder: transposed convolutions that upsample a code
        vector into an image, clamped to [lb, ub] by a final Hardtanh.
        :param parm: hyper-parameter collection (not read here; kept so all
                     modules share a uniform constructor signature)
        :param in_dim: length of the input code
        :param out_channel: number of channels of the decoded image
        :param ngf: base width of the convolutional feature maps
        :param lb: lower bound of the output activation
        :param ub: upper bound of the output activation
        """
        super(Decoder, self).__init__()
        # Seed stage: 1x1 code -> 4x4 feature map at the widest width.
        layers = [
            nn.ConvTranspose2d(in_dim, ngf * 8, kernel_size=4, stride=1, padding=0, bias=False),
            nn.InstanceNorm2d(ngf * 8),
            nn.ReLU(True),
        ]
        # Stride-2 upsampling stages: halve the width three times, then keep it.
        for cin, cout in ((ngf * 8, ngf * 4), (ngf * 4, ngf * 2), (ngf * 2, ngf), (ngf, ngf)):
            layers += [
                nn.ConvTranspose2d(cin, cout, 4, 2, 1, bias=False),
                nn.InstanceNorm2d(cout),
                nn.ReLU(True),
            ]
        # Final 3x3 projection to the output channels, clamped to [lb, ub].
        layers += [
            nn.ConvTranspose2d(ngf, out_channel, 3, 1, 1, bias=False),
            nn.Hardtanh(lb, ub),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor):
        """
        Forward pass.
        :param x: input code tensor of shape (batch, in_dim, 1, 1)
        :return: decoded image tensor
        """
        return self.net(x)
