"""
write by qianqianjun
2020.01.29
"""
import torch.nn as nn
import torch
from autoencoder.parameter import Parameter
from autoencoder.utils import Mixer,Warper,GridSpatialIntegral

""" DenseNet Block 体系结构 """
class DenseEncoderBlock(nn.Module):
    def __init__(self, channels_num, convs_num, activation=nn.ReLU, args=None):
        """
        用于创建密集网络块 编码器体系结构
        :param channels_num:  通道数量
        :param convs_num:  卷积的数量
        :param activation:  激活函数
        :param args:  其他参数
        """
        super(DenseEncoderBlock, self).__init__()
        if args==None:
            args=[False]
        assert convs_num > 0, "n_convs 的值不可以小于 1"
        self.n_channels=channels_num
        self.n_convs=convs_num
        self.layers=nn.ModuleList()
        for i in range(convs_num):
            nn.ModuleList.append(self.layers,nn.Sequential(
                nn.InstanceNorm2d(channels_num),
                activation(*args),
                nn.Conv2d(in_channels=channels_num,
                          out_channels=channels_num,
                          kernel_size=3, stride=1,
                          padding=1, bias=False)
            ))

    def forward(self,inputs):
        """
        前向传播函数
        :param inputs: 输入的 tensor
        :return: 返回网络输出结果
        """
        outputs=[]
        for i,layer in enumerate(self.layers):
            if i > 0:
                next_output=0
                for no in outputs:
                    next_output+=no
                outputs.append(next_output)
            else:
                outputs.append(layer(inputs))
        return outputs[-1]

class DenseEncoderTransitionBlock(nn.Module):
    def __init__(self, channel_in: int, channel_out: int, maxpooling_size: int,
                 activation=nn.ReLU, args=None):
        """
        Transition block between dense encoder blocks: a 1x1 conv changes the
        channel count, then max-pooling shrinks the feature map.

        :param channel_in: number of input channels
        :param channel_out: number of output channels
        :param maxpooling_size: max-pool kernel size (spatial downscale factor)
        :param activation: activation class, instantiated as activation(*args)
        :param args: positional args for the activation constructor
        """
        super(DenseEncoderTransitionBlock, self).__init__()
        if args is None:  # `is None`, not `== None` (PEP 8)
            args = [False]
        self.channel_in = channel_in
        self.channel_out = channel_out
        self.maxpooling_size = maxpooling_size
        self.net = nn.Sequential(
            # instance normalization layer
            nn.InstanceNorm2d(channel_in),
            activation(*args),
            # 1x1 conv sets the output channel count
            nn.Conv2d(channel_in, channel_out, kernel_size=1, stride=1, padding=0, bias=False),
            # max pooling shrinks the feature map by maxpooling_size
            nn.MaxPool2d(maxpooling_size)
        )

    def forward(self, x: torch.Tensor):
        """
        Forward pass.
        :param x: input tensor of shape (N, channel_in, H, W)
        :return: tensor of shape (N, channel_out, H // maxpooling_size, W // maxpooling_size)
        """
        return self.net(x)


class DenseEncoder(nn.Module):
    def __init__(self, parm, channel=1, ndf=32, ndim=128, activation=nn.LeakyReLU,
                 args=None, f_activation=nn.Sigmoid, f_args=None):
        """
        Densely-connected encoder: stem conv, four (dense block, transition)
        stages, then a final activation; flattens to an ndim latent vector.

        :param parm: hyper-parameter set (currently unused here; kept for API parity)
        :param channel: number of input image channels
        :param ndf: channel count of the first dense block
        :param ndim: dimensionality of the output latent code
        :param activation: activation class for the transition blocks
        :param args: positional args for the activation constructor
        :param f_activation: final activation class
        :param f_args: positional args for the final activation constructor
        """
        super(DenseEncoder, self).__init__()
        if args is None:
            args = [0.2, False]
        if f_args is None:
            f_args = []
        self.ndim = ndim
        self.net = nn.Sequential(
            nn.Conv2d(in_channels=channel, out_channels=ndf, kernel_size=4, stride=2, padding=1),
            # dense block stage 1
            DenseEncoderBlock(channels_num=ndf, convs_num=6),
            # transition block
            DenseEncoderTransitionBlock(channel_in=ndf, channel_out=ndf * 2,
                                        maxpooling_size=2, activation=activation, args=args),

            DenseEncoderBlock(ndf * 2, 12),
            DenseEncoderTransitionBlock(ndf * 2, ndf * 4, 2, activation, args),

            DenseEncoderBlock(ndf * 4, 24),
            DenseEncoderTransitionBlock(ndf * 4, ndf * 8, 2, activation, args),

            DenseEncoderBlock(ndf * 8, 16),
            DenseEncoderTransitionBlock(ndf * 8, ndim, 4, activation, args),

            # BUG FIX: f_args was accepted but never forwarded (the original
            # commented "这里省略了 *f_args"); default [] keeps old behavior.
            f_activation(*f_args)
        )

    def forward(self, x: torch.Tensor):
        """
        Forward pass.
        :param x: input image batch
        :return: latent codes of shape (N, ndim)
        """
        out = self.net(x).view(-1, self.ndim)
        return out

##########################   编码器体系结构  ################################
class Encoders(nn.Module):
    def __init__(self, parm: Parameter):
        """
        Encoder half of the deforming autoencoder, built on the DenseNet
        encoder plus two mixers that split the latent code.
        :param parm: hyper-parameter set (channel, ndf, zdim, idim, wdim)
        """
        super(Encoders, self).__init__()
        self.encoder = DenseEncoder(parm, channel=parm.channel, ndf=parm.ndf, ndim=parm.zdim)
        self.zImixer = Mixer(parm, nin=parm.zdim, nout=parm.idim)
        self.zWmixer = Mixer(parm, nin=parm.zdim, nout=parm.wdim)

    def forward(self, x: torch.Tensor):
        """
        Encode a batch of images.
        :param x: input image batch
        :return: (full latent z, texture code zImg, warp code zWarp);
                 all three are also cached as attributes on self
        """
        latent = self.encoder(x)
        self.z = latent
        self.zImg = self.zImixer(latent)
        self.zWarp = self.zWmixer(latent)
        return self.z, self.zImg, self.zWarp



#########################   解码器体系结构   #################################
class Decoders(nn.Module):
    def __init__(self,parm:Parameter):
        """
        Decoder half of the deforming autoencoder: decodes the texture and
        warp latent codes into an image and a sampling grid, then warps.
        :param parm: hyper-parameter set (imgSize, idim, wdim, channel, ngf)
        """
        super(Decoders, self).__init__()
        self.imageDimension=parm.imgSize
        self.idim=parm.idim
        self.wdim=parm.wdim

        # texture decoder: idim latent -> parm.channel-channel image
        self.decoderI=DenseDecoder(parm,nz=parm.idim,nc=parm.channel,ngf=parm.ngf)
        # warp decoder: wdim latent -> 2-channel differential warp field
        self.decoderW=DenseDecoder(parm,nz=parm.wdim,nc=2,ngf=parm.ngf,
                                   activation=nn.Tanh,args=[],f_activation=nn.Sigmoid,f_args=[])

        self.warper=Warper(parm)
        self.integrator=GridSpatialIntegral(parm)

        self.cutter=nn.Hardtanh(-1,1) # original author flagged a bug here -- NOTE(review): unverified, confirm intent

    def forward(self,zI:torch.Tensor,zW:torch.Tensor,
                basegrid:torch.Tensor):
        """
        Forward pass.
        :param zI: texture latent code
        :param zW: warp-field latent code
        :param basegrid: reference sampling grid the warp is measured against
        :return: (texture, residual warp, warped output image, warp grid)
        """
        self.texture=self.decoderI(zI.view(-1,self.idim,1,1))
        # scale decoder output into small per-pixel grid offsets
        self.diffentialWarping=self.decoderW(zW.view(-1,self.wdim,1,1))*(5.0/self.imageDimension)
        # spatially integrate the offsets into an absolute grid, then shift
        self.warping=self.integrator(self.diffentialWarping)-1.2
        # clamp grid coordinates to [-1, 1], the valid sampling range
        self.warping=self.cutter(self.warping)
        self.resWarping=self.warping-basegrid
        self.output=self.warper(self.texture,self.warping)
        return self.texture,self.resWarping,self.output,self.warping

class DenseDecoder(nn.Module):
    def __init__(self, parm: Parameter, nz: int = 128, nc: int = 1, ngf: int = 32,
                 activation=nn.ReLU, args=None, f_activation=nn.Hardtanh, f_args=None):
        """
        Densely-connected decoder: a transposed-conv stem, four (dense block,
        upsampling transition) stages, then a final conv and activation.

        :param parm: hyper-parameter set (currently unused here; kept for API parity)
        :param nz: dimensionality of the latent code
        :param nc: number of output channels
        :param ngf: channel count of the last dense block
        :param activation: activation class for the output head
        :param args: positional args for the activation constructor
        :param f_activation: final activation class
        :param f_args: positional args for the final activation constructor
        """
        super(DenseDecoder, self).__init__()
        if args is None:  # `is None`, not `== None` (PEP 8)
            args = [False]
        if f_args is None:
            f_args = [0, 1]

        self.net = nn.Sequential(
            # transposed conv expands the 1x1 latent to a 4x4 map
            nn.ConvTranspose2d(in_channels=nz, out_channels=ngf * 8, kernel_size=4, stride=1, padding=0, bias=False),

            DenseDecoderBlock(n_channels=ngf * 8, n_convs=16),
            DenseDecoderTransitionBlock(channel_in=ngf * 8, channel_out=ngf * 4),

            DenseDecoderBlock(ngf * 4, 24),
            DenseDecoderTransitionBlock(ngf * 4, ngf * 2),

            DenseDecoderBlock(ngf * 2, 12),
            DenseDecoderTransitionBlock(ngf * 2, ngf),

            DenseDecoderBlock(ngf, 6),
            DenseDecoderTransitionBlock(ngf, ngf),

            nn.InstanceNorm2d(ngf),
            activation(*args),
            nn.ConvTranspose2d(in_channels=ngf, out_channels=nc, kernel_size=3,
                               stride=1, padding=1, bias=False),
            f_activation(*f_args)
        )

    def forward(self, x: torch.Tensor):
        """Decode a latent tensor of shape (N, nz, 1, 1) into an nc-channel image."""
        return self.net(x)


class DenseDecoderBlock(nn.Module):
    def __init__(self, n_channels, n_convs, activation=nn.ReLU, args=None):
        """
        Dense-connectivity decoder block: a stack of (InstanceNorm, activation,
        3x3 transposed conv) sub-layers preserving channel count and size.

        :param n_channels: number of channels, kept constant through the block
        :param n_convs: number of transposed-conv sub-layers (must be >= 1)
        :param activation: activation class, instantiated as activation(*args)
        :param args: positional args for the activation constructor
        """
        super(DenseDecoderBlock, self).__init__()
        if args is None:
            args = [False]
        assert n_convs > 0, "卷积层的层数必须大于 0 "
        self.n_channels = n_channels
        self.n_convs = n_convs
        self.layers = nn.ModuleList()
        for _ in range(self.n_convs):
            # idiomatic bound append instead of nn.ModuleList.append(self.layers, ...)
            self.layers.append(
                nn.Sequential(
                    nn.InstanceNorm2d(n_channels),
                    activation(*args),
                    nn.ConvTranspose2d(in_channels=n_channels, out_channels=n_channels,
                                       kernel_size=3, stride=1, padding=1, bias=False)
                )
            )

    def forward(self, inputs):
        """
        Forward pass with dense connectivity by summation: each sub-layer
        consumes the element-wise sum of all previous sub-layer outputs.

        BUG FIX: the original appended the bare sum for i > 0 and never
        called layers[1:], leaving every layer after the first one unused.
        :param inputs: tensor of shape (N, n_channels, H, W)
        :return: output of the last sub-layer, same shape as inputs
        """
        outputs = []
        for i, layer in enumerate(self.layers):
            if i > 0:
                dense_input = sum(outputs)
                outputs.append(layer(dense_input))
            else:
                outputs.append(layer(inputs))
        return outputs[-1]


class DenseDecoderTransitionBlock(nn.Module):
    def __init__(self, channel_in: int, channel_out: int, activation=nn.ReLU, args=None):
        """
        Transition block between dense decoder blocks: a stride-2 transposed
        conv that changes the channel count and doubles the spatial size.

        :param channel_in: number of input channels
        :param channel_out: number of output channels
        :param activation: activation class, instantiated as activation(*args)
        :param args: positional args for the activation constructor
        """
        super(DenseDecoderTransitionBlock, self).__init__()
        if args is None:  # `is None`, not `== None` (PEP 8)
            args = [False]
        self.net = nn.Sequential(
            nn.InstanceNorm2d(channel_in),
            activation(*args),
            # kernel 4 / stride 2 / padding 1 => exact 2x spatial upsampling
            nn.ConvTranspose2d(channel_in, channel_out, kernel_size=4,
                               stride=2, padding=1, bias=False)
        )

    def forward(self, x: torch.Tensor):
        """
        Forward pass.
        :param x: input tensor of shape (N, channel_in, H, W)
        :return: tensor of shape (N, channel_out, 2H, 2W)
        """
        out = self.net(x)
        return out


"""基础块相关结构"""
class Mixer(nn.Module):
    def __init__(self,parm,nin=128,nout=128):
        """
        全连接网络
        :param parm: 参数集合
        :param nin: 输入size
        :param nout: 输出size
        """
        super(Mixer, self).__init__()
        self.net=nn.Sequential(
            nn.Linear(nin,nout),
            nn.Sigmoid()
        )

    def forward(self,x:torch.Tensor):
        """
        :param x:
        :return:
        """
        out=self.net(x)
        return out

# class Warper(nn.Module):
#     def __init__(self,parm:Parameter):
#         """
#         通过变形场对图像进行变形
#         :param parm: 超参数集合
#         """
#         super(Warper, self).__init__()
#
#     def forward(self, input_image:torch.Tensor,input_grid:torch.Tensor):
#         """
#         前行传播函数
#         :param input_image: 输入的图像 Tensor
#         :param input_grid:  变形场 Tensor
#         :return:
#         """
#         # 下面会有BUG ，一个warning
#         self.warp=input_grid.permute(0,2,3,1)
#         self.output=F.grid_sample(input_image,self.warp)
#         return self.output
#
# class GridSpatialIntegral(nn.Module):
#     def __init__(self,parm:Parameter):
#         """
#         对预测的网格偏移进行积分以获得网格（变形场）
#         :param parm: 超参数集合
#         """
#         super(GridSpatialIntegral, self).__init__()
#         self.parm=parm
#         self.w=parm.imgSize
#
#         # 这里有了较大的改动
#         self.filterx=torch.tensor(np.ones(shape=(1,1,1,self.w)),dtype=torch.float32,requires_grad=False)
#         self.filtery=torch.tensor(np.ones(shape=(1,1,self.w,1)),dtype=torch.float32,requires_grad=False)
#
#         if self.parm.useCuda:
#             self.filterx=self.filterx.cuda()
#             self.filtery=self.filtery.cuda()
#     def forward(self, input_diffgrid:torch.Tensor):
#         """
#         :param input_diffgrid: 输入差分变形场
#         :return: 输出变形场信息
#         """
#         fullx=F.conv_transpose2d(
#             input_diffgrid[:,0,:,:].unsqueeze(1),
#             self.filterx,stride=1,padding=0
#         )
#         fully=F.conv_transpose2d(
#             input_diffgrid[:,1,:,:].unsqueeze(1),
#             self.filtery,stride=1,padding=0
#         )
#
#         output_grid=torch.cat((fullx[:,:,0:self.w,0:self.w],fully[:,:,0:self.w,0:self.w]),1)
#
#         return  output_grid