#! py -3
# coding:utf-8

import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim


class UNet(nn.Module):
    """U-Net (Ronneberger et al., 2015): four encoder ("contracting") stages,
    a bottleneck, and four decoder ("expansive") stages with skip connections.

    Unlike the paper, every 3x3 convolution here is created with padding=1,
    so the spatial size is preserved inside each block and only changed by
    max-pooling (halved) and by the 2x2 transposed convolutions (doubled).
    Input H and W must therefore be divisible by 16 for the skip-connection
    sizes to line up.
    """

    def contracting_block(self, in_channels, out_channels, kernel_size=3, padding=0):
        """Build one encoder block: conv -> BN -> ReLU -> conv -> BN -> ReLU.

        BatchNorm2d normalizes each channel over (N, H, W); its learnable
        per-channel gamma/beta vectors have size ``out_channels``.  It keeps
        activations well scaled and guards against gradient overflow.
        """
        return torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels,
                            out_channels=out_channels, padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=out_channels,
                            out_channels=out_channels, padding=padding),
            torch.nn.BatchNorm2d(out_channels),
            torch.nn.ReLU(),
        )

    def expansive_block(self, in_channels, mid_channel, out_channels, kernel_size=3, padding=0):
        """Build one decoder block:
        conv -> BN -> ReLU -> conv -> BN -> ReLU -> deconv(2x2, stride 2).

        The transposed convolution doubles the spatial size:
            H_out = (H_in - 1)*stride - 2*padding
                    + dilation*(kernel_size - 1) + output_padding + 1
                  = (H_in - 1)*2 + (2 - 1) + 1 = 2*H_in
        This 2x2-kernel / stride-2 setting is the one used in the U-Net paper
        (an earlier variant with a 3x3 kernel, padding=1, output_padding=1
        gives the same output size).
        """
        return torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel),
            torch.nn.ReLU(),
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channel,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel),
            torch.nn.ReLU(),
            torch.nn.ConvTranspose2d(in_channels=mid_channel, out_channels=out_channels,
                                     kernel_size=2, stride=2, padding=0, output_padding=0)
        )

    def final_block(self, in_channels, mid_channel, out_channels, kernel_size=3, padding=0):
        """Build the output head: two conv -> BN -> ReLU stages followed by a
        1x1 convolution (the U-Net paper's setting) + BN producing the
        per-pixel logits.  No final ReLU: the logits feed Softmax2d in
        ``forward``.
        """
        return torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel),
            torch.nn.ReLU(),
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channel,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel),
            torch.nn.ReLU(),
            torch.nn.Conv2d(kernel_size=1, in_channels=mid_channel,
                            out_channels=out_channels, padding=0),
            torch.nn.BatchNorm2d(out_channels),
        )

    def __init__(self, in_channel=1, out_channel=2):
        """Assemble the paper's structure: four encoder stages (64/128/256/512
        channels), a 1024-channel bottleneck, and four decoder stages."""
        super(UNet, self).__init__()
        # Encoder: each stage keeps spatial size (padding=1) and is followed
        # by a 2x2 max-pool that halves it.
        self.conv_encode1 = self.contracting_block(in_channels=in_channel, out_channels=64, padding=1)
        self.conv_maxpool1 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv_encode2 = self.contracting_block(64, 128, padding=1)
        self.conv_maxpool2 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv_encode3 = self.contracting_block(128, 256, padding=1)
        self.conv_maxpool3 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv_encode4 = self.contracting_block(256, 512, padding=1)
        self.conv_maxpool4 = torch.nn.MaxPool2d(kernel_size=2)
        # Bottleneck: 512 -> 1024 -> 1024, then upsample back to 512 channels
        # at twice the spatial size.
        self.bottleneck = torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=3, in_channels=512, out_channels=1024, padding=1),
            torch.nn.BatchNorm2d(1024),
            torch.nn.ReLU(),
            torch.nn.Conv2d(kernel_size=3, in_channels=1024, out_channels=1024, padding=1),
            torch.nn.BatchNorm2d(1024),
            torch.nn.ReLU(),
            torch.nn.ConvTranspose2d(in_channels=1024, out_channels=512, kernel_size=2, stride=2,
                                     padding=0, output_padding=0)
        )
        # Decoder: the input channel count of each stage is doubled by the
        # skip-connection concatenation in forward().
        self.conv_decode4 = self.expansive_block(1024, 512, 256, padding=1)
        self.conv_decode3 = self.expansive_block(512, 256, 128, padding=1)
        self.conv_decode2 = self.expansive_block(256, 128, 64, padding=1)
        self.final_layer = self.final_block(128, 64, out_channel, padding=1)
        self.non_linear_layer = torch.nn.Softmax2d()  # per-pixel class probabilities

    def crop_and_concat(self, target, dest, crop=False):
        """Center-crop ``dest`` to ``target``'s spatial size, then concatenate
        the two along the channel dimension (skip connection).

        Negative values passed to F.pad crop instead of padding.  With the
        padding=1 convolutions used here the two sizes already match, so the
        crop amount is 0 and the pad call is a no-op.
        """
        if crop:
            c = (dest.size()[2] - target.size()[2]) // 2
            dest = F.pad(dest, [-c, -c, -c, -c])  # left/right/top/bottom; negative = crop
        return torch.cat((target, dest), 1)

    def forward(self, x):
        """Run the full U-Net.

        Returns per-pixel class probabilities of shape
        (N, out_channel, H, W); H and W must be divisible by 16.
        """
        # (The original `x = x.to()` was removed: Tensor.to() with no
        # arguments returns the tensor unchanged.)
        # Encoder
        encode_block1 = self.conv_encode1(x)              # -> 64 channels
        encode_pool1 = self.conv_maxpool1(encode_block1)  # spatial / 2
        encode_block2 = self.conv_encode2(encode_pool1)   # -> 128 channels
        encode_pool2 = self.conv_maxpool2(encode_block2)  # spatial / 2
        encode_block3 = self.conv_encode3(encode_pool2)   # -> 256 channels
        encode_pool3 = self.conv_maxpool3(encode_block3)  # spatial / 2
        encode_block4 = self.conv_encode4(encode_pool3)   # -> 512 channels
        encode_pool4 = self.conv_maxpool4(encode_block4)  # spatial / 2
        # Bottleneck (back up to encode_block4's resolution, 512 channels)
        bottleneck1 = self.bottleneck(encode_pool4)
        # Decoder: concatenate the skip connection, then conv + upsample.
        decode_block4 = self.crop_and_concat(bottleneck1, encode_block4, crop=True)  # 1024 channels
        cat_layer3 = self.conv_decode4(decode_block4)     # -> 256 channels, spatial x2
        decode_block3 = self.crop_and_concat(cat_layer3, encode_block3, crop=True)   # 512 channels
        cat_layer2 = self.conv_decode3(decode_block3)     # -> 128 channels, spatial x2
        decode_block2 = self.crop_and_concat(cat_layer2, encode_block2, crop=True)   # 256 channels
        cat_layer1 = self.conv_decode2(decode_block2)     # -> 64 channels, spatial x2
        decode_block1 = self.crop_and_concat(cat_layer1, encode_block1, crop=True)   # 128 channels
        final_layer = self.final_layer(decode_block1)     # -> out_channel logits
        return self.non_linear_layer(final_layer)


########################################################################################################################
########################################################################################################################
########################################################################################################################
########################################################################################################################
# A UNet variant with fewer downsampling steps, to save compute and memory
class LessUNet(nn.Module):
    """Reduced U-Net: three encoder stages (32/64/128 channels), a
    256-channel bottleneck, and two decoder stages.

    The downsampling depth is reduced (relative to the full four-stage
    U-Net) to save compute and memory.  All 3x3 convolutions use padding=1,
    so spatial size only changes at the max-pool (/2) and transposed
    convolution (x2) steps; input H and W must be divisible by 8.
    """

    def contracting_block(self, in_channels, out_channels, kernel_size=3, padding=0, momentum=0.1,
                          track_running_stats=True):
        """Build one encoder block: conv -> BN -> ReLU -> conv -> BN -> ReLU.

        ``momentum`` and ``track_running_stats`` are forwarded to BatchNorm2d
        so callers can slow down the running-statistics update (see
        ``__init__``, which uses momentum=0.01).
        """
        return torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels,
                            out_channels=out_channels, padding=padding),
            torch.nn.BatchNorm2d(out_channels, momentum=momentum,
                                 track_running_stats=track_running_stats),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=out_channels,
                            out_channels=out_channels, padding=padding),
            torch.nn.BatchNorm2d(out_channels, momentum=momentum,
                                 track_running_stats=track_running_stats),
            torch.nn.ReLU(inplace=True),
        )

    def expansive_block(self, in_channels, mid_channel, out_channels, kernel_size=3, padding=0, momentum=0.1,
                        track_running_stats=True):
        """Build one decoder block: two conv -> BN -> ReLU stages followed by
        a 2x2 / stride-2 transposed convolution that doubles the spatial
        size:
            H_out = (H_in - 1)*2 - 2*0 + (2 - 1) + 0 + 1 = 2*H_in
        (the 2x2-kernel setting of the U-Net paper).
        """
        return torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel, momentum=momentum,
                                 track_running_stats=track_running_stats),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channel,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel, momentum=momentum,
                                 track_running_stats=track_running_stats),
            torch.nn.ReLU(inplace=True),
            torch.nn.ConvTranspose2d(in_channels=mid_channel, out_channels=out_channels,
                                     kernel_size=2, stride=2, padding=0, output_padding=0)
        )

    def final_block(self, in_channels, mid_channel, out_channels, kernel_size=3, padding=0, momentum=0.1,
                    track_running_stats=True):
        """Build the output head: two conv -> BN -> ReLU stages, then a 1x1
        convolution (the U-Net paper's setting) + BN producing the per-pixel
        logits.  No final ReLU: the logits feed the non-linear output layer.
        """
        return torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=in_channels,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel, momentum=momentum,
                                 track_running_stats=track_running_stats),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(kernel_size=kernel_size, in_channels=mid_channel,
                            out_channels=mid_channel, padding=padding),
            torch.nn.BatchNorm2d(mid_channel, momentum=momentum,
                                 track_running_stats=track_running_stats),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(kernel_size=1, in_channels=mid_channel,
                            out_channels=out_channels, padding=0),
            torch.nn.BatchNorm2d(out_channels, momentum=momentum,
                                 track_running_stats=track_running_stats),
        )

    def __init__(self, in_channel=1, out_channel=2):
        super(LessUNet, self).__init__()
        # Encoder.  momentum=0.01 makes the BatchNorm running statistics
        # update more slowly than the default 0.1.
        self.conv_encode1 = self.contracting_block(in_channels=in_channel, out_channels=32, padding=1,
                                                   momentum=0.01)
        self.conv_maxpool1 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv_encode2 = self.contracting_block(32, 64, padding=1, momentum=0.01)
        self.conv_maxpool2 = torch.nn.MaxPool2d(kernel_size=2)
        self.conv_encode3 = self.contracting_block(64, 128, padding=1, momentum=0.01)
        self.conv_maxpool3 = torch.nn.MaxPool2d(kernel_size=2)
        # Bottleneck: 128 -> 256 -> 256, then upsample back to 128 channels
        # at twice the spatial size.
        self.bottleneck = torch.nn.Sequential(
            torch.nn.Conv2d(kernel_size=3, in_channels=128, out_channels=256, padding=1),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(inplace=True),
            torch.nn.Conv2d(kernel_size=3, in_channels=256, out_channels=256, padding=1),
            torch.nn.BatchNorm2d(256),
            torch.nn.ReLU(inplace=True),
            torch.nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=2, stride=2,
                                     padding=0, output_padding=0)
        )
        # Decoder: the input channel count of each stage is doubled by the
        # skip-connection concatenation in forward().
        self.conv_decode3 = self.expansive_block(256, 128, 64, padding=1)
        self.conv_decode2 = self.expansive_block(128, 64, 32, padding=1)
        self.final_layer = self.final_block(64, 32, out_channel, padding=1)
        self.non_linear_layer = torch.nn.Softmax2d()  # per-pixel class probabilities

    def crop_and_concat(self, target, dest, crop=False):
        """Optionally center-crop ``dest`` to ``target``'s spatial size, then
        concatenate the two along the channel dimension (skip connection).

        Negative values passed to F.pad crop instead of padding.  With the
        padding=1 convolutions used here the sizes already match, so
        ``forward`` passes crop=False.
        """
        if crop:
            c = (dest.size()[2] - target.size()[2]) // 2
            dest = F.pad(dest, [-c, -c, -c, -c])  # left/right/top/bottom; negative = crop
        return torch.cat((target, dest), 1)

    def forward(self, x):
        """Run the reduced U-Net.

        Returns per-pixel class probabilities of shape
        (N, out_channel, H, W); H and W must be divisible by 8.
        """
        # (The original `x = x.to()` was removed: Tensor.to() with no
        # arguments returns the tensor unchanged.)
        # Encoder
        encode_block1 = self.conv_encode1(x)              # -> 32 channels
        encode_pool1 = self.conv_maxpool1(encode_block1)  # spatial / 2
        encode_block2 = self.conv_encode2(encode_pool1)   # -> 64 channels
        encode_pool2 = self.conv_maxpool2(encode_block2)  # spatial / 2
        encode_block3 = self.conv_encode3(encode_pool2)   # -> 128 channels
        encode_pool3 = self.conv_maxpool3(encode_block3)  # spatial / 2
        # Bottleneck (back up to encode_block3's resolution, 128 channels)
        bottleneck1 = self.bottleneck(encode_pool3)
        # Decoder: concatenate the skip connection, then conv + upsample.
        decode_block3 = self.crop_and_concat(bottleneck1, encode_block3, crop=False)  # 256 channels
        cat_layer2 = self.conv_decode3(decode_block3)     # -> 64 channels, spatial x2
        decode_block2 = self.crop_and_concat(cat_layer2, encode_block2, crop=False)   # 128 channels
        cat_layer1 = self.conv_decode2(decode_block2)     # -> 32 channels, spatial x2
        decode_block1 = self.crop_and_concat(cat_layer1, encode_block1, crop=False)   # 64 channels
        final_layer = self.final_layer(decode_block1)     # -> out_channel logits
        return self.non_linear_layer(final_layer)


# Hybrid U-Net: adds a pass-through (gating) path alongside the U-Net
class MixUNet1(nn.Module):
    """Hybrid network: a small U-Net predicts 32-channel features that gate
    the raw input through a sigmoid, followed by a residual refinement head
    that produces a single sigmoid-activated output channel.
    """

    def __init__(self):
        super(MixUNet1, self).__init__()
        # Hand-written instance normalization is used because the built-in
        # alternatives caused export trouble (per the original author:
        # TensorRT 7 dislikes dynamic InstanceNorm2d, and GroupNorm hit an
        # unsupported ATen op during ONNX export).
        self.instn0 = MyInstanceNorm2d()
        self.unet_2layer = LessUNet(in_channel=1, out_channel=32)
        # Swap the U-Net's softmax output layer for an identity so its raw
        # 32-channel features pass straight through.
        self.unet_2layer.add_module("non_linear_layer", MyNopLayer())
        self.conv1 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=1, kernel_size=1, padding=0)
        self.bn1 = nn.BatchNorm2d(32)
        self.bn2 = nn.BatchNorm2d(1)

    def forward(self, x):
        normalized = self.instn0(x)
        features = self.unet_2layer(normalized)
        # Soft-gate the ORIGINAL (un-normalized) input with the U-Net output;
        # broadcasting expands the single input channel to 32.
        gated = torch.sigmoid(features) * x
        refined = F.relu(self.bn1(self.conv1(gated)) + gated)  # residual step
        logits = self.bn2(self.conv2(refined))
        return torch.sigmoid(logits)


class MyInstanceNorm2d(nn.Module):
    """Hand-rolled instance normalization without affine parameters.

    Normalizes every (H, W) plane of each sample/channel to zero mean and
    unit (biased) variance.  Exists because the built-in nn.InstanceNorm2d
    caused export problems elsewhere in this file.
    """

    def __init__(self, eps=1e-5):
        super(MyInstanceNorm2d, self).__init__()
        self.eps = eps  # numerical-stability term added to the variance

    def forward(self, input):
        assert type(input) is torch.Tensor
        assert len(input.shape) == 4
        # Per-(sample, channel) statistics over the spatial dimensions,
        # broadcast back to (N, C, 1, 1).
        centered = input - torch.mean(input, [2, 3])[:, :, None, None]
        variance = torch.mean(centered * centered, [2, 3])[:, :, None, None]
        return centered / torch.sqrt(variance + self.eps)


# Applies a flattened (per-sample) softmax to the U-Net output
class MySoftmax1d(nn.Module):
    """Softmax over all non-batch elements: flattens each sample, applies
    softmax across the flattened dimension, then restores the shape."""

    def forward(self, x):
        original_shape = x.shape
        flat = x.view(original_shape[0], -1)
        return F.softmax(flat, 1).view(original_shape)


# Applies a sigmoid to the U-Net output
class MySigmoid(nn.Module):
    """Element-wise sigmoid wrapped as a module (usable in nn.Sequential,
    e.g. as a U-Net output activation)."""

    def forward(self, x):
        return x.sigmoid()


# Applies a mix of relu and sigmoid to the U-Net output
class MyReluMixSigmoid(nn.Module):
    """Applies sigmoid to channels 0 and 1 separately, re-concatenates them,
    then applies sigmoid once more to the result.

    NOTE(review): despite the name there is no ReLU anywhere in this block,
    and the output is sigmoid applied twice — confirm the double application
    is intentional before reusing.
    """

    def forward(self, x):
        per_channel = torch.cat(
            (torch.sigmoid(x[:, 0, :, :]).unsqueeze(1),
             torch.sigmoid(x[:, 1, :, :]).unsqueeze(1)),
            1,
        )
        return torch.sigmoid(per_channel)


# A layer that does nothing (identity)
class MyNopLayer(nn.Module):
    """Identity layer: returns its input unchanged.

    Used to disable a sub-network's final non-linearity by replacing it
    (see MixUNet1's use of add_module)."""

    def forward(self, x):
        return x


class Criterion1(nn.Module):
    """Root-of-sum-of-squares loss: sqrt(sum((y - x)**2)), i.e. the
    Frobenius norm of the residual between prediction and target."""

    def forward(self, x, y):
        """Return the scalar loss between prediction ``x`` and target ``y``
        (any matching shapes).

        The original implementation also built a ``multiy`` weight tensor
        with torch.where() on every call but never used it; that dead
        computation has been removed.
        """
        squared_error = (y - x) ** 2
        return torch.sqrt(torch.sum(squared_error))


class Criterion2(nn.Module):
    """Squared-error loss for a two-channel prediction interpreted as a
    per-pixel affine map of the original image:
    reconstruction = orgimg * x[:, 0] + x[:, 1], compared against ``y``."""

    def forward(self, orgimg, x, y):
        gain = x[:, 0, :, :].unsqueeze(1)  # channel 0: multiplicative term
        bias = x[:, 1, :, :].unsqueeze(1)  # channel 1: additive term
        reconstruction = orgimg * gain + bias
        batch = reconstruction.shape[0]
        diff = reconstruction.view(batch, -1) - y.view(batch, -1)
        return torch.sum(diff ** 2)
