"""
This code is referenced from https://github.com/assassint2017/MICCAI-LITS2017
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


# 通道注意力模块
# Channel attention module (CBAM channel branch)
class ChannelAttention(nn.Module):
    """Compute a per-channel gate in [0, 1] from global spatial context.

    Average- and max-pooled descriptors are pushed through a shared
    bottleneck MLP (two 1x1x1 convolutions) and merged by addition.
    """

    def __init__(self, in_channels, ratio=16):
        super(ChannelAttention, self).__init__()
        # Global poolings collapse (D, H, W) down to a single voxel.
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.max_pool = nn.AdaptiveMaxPool3d(1)
        # Shared squeeze/excite MLP: C -> C/ratio -> C, no biases.
        self.fc1 = nn.Conv3d(in_channels, in_channels // ratio, 1, bias=False)
        self.relu = nn.ReLU()
        self.fc2 = nn.Conv3d(in_channels // ratio, in_channels, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map x of shape (N, C, D, H, W) to a (N, C, 1, 1, 1) attention map."""
        def excite(pooled):
            # Same MLP weights are applied to both pooled descriptors.
            return self.fc2(self.relu(self.fc1(pooled)))

        gate = excite(self.avg_pool(x)) + excite(self.max_pool(x))
        return self.sigmoid(gate)  # squash to attention probabilities


# 空间注意力模块
# Spatial attention module (CBAM spatial branch)
class SpatialAttention(nn.Module):
    """Compute a single-channel spatial gate in [0, 1].

    Channel-wise mean and max maps are stacked and convolved down to one
    channel, so every voxel gets its own attention weight.
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        # "Same" padding for the two supported odd kernel sizes.
        self.conv1 = nn.Conv3d(2, 1, kernel_size,
                               padding=(kernel_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Map x of shape (N, C, D, H, W) to a (N, 1, D, H, W) attention map."""
        channel_avg = x.mean(dim=1, keepdim=True)
        channel_max = x.max(dim=1, keepdim=True).values
        stacked = torch.cat((channel_avg, channel_max), dim=1)
        return self.sigmoid(self.conv1(stacked))  # squash to probabilities


# 混合注意力模块
# Combined (channel + spatial) attention module
class CBAM(nn.Module):
    """Gate a feature map by channel and spatial attention.

    NOTE(review): both gates are computed from the original input and
    applied jointly (x * channel * spatial), not sequentially as in the
    original CBAM formulation — behavior preserved as-is.
    """

    def __init__(self, in_channels, ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttention(in_channels, ratio=ratio)
        self.spatial_attention = SpatialAttention(kernel_size=kernel_size)

    def forward(self, x):
        """Return x weighted by both attention maps; shape is unchanged."""
        channel_gate = self.channel_attention(x)
        spatial_gate = self.spatial_attention(x)
        return x * channel_gate * spatial_gate


class ResUNet(nn.Module):
    """3D residual U-Net with densely nested (UNet++-style) skip connections,
    CBAM attention after every encoder/decoder stage, and deep supervision:
    in training mode `forward` returns segmentation maps at four scales, all
    upsampled to the input resolution.

    Args:
        in_channel: number of input channels (default 1, e.g. CT volume).
        out_channel: number of output classes per voxel (default 3).
        training: stored on `self.training` — NOTE(review): this shadows the
            flag managed by nn.Module.train()/eval(); F.dropout in `forward`
            reads it directly, so dropout activity follows this constructor
            argument (or a later manual assignment), not train()/eval().
    """

    def __init__(self, in_channel=1, out_channel=3, training=True):
        super().__init__()

        self.training = training
        # Dropout probability. NOTE(review): attribute name typo ("dorp" for
        # "drop") kept as-is — renaming would break external code and
        # checkpoints that reference it.
        self.dorp_rate = 0.2

        # Encoder stage 1 (in_channel -> 32), spatial size preserved
        self.encoder_stage1 = nn.Sequential(
            nn.Conv3d(in_channel, 32, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(32),  # Parametric ReLU

            nn.Conv3d(32, 32, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(32),  # Parametric ReLU
        )
        self.cbam_encoder_stage1 = CBAM(32)

        # Encoder stage 2 (64 -> 64), spatial size preserved
        self.encoder_stage2 = nn.Sequential(
            nn.Conv3d(64, 64, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(64),  # Parametric ReLU

            nn.Conv3d(64, 64, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(64),  # Parametric ReLU

            nn.Conv3d(64, 64, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(64),  # Parametric ReLU
        )
        self.cbam_encoder_stage2 = CBAM(64)
        # Encoder stage 3 (128 -> 128): dilated convs enlarge the receptive
        # field without changing spatial size (padding matches dilation)
        self.encoder_stage3 = nn.Sequential(
            nn.Conv3d(128, 128, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(128),  # Parametric ReLU

            nn.Conv3d(128, 128, 3, 1, padding=2, dilation=2),  # dilated conv
            nn.PReLU(128),  # Parametric ReLU

            nn.Conv3d(128, 128, 3, 1, padding=4, dilation=4),  # dilated conv
            nn.PReLU(128),  # Parametric ReLU
        )

        self.cbam_encoder_stage3 = CBAM(128)

        # Encoder stage 4 (256 -> 256), dilated convs, spatial size preserved
        self.encoder_stage4 = nn.Sequential(
            nn.Conv3d(256, 256, 3, 1, padding=3, dilation=3),  # dilated conv
            nn.PReLU(256),  # Parametric ReLU

            nn.Conv3d(256, 256, 3, 1, padding=4, dilation=4),  # dilated conv
            nn.PReLU(256),  # Parametric ReLU

            nn.Conv3d(256, 256, 3, 1, padding=5, dilation=5),  # dilated conv
            nn.PReLU(256),  # Parametric ReLU
        )

        self.cbam_encoder_stage4 = CBAM(256)

        # Encoder stage 5 / bottleneck (512 -> 512), dilated convs
        self.encoder_stage5 = nn.Sequential(
            nn.Conv3d(512, 512, 3, 1, padding=3, dilation=3),  # dilated conv
            nn.PReLU(512),  # Parametric ReLU

            nn.Conv3d(512, 512, 3, 1, padding=4, dilation=4),  # dilated conv
            nn.PReLU(512),  # Parametric ReLU

            nn.Conv3d(512, 512, 3, 1, padding=5, dilation=5),  # dilated conv
            nn.PReLU(512),  # Parametric ReLU
        )

        self.cbam_encoder_stage5 = CBAM(512)

        # Decoder stage 1 (256 -> 512)
        # NOTE(review): decoder_stage1/3/4 and their CBAM modules are never
        # called in `forward` below — they add parameters but no computation.
        self.decoder_stage1 = nn.Sequential(
            nn.Conv3d(256, 512, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(512),  # Parametric ReLU

            nn.Conv3d(512, 512, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(512),  # Parametric ReLU

            nn.Conv3d(512, 512, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(512),  # Parametric ReLU
        )

        self.cbam_decoder_stage1 = CBAM(512)

        # Decoder stage 2: fuses upsampled bottleneck (256) with the two
        # 128-channel level-2 nodes -> 256
        self.decoder_stage2 = nn.Sequential(
            nn.Conv3d(256 + 128 + 128, 256, 3, 1, padding=1),  # fuse concat
            nn.PReLU(256),  # Parametric ReLU

            nn.Conv3d(256, 256, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(256),  # Parametric ReLU

            nn.Conv3d(256, 256, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(256),  # Parametric ReLU
        )

        self.cbam_decoder_stage2 = CBAM(256)

        # Decoder stage 3 (unused in forward — see note above)
        self.decoder_stage3 = nn.Sequential(
            nn.Conv3d(128 + 64, 128, 3, 1, padding=1),  # fuse concat
            nn.PReLU(128),  # Parametric ReLU

            nn.Conv3d(128, 128, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(128),  # Parametric ReLU

            nn.Conv3d(128, 128, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(128),  # Parametric ReLU
        )

        self.cbam_decoder_stage3 = CBAM(128)

        # Decoder stage 4 (unused in forward — see note above)
        self.decoder_stage4 = nn.Sequential(
            nn.Conv3d(64 + 32, 64, 3, 1, padding=1),  # fuse concat
            nn.PReLU(64),  # Parametric ReLU

            nn.Conv3d(64, 64, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(64),  # Parametric ReLU
        )

        self.cbam_decoder_stage4 = CBAM(64)

        # Decoder stage 5: nested node x0_1 = f(up(x1_0), x0_0)
        self.decoder_stage5 = nn.Sequential(
            nn.Conv3d(32 + 32, 32, 3, 1, padding=1),  # fuse concat
            nn.PReLU(32),  # Parametric ReLU

            nn.Conv3d(32, 32, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(32),  # Parametric ReLU
        )
        self.cbam_decoder_stage5 = CBAM(32)

        # Decoder stage 6: nested node x1_1 = f(up(x2_0), x1_0)
        self.decoder_stage6 = nn.Sequential(
            nn.Conv3d(64 + 64, 64, 3, 1, padding=1),  # fuse concat
            nn.PReLU(64),  # Parametric ReLU

            nn.Conv3d(64, 64, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(64),  # Parametric ReLU
        )
        self.cbam_decoder_stage6 = CBAM(64)

        # Decoder stage 7: nested node x2_1 = f(up(x3_0), x2_0)
        self.decoder_stage7 = nn.Sequential(
            nn.Conv3d(128 + 128, 128, 3, 1, padding=1),  # fuse concat
            nn.PReLU(128),  # Parametric ReLU

            nn.Conv3d(128, 128, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(128),  # Parametric ReLU
        )

        self.cbam_decoder_stage7 = CBAM(128)

        # Decoder stage 8: nested node x1_2 = f(up(x2_1), x1_0, x1_1)
        self.decoder_stage8 = nn.Sequential(
            nn.Conv3d(64 + 64 + 64, 64, 3, 1, padding=1),  # fuse concat
            nn.PReLU(64),  # Parametric ReLU

            nn.Conv3d(64, 64, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(64),  # Parametric ReLU
        )

        self.cbam_decoder_stage8 = CBAM(64)

        # Decoder stage 9: nested node x1_3 = f(up(x2_2), x1_0, x1_1, x1_2)
        self.decoder_stage9 = nn.Sequential(
            nn.Conv3d(64 + 64 + 64 + 128, 128, 3, 1, padding=1),  # fuse concat
            nn.PReLU(128),  # Parametric ReLU

            nn.Conv3d(128, 128, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(128),  # Parametric ReLU
        )

        self.cbam_decoder_stage9 = CBAM(128)

        # Decoder stage 10: nested node x0_2 = f(up(x1_1), x0_0, x0_1)
        self.decoder_stage10 = nn.Sequential(
            nn.Conv3d(32 + 32 + 32, 32, 3, 1, padding=1),  # fuse concat
            nn.PReLU(32),  # Parametric ReLU

            nn.Conv3d(32, 32, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(32),  # Parametric ReLU
        )

        self.cbam_decoder_stage10 = CBAM(32)

        # Decoder stage 11: nested node x0_3 = f(up(x1_2), x0_0, x0_1, x0_2)
        self.decoder_stage11 = nn.Sequential(
            nn.Conv3d(32 + 32 + 32 + 32, 32, 3, 1, padding=1),  # fuse concat
            nn.PReLU(32),  # Parametric ReLU

            nn.Conv3d(32, 32, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(32),  # Parametric ReLU
        )

        self.cbam_decoder_stage11 = CBAM(32)

        # Decoder stage 12: final node x0_4 = f(up(x1_3), x0_0..x0_3)
        self.decoder_stage12 = nn.Sequential(
            nn.Conv3d(32 + 32 + 32 + 32 + 64, 64, 3, 1, padding=1),  # fuse concat
            nn.PReLU(64),  # Parametric ReLU

            nn.Conv3d(64, 64, 3, 1, padding=1),  # 3x3x3 conv, same size
            nn.PReLU(64),  # Parametric ReLU
        )

        self.cbam_decoder_stage12 = CBAM(64)

        # Downsampling convs: 2x2x2 kernel, stride 2 -> halve each spatial dim
        self.down_conv1 = nn.Sequential(
            nn.Conv3d(32, 64, 2, 2),  # 32 -> 64, spatial /2
            nn.PReLU(64)  # Parametric ReLU
        )

        self.down_conv2 = nn.Sequential(
            nn.Conv3d(64, 128, 2, 2),  # 64 -> 128, spatial /2
            nn.PReLU(128)  # Parametric ReLU
        )

        self.down_conv3 = nn.Sequential(
            nn.Conv3d(128, 256, 2, 2),  # 128 -> 256, spatial /2
            nn.PReLU(256)  # Parametric ReLU
        )

        # NOTE: unlike down_conv1-3, this one keeps spatial size (3x3x3,
        # stride 1, padding 1) and only doubles the channels.
        self.down_conv4 = nn.Sequential(
            nn.Conv3d(256, 512, 3, 1, padding=1),  # 256 -> 512, same size
            nn.PReLU(512)  # Parametric ReLU
        )

        # Upsampling transpose convs: 2x2x2 kernel, stride 2 -> double each
        # spatial dim. NOTE(review): up_conv3/4/5 are each reused on several
        # decoder paths in `forward`, so their weights are shared.
        self.up_conv2 = nn.Sequential(
            nn.ConvTranspose3d(512, 256, 2, 2),  # 512 -> 256, spatial x2
            nn.PReLU(256)  # Parametric ReLU
        )

        self.up_conv3 = nn.Sequential(
            nn.ConvTranspose3d(256, 128, 2, 2),  # 256 -> 128, spatial x2
            nn.PReLU(128)  # Parametric ReLU
        )

        self.up_conv4 = nn.Sequential(
            nn.ConvTranspose3d(128, 64, 2, 2),  # 128 -> 64, spatial x2
            nn.PReLU(64)  # Parametric ReLU
        )

        self.up_conv5 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, 2, 2),  # 64 -> 32, spatial x2
            nn.PReLU(32)  # Parametric ReLU
        )

        # Deep-supervision heads: 1x1x1 conv to class logits, then trilinear
        # upsampling back to the input resolution (full scale, e.g. 256x256).
        self.map4 = nn.Sequential(
            nn.Conv3d(64, out_channel, 1, 1),  # 1x1x1 conv to out_channel
            # scale factor 1 -> effectively an identity upsample
            nn.Upsample(scale_factor=(1, 1, 1), mode='trilinear', align_corners=False)
            # nn.Softmax(dim=1)  # softmax left to the loss function
        )

        # Head at 1/2 resolution (e.g. 128x128)
        self.map3 = nn.Sequential(
            nn.Conv3d(128, out_channel, 1, 1),  # 1x1x1 conv to out_channel
            # trilinear upsample x2 back to full resolution
            nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear', align_corners=False)
            # nn.Softmax(dim=1)  # softmax left to the loss function
        )

        # Head at 1/4 resolution (e.g. 64x64)
        self.map2 = nn.Sequential(
            nn.Conv3d(256, out_channel, 1, 1),  # 1x1x1 conv to out_channel
            # trilinear upsample x4 back to full resolution
            nn.Upsample(scale_factor=(4, 4, 4), mode='trilinear', align_corners=False)
            # nn.Softmax(dim=1)  # softmax left to the loss function
        )

        # Head at 1/8 resolution (e.g. 32x32)
        self.map1 = nn.Sequential(
            nn.Conv3d(512, out_channel, 1, 1),  # 1x1x1 conv to out_channel
            # trilinear upsample x8 back to full resolution
            nn.Upsample(scale_factor=(8, 8, 8), mode='trilinear', align_corners=False)
            # nn.Softmax(dim=1)  # softmax left to the loss function
        )

    def forward(self, inputs):
        """Run the nested encoder/decoder.

        Args:
            inputs: 5D tensor (N, in_channel, D, H, W); D/H/W should be
                divisible by 8 so the three stride-2 stages round-trip
                cleanly — TODO confirm against the training pipeline.

        Returns:
            (output1, output2, output3, output4) — logits from coarsest to
            finest head, all at input resolution — when `self.training` is
            True; otherwise only the finest map, output4.
        """

        # Encoder stage 1 (in_channel -> 32) with long residual connection.
        # NOTE(review): the add relies on broadcasting `inputs` over the
        # 32-channel output, which only works when in_channel is 1 (or 32) —
        # TODO confirm intended input channel count.
        x0_0 = self.encoder_stage1(inputs) + inputs
        cbam_x0_0 = self.cbam_encoder_stage1(x0_0)

        # Downsampling stage 1 (32 -> 64, spatial /2)
        short_range0 = self.down_conv1(cbam_x0_0)
        # print("short_range0.shape", short_range0.shape)

        # Encoder stage 2 (64 -> 64) with residual connection
        x1_0 = self.encoder_stage2(short_range0) + short_range0
        # F.dropout reads the constructor-provided flag (see class docstring)
        x1_0 = F.dropout(x1_0, self.dorp_rate, self.training)
        cbam_x1_0 = self.cbam_encoder_stage2(x1_0)
        # print("x1_0.shape", x1_0.shape)

        a1_0 = self.up_conv5(cbam_x1_0)
        # print("a1_0.shape", a1_0.shape)
        # Nested node x0_1: fuse upsampled x1_0 with skip x0_0
        x0_1 = self.decoder_stage5(torch.cat([a1_0, cbam_x0_0], dim=1)) + a1_0
        cbam_x0_1 = self.cbam_decoder_stage5(x0_1)
        # print("x0_1.shape", x0_1.shape)
        ########################################################

        # Downsampling stage 2 (64 -> 128, spatial /2)
        short_range1 = self.down_conv2(cbam_x1_0)
        # print("short_range1.shape", short_range1.shape)
        # Encoder stage 3 (128 -> 128) with residual connection
        x2_0 = self.encoder_stage3(short_range1) + short_range1
        x2_0 = F.dropout(x2_0, self.dorp_rate, self.training)  # regularization
        cbam_x2_0 = self.cbam_encoder_stage3(x2_0)

        # print("x2_0.shape", x2_0.shape)
        a2_0 = self.up_conv4(cbam_x2_0)
        # print("a2_0.shape", a2_0.shape)
        # Nested node x1_1: fuse upsampled x2_0 with skip x1_0
        x1_1 = self.decoder_stage6(torch.cat([a2_0, cbam_x1_0], dim=1)) + a2_0
        cbam_x1_1 = self.cbam_decoder_stage6(x1_1)

        # print("x1_1.shape", x1_1.shape)
        ########################################################

        # Downsampling stage 3 (128 -> 256, spatial /2)
        short_range2 = self.down_conv3(cbam_x2_0)
        # print("short_range2.shape", short_range2.shape)
        # Encoder stage 4 (256 -> 256) with residual connection
        x3_0 = self.encoder_stage4(short_range2) + short_range2
        x3_0 = F.dropout(x3_0, self.dorp_rate, self.training)  # regularization
        cbam_x3_0 = self.cbam_encoder_stage4(x3_0)

        # print("x3_0.shape", x3_0.shape)
        a3_0 = self.up_conv3(cbam_x3_0)  #
        # print("a3_0.shape", a3_0.shape)
        # Nested node x2_1: fuse upsampled x3_0 with skip x2_0
        x2_1 = self.decoder_stage7(torch.cat([a3_0, cbam_x2_0], dim=1)) + a3_0
        cbam_x2_1 = self.cbam_decoder_stage7(x2_1)

        # print("x2_1.shape", x2_1.shape)
        #######################################################

        # Bottleneck transition (256 -> 512, spatial size unchanged)
        short_range3 = self.down_conv4(cbam_x3_0)
        # Encoder stage 5 (512 -> 512) with residual connection
        x3_1 = self.encoder_stage5(short_range3) + short_range3
        x3_1 = F.dropout(x3_1, self.dorp_rate, self.training)  # regularization
        cbam_x3_1 = self.cbam_encoder_stage5(x3_1)

        # Upsample bottleneck (512 -> 256, spatial x2)
        a3_1 = self.up_conv2(cbam_x3_1)
        # print("x3_1.shape", x3_1.shape)
        output1 = self.map1(cbam_x3_1)  # coarsest deep-supervision head
        #######################################################
        # Node x2_2: fuse upsampled bottleneck with both level-2 nodes
        x2_2 = self.decoder_stage2(torch.cat([a3_1, cbam_x2_0, cbam_x2_1], dim=1)) + a3_1
        x2_2 = F.dropout(x2_2, self.dorp_rate, self.training)  # regularization
        cbam_x2_2 = self.cbam_decoder_stage2(x2_2)

        # print("x2_2.shape", x2_2.shape)
        output2 = self.map2(cbam_x2_2)  # 1/4-scale deep-supervision head
        #######################################################

        # Upsample (128 -> 64); up_conv4 weights shared with a2_0/a1_3 paths
        a2_1 = self.up_conv4(cbam_x2_1)
        # Nested node x1_2: fuse upsampled x2_1 with x1_0 and x1_1
        x1_2 = self.decoder_stage8(torch.cat([a2_1, cbam_x1_0, cbam_x1_1], dim=1)) + a2_1
        cbam_x1_2 = self.cbam_decoder_stage8(x1_2)
        # print("x1_2.shape", x1_2.shape)

        #######################################################

        # Upsample (256 -> 128); up_conv3 weights shared with the a3_0 path
        a2_2 = self.up_conv3(cbam_x2_2)
        # Nested node x1_3: fuse upsampled x2_2 with x1_0, x1_1, x1_2
        x1_3 = self.decoder_stage9(torch.cat([a2_2, cbam_x1_0, cbam_x1_1, cbam_x1_2], dim=1)) + a2_2
        x1_3 = F.dropout(x1_3, self.dorp_rate, self.training)  # regularization
        # x1_3 = self.encoder_stage3(x1_3) + x1_3
        cbam_x1_3 = self.cbam_decoder_stage9(x1_3)
        # print("x1_3.shape", x1_3.shape)
        output3 = self.map3(cbam_x1_3)  # 1/2-scale deep-supervision head

        #######################################################

        # Upsample (64 -> 32); up_conv5 weights shared across a1_* paths
        a1_1 = self.up_conv5(cbam_x1_1)
        # Nested node x0_2: fuse upsampled x1_1 with x0_0, x0_1
        x0_2 = self.decoder_stage10(torch.cat([a1_1, cbam_x0_0, cbam_x0_1], dim=1)) + a1_1
        cbam_x0_2 = self.cbam_decoder_stage10(x0_2)
        # print("x0_2.shape", x0_2.shape)

        #######################################################

        # Upsample (64 -> 32)
        a1_2 = self.up_conv5(cbam_x1_2)
        # Nested node x0_3: fuse upsampled x1_2 with x0_0, x0_1, x0_2
        x0_3 = self.decoder_stage11(torch.cat([a1_2, cbam_x0_0, cbam_x0_1, cbam_x0_2], dim=1)) + a1_2
        cbam_x0_3 = self.cbam_decoder_stage11(x0_3)
        # print("x0_3.shape", x0_3.shape)

        #######################################################
        # Upsample (128 -> 64); final node x0_4 fuses all level-0 features
        a1_3 = self.up_conv4(cbam_x1_3)
        x0_4 = self.decoder_stage12(torch.cat([a1_3, cbam_x0_0, cbam_x0_1, cbam_x0_2, cbam_x0_3], dim=1)) + a1_3
        x0_4 = F.dropout(x0_4, self.dorp_rate, self.training)  # regularization
        cbam_x0_4 = self.cbam_decoder_stage12(x0_4)

        # print("x0_4.shape", x0_4.shape)
        output4 = self.map4(cbam_x0_4)  # full-scale head

        if self.training is True:  # training: all four deep-supervision maps
            return output1, output2, output3, output4
        else:  # inference: only the full-scale map
            return output4




