import torch
import torch.nn as nn
import torch.nn.functional as F
from thop import profile


class ResidualBlock(nn.Module):
    """Residual block: a conv path plus a 1x1 projection shortcut, then ReLU.

    With ``is_double_conv=True`` the main path is 3x3 conv -> BN -> ReLU ->
    1x1 conv -> BN; otherwise it is a single 3x3 conv -> BN. The shortcut is
    always a 1x1 conv followed by BN so the channel count matches the main
    path before the addition.
    """

    def __init__(self, in_channels, out_channels, is_double_conv=True):
        super(ResidualBlock, self).__init__()
        # Build the main path incrementally; the first 3x3 conv + BN is common
        # to both configurations.
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
        ]
        if is_double_conv:
            layers += [
                nn.ReLU(inplace=True),
                nn.Conv2d(out_channels, out_channels, kernel_size=1),
                nn.BatchNorm2d(out_channels),
            ]
        self.conv = nn.Sequential(*layers)

        # Projection shortcut so the skip connection matches out_channels.
        self.residual_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        shortcut = self.bn(self.residual_conv(x))
        out = self.conv(x) + shortcut
        return F.relu(out, inplace=False)


class Down(nn.Module):
    """Encoder stage: a stack of ResidualBlocks followed by downsampling.

    ``layer_type`` selects the configuration:
      1 -- two residual blocks, 2x2 max-pooling
      2 -- three residual blocks, strided 3x3 conv for downsampling
      3 -- one single-conv residual block, 2x2 max-pooling

    Raises:
        ValueError: if ``layer_type`` is not 1, 2 or 3.
    """

    def __init__(self, in_channels, out_channels, layer_type):
        super(Down, self).__init__()

        if layer_type not in (1, 2, 3):
            raise ValueError("Invalid layer type")

        if layer_type == 3:
            blocks = [ResidualBlock(in_channels, out_channels, is_double_conv=False)]
        else:
            # Types 1 and 2 differ only in depth: 2 vs 3 double-conv blocks.
            depth = 2 if layer_type == 1 else 3
            blocks = [ResidualBlock(in_channels, out_channels)]
            blocks += [
                ResidualBlock(out_channels, out_channels) for _ in range(depth - 1)
            ]
        self.conv = nn.Sequential(*blocks)

        # Type 2 downsamples with a learned strided conv; types 1 and 3 pool.
        if layer_type == 2:
            self.pool = nn.Conv2d(
                out_channels, out_channels, kernel_size=3, stride=2, padding=1
            )
        else:
            self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        # Return the downsampled features plus two skip tensors: the raw stage
        # input and the features just before pooling.
        stage_input = x
        features = self.conv(x)
        return self.pool(features), stage_input, features


class Up(nn.Module):
    """Decoder stage: upsample, fuse two skip tensors, then one residual block.

    Both skip tensors are projected to ``out_channels`` by 1x1 convolutions,
    concatenated with the upsampled feature map (3 * out_channels channels in
    total), and reduced back to ``out_channels`` by a single-conv ResidualBlock.
    """

    def __init__(
        self, in_channels, out_channels, down_input_channel, before_pool_channel
    ):
        super(Up, self).__init__()
        self.up = nn.ConvTranspose2d(
            in_channels, out_channels, kernel_size=2, stride=2
        )
        self.conv = ResidualBlock(
            out_channels * 3, out_channels, is_double_conv=False
        )

        # 1x1 convolutions that bring each skip tensor to out_channels.
        self.down_input_conv = nn.Conv2d(
            down_input_channel, out_channels, kernel_size=1
        )
        self.before_pool_conv = nn.Conv2d(
            before_pool_channel, out_channels, kernel_size=1
        )

    def forward(self, x1, down_input, before_pool):
        upsampled = self.up(x1)

        # Project the skip tensors with the 1x1 convs created in __init__.
        skip_a = self.down_input_conv(down_input)
        skip_b = self.before_pool_conv(before_pool)

        # Fuse all three feature maps along the channel axis.
        fused = torch.cat([upsampled, skip_a, skip_b], dim=1)
        return self.conv(fused)


# 定义完整模型
class GrayDemoNet2(nn.Module):
    """U-Net-style encoder/decoder for single-channel (grayscale) input.

    Four ``Down`` stages encode the input; three ``Up`` stages decode while
    fusing, at each resolution, both the stage input and the pre-pooling
    features of the matching encoder stage. The decoder output is resized
    back to the input's spatial size and projected to ``output_channels``
    with a 1x1 convolution.

    Args:
        output_channels: number of channels in the final prediction map.
    """

    def __init__(self, output_channels=3):
        super(GrayDemoNet2, self).__init__()
        self.down1 = Down(1, 16, 1)    # encoder stage 1
        self.down2 = Down(16, 32, 1)   # encoder stage 2
        self.down3 = Down(32, 64, 2)   # encoder stage 3
        self.down4 = Down(64, 128, 3)  # encoder stage 4

        # Up(in, out, down_input_channel, before_pool_channel): the skip
        # channel counts match the encoder stages wired in forward().
        self.up1 = Up(128, 64, 128, 64)
        self.up2 = Up(64, 32, 64, 32)
        self.up3 = Up(32, 16, 32, 16)

        self.out_conv = nn.Conv2d(16, output_channels, kernel_size=1)

    def forward(self, x):
        # Each Down returns (pooled, stage_input, pre_pool_features).
        # NOTE(review): earlier local names here had the last two swapped
        # ("before_pool" held the stage input and vice versa); the wiring
        # below is unchanged, only the names are now honest.
        x1, stage_in1, pre_pool1 = self.down1(x)
        x2, stage_in2, pre_pool2 = self.down2(x1)
        x3, stage_in3, pre_pool3 = self.down3(x2)
        x4, stage_in4, pre_pool4 = self.down4(x3)

        # Up(x, down_input, before_pool): pre-pool features feed the first
        # skip slot, the stage input feeds the second (matches Up's channel
        # configuration above).
        d1 = self.up1(x4, pre_pool4, stage_in4)
        d2 = self.up2(d1, pre_pool3, stage_in3)
        d3 = self.up3(d2, pre_pool2, stage_in2)

        # Resize to the input's own spatial size. This generalizes the
        # previous hard-coded (128, 128) target and is identical for the
        # 128x128 inputs this model was exercised with.
        out = F.interpolate(
            d3, size=x.shape[-2:], mode="bilinear", align_corners=True
        )
        return self.out_conv(out)


if __name__ == "__main__":
    # Smoke-test the model: forward pass, backward pass, FLOPs/params, save.
    torch.autograd.set_detect_anomaly(True)
    model = GrayDemoNet2()
    print(model)

    sample = torch.randn((1, 1, 128, 128))
    output = model(sample)
    output.mean().backward()  # trivial scalar loss just to exercise backward
    print(model(sample).shape)

    flops, params = profile(model, inputs=(sample,))
    print("FLOPs=", f"{flops / 1e9}G")
    print("params=", f"{params / 1e6}M")

    torch.save(model.state_dict(), "model.pt")
