import torch
import torch.nn as nn
import torch.nn.functional as F


class ResBlock(nn.Module):
    """Basic residual block (ResNet-18 style): two 3x3 convs with BatchNorm,
    plus a skip connection.

    If ``stride != 1`` or the channel count changes, the skip path uses a
    1x1 conv + BatchNorm projection so shapes match; otherwise it is the
    identity (an empty ``nn.Sequential``).
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResBlock, self).__init__()
        # Main path: conv(3x3, stride) -> BN -> ReLU -> conv(3x3) -> BN.
        # bias=False because each conv is immediately followed by BatchNorm.
        self.conv1 = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
        )
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, kernel_size=3, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Skip path: identity by default; 1x1 projection when the spatial
        # size or channel count of the main path output differs from the input.
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_channels, out_channels, kernel_size=1, stride=stride, bias=False
                ),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        """Return ``relu(bn2(conv2(relu(bn1(conv1(x))))) + shortcut(x))``."""
        identity = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + identity)


class DQN(nn.Module):
    """ResNet-18-style Q-network mapping image observations to Q-values.

    Architecture: 7x7 stride-2 stem conv + 3x3 stride-2 max-pool, four
    residual stages (64 -> 128 -> 256 -> 512 channels, stages 2-4 halve the
    spatial size), global average pooling, and a single linear head.

    Note: ``h`` and ``w`` are accepted for backward compatibility but unused —
    AdaptiveAvgPool2d((1, 1)) makes the head independent of input resolution.
    """

    def __init__(self, h, w, outputs, input_channels=12):
        """
        Args:
            h, w: nominal input height/width (unused, kept for caller
                compatibility; any spatial size works).
            outputs: number of actions, i.e. Q-value outputs.
            input_channels: channels of the input observation (default 12,
                e.g. stacked frames).
        """
        super(DQN, self).__init__()

        # Stem: works on the full-resolution input; the stride-2 conv and
        # stride-2 pool together reduce the spatial size by 4x.
        self.conv1 = nn.Conv2d(
            input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Residual stages, two blocks each (ResNet-18 layout). The first
        # block of stages 2-4 uses stride 2 to halve the feature map.
        self.layer1 = self._make_stage(64, 64, stride=1)
        self.layer2 = self._make_stage(64, 128, stride=2)
        self.layer3 = self._make_stage(128, 256, stride=2)
        self.layer4 = self._make_stage(256, 512, stride=2)

        # Global average pooling collapses spatial dims to 1x1, so the
        # linear head sees a fixed 512-dim feature regardless of input size.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, outputs)

        self._initialize_weights()

    @staticmethod
    def _make_stage(in_channels, out_channels, stride):
        """Build one residual stage: a (possibly strided) block, then an
        identity-shaped block at the new channel count."""
        return nn.Sequential(
            ResBlock(in_channels, out_channels, stride=stride),
            ResBlock(out_channels, out_channels),
        )

    def _initialize_weights(self):
        """He-initialize conv weights; set BatchNorm to identity (gamma=1, beta=0)."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(
                    module.weight, mode="fan_out", nonlinearity="relu"
                )
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Compute Q-values.

        Args:
            x: tensor of shape (batch, input_channels, H, W).

        Returns:
            Tensor of shape (batch, outputs).
        """
        # Stem.
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))

        # Residual backbone.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)

        # Pool, flatten, and project to Q-values.
        x = torch.flatten(self.avgpool(x), 1)
        return self.fc(x)
