import torch
import torch.nn as nn
import torchvision.models as models
import ECA

# A residual basic block augmented with an ECA attention module.
class ECABasicBlock(nn.Module):
    """Residual basic block with ECA channel attention.

    Two 3x3 conv+BN stages; ECA attention is applied to the second stage's
    output before the shortcut addition and the final ReLU.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, kernel_size=3):
        super().__init__()
        # First conv may downsample spatially via `stride`.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)

        # Second conv keeps the spatial resolution.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # NOTE: `kernel_size` here is the ECA attention kernel, not the conv kernel.
        self.eca = ECA.ECAAttention(kernel_size=kernel_size)

        # Projection shortcut when shape changes; plain identity otherwise.
        self.downsample = None
        if stride != 1 or in_planes != planes:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes),
            )

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = self.eca(out)  # channel attention before the residual add

        shortcut = x if self.downsample is None else self.downsample(x)
        out = out + shortcut  # residual connection
        return self.relu(out)

# A simple ResNet model built from ECABasicBlock units.
class ECAResNet(nn.Module):
    """Small ResNet-18-style classifier assembled from ECABasicBlock units."""

    def __init__(self, num_classes=10):
        super().__init__()
        self.in_planes = 64

        # Stem: a single 3x3 conv (CIFAR-style, no initial downsampling).
        self.conv1 = nn.Conv2d(3, self.in_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_planes)
        self.relu = nn.ReLU(inplace=True)

        # Four stages of two ECA residual blocks each; stages 2-4 halve resolution.
        self.layer1 = self._make_layer(ECABasicBlock, 64, 2, stride=1)
        self.layer2 = self._make_layer(ECABasicBlock, 128, 2, stride=2)
        self.layer3 = self._make_layer(ECABasicBlock, 256, 2, stride=2)
        self.layer4 = self._make_layer(ECABasicBlock, 512, 2, stride=2)

        # Global pooling + linear head.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * ECABasicBlock.expansion, num_classes)

    def _make_layer(self, block, planes, blocks, stride):
        """Build one stage of `blocks` residual blocks.

        Only the first block may downsample / change channel count; the
        running `self.in_planes` is updated so successive stages chain up.
        """
        stage = [block(self.in_planes, planes, stride)]
        self.in_planes = planes * block.expansion
        stage.extend(block(self.in_planes, planes) for _ in range(blocks - 1))
        return nn.Sequential(*stage)

    def forward(self, x):
        # Stem.
        x = self.relu(self.bn1(self.conv1(x)))

        # Residual stages.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)

        # Pool, flatten, classify.
        x = torch.flatten(self.avgpool(x), 1)
        return self.fc(x)

# Smoke-test the combined model.
if __name__ == '__main__':
    # Smoke test: one forward pass on a dummy image, report the output shape.
    # Prefer CUDA, then Apple MPS, then CPU.
    if torch.cuda.is_available():
        DEVICE = 'cuda'
    elif torch.backends.mps.is_available():
        DEVICE = 'mps'
    else:
        DEVICE = 'cpu'
    print(f"Using device: {DEVICE}")

    model = ECAResNet(num_classes=10).to(DEVICE)
    model.eval()  # eval mode: don't mutate BatchNorm running stats in a smoke test

    # Named `dummy` (not `input`) to avoid shadowing the builtin.
    dummy = torch.randn(1, 3, 224, 224, device=DEVICE)
    with torch.no_grad():  # inference only — no autograd graph needed
        output = model(dummy)
    print(f"Output shape: {output.shape}")