import torch.nn as nn
import torch.nn.functional as F

class ImageClassifier(nn.Module):
    """
    Improved CIFAR10 image-classification model using a more modern CNN design.

    Architecture highlights:
    - Batch normalization (BatchNorm) to speed up convergence
    - Residual connections to mitigate vanishing gradients
    - Dropout to reduce overfitting
    - A deeper network structure

    Input:  (N, 3, 32, 32) float tensor (CIFAR10 images).
    Output: (N, 10) raw class logits (no softmax applied).
    """
    def __init__(self):
        super(ImageClassifier, self).__init__()

        # Convolutional block 1: 3 -> 64 channels, halves spatial size (32 -> 16).
        self.conv_block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2)
        )

        # Residual block 1: 64 -> 128 channels; 1x1 conv shortcut matches channels.
        self.residual1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU()
        )
        self.shortcut1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=1),
            nn.BatchNorm2d(128)
        )
        self.pool1 = nn.MaxPool2d(2, 2)  # 16 -> 8

        # Residual block 2: 128 -> 256 channels; 1x1 conv shortcut matches channels.
        self.residual2 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU()
        )
        self.shortcut2 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=1),
            nn.BatchNorm2d(256)
        )
        self.pool2 = nn.MaxPool2d(2, 2)  # 8 -> 4

        # Classifier head: 256 channels x 4x4 spatial -> 10 class logits.
        self.fc1 = nn.Linear(256 * 4 * 4, 1024)
        self.bn_fc1 = nn.BatchNorm1d(1024)
        self.dropout = nn.Dropout(0.5)
        self.fc2 = nn.Linear(1024, 512)
        self.bn_fc2 = nn.BatchNorm1d(512)
        self.fc3 = nn.Linear(512, 10)

    def forward(self, x):
        """Run a forward pass; returns (N, 10) logits for input x of shape (N, 3, 32, 32)."""
        # First convolutional block
        x = self.conv_block1(x)

        # First residual block: projected shortcut added before the final ReLU.
        residual = self.shortcut1(x)
        x = self.residual1(x) + residual
        x = F.relu(x)
        x = self.pool1(x)

        # Second residual block
        residual = self.shortcut2(x)
        x = self.residual2(x) + residual
        x = F.relu(x)
        x = self.pool2(x)

        # Flatten. Fix: keep the batch dimension explicit with x.size(0) rather than
        # view(-1, 256 * 4 * 4) — the old form silently mangled the batch dimension
        # if the spatial size ever differed from 4x4; this form raises a clear error.
        x = x.view(x.size(0), -1)

        # Fully connected head: FC -> BN -> ReLU, dropout between the first two layers.
        x = F.relu(self.bn_fc1(self.fc1(x)))
        x = self.dropout(x)
        x = F.relu(self.bn_fc2(self.fc2(x)))
        x = self.fc3(x)

        return x