import os
import torch

class Net(torch.nn.Module):
    """Small CNN classifier for CIFAR-10 (3x32x32 input, 10 classes).

    Architecture: two conv blocks (conv -> normalization -> activation ->
    max-pool) followed by a two-layer fully-connected head.

    Args:
        activation: one of 'relu', 'selu', 'elu', 'gelu'.
        normalization: 'bn' (BatchNorm2d), 'ln' (LayerNorm over C,H,W),
            'gn' (GroupNorm with 5 groups); any other value disables
            normalization (Identity).

    Raises:
        ValueError: if `activation` is not a supported name.
    """

    def __init__(self, activation='selu', normalization='bn'):
        super().__init__()
        self.normalization = normalization

        # Input has 3 channels (RGB CIFAR-10 images).
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(3, 10, kernel_size=5),    # 32x32 -> 28x28
            self._get_normalization(10, 28, 28),      # 28 = conv1 output H/W
            self._get_activation(activation),
            torch.nn.MaxPool2d(kernel_size=2),        # 28x28 -> 14x14
        )
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(10, 20, kernel_size=5),   # 14x14 -> 10x10
            self._get_normalization(20, 10, 10),      # 10 = conv2 output H/W
            self._get_activation(activation),
            torch.nn.MaxPool2d(kernel_size=2),        # 10x10 -> 5x5
        )

        # Classifier head: 20 channels * 5 * 5 spatial = 500 input features.
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(20 * 5 * 5, 50),
            self._get_activation(activation),
            torch.nn.Linear(50, 10),                  # CIFAR-10 has 10 classes
        )

    def _get_activation(self, activation):
        """Return a fresh activation module for the given name.

        Raises:
            ValueError: if `activation` is not one of the supported names.
        """
        factories = {
            'relu': torch.nn.ReLU,
            'selu': torch.nn.SELU,
            'elu': torch.nn.ELU,
            'gelu': torch.nn.GELU,
        }
        try:
            return factories[activation]()
        except KeyError:
            raise ValueError(f'Unknown activation function: {activation}') from None

    def _get_normalization(self, num_features, height, width):
        """Return a normalization module for a (num_features, height, width) map."""
        if self.normalization == 'bn':
            return torch.nn.BatchNorm2d(num_features)
        elif self.normalization == 'ln':
            # LayerNorm applied over the full (C, H, W) feature map.
            return torch.nn.LayerNorm([num_features, height, width])
        elif self.normalization == 'gn':
            # Both channel counts used here (10 and 20) divide evenly into 5 groups.
            return torch.nn.GroupNorm(num_groups=5, num_channels=num_features)
        else:
            return torch.nn.Identity()  # no normalization

    def forward(self, x):
        """Map a (N, 3, 32, 32) input batch to (N, 10) class logits."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = torch.flatten(x, start_dim=1)  # (N, 20, 5, 5) -> (N, 500)
        return self.fc(x)

    def save(self, save_dir, name):
        """Save model weights to <save_dir>/<name>/model.pt, creating the directory."""
        log_dir = os.path.join(save_dir, name)
        # exist_ok avoids the check-then-create race of the exists()/makedirs pair.
        os.makedirs(log_dir, exist_ok=True)
        torch.save({'model': self.state_dict()}, os.path.join(log_dir, 'model.pt'))

    def load(self, log_dir):
        """Load model weights from <log_dir>/model.pt.

        map_location='cpu' lets checkpoints saved on a GPU machine load on
        CPU-only hosts; move the module to the target device afterwards.
        """
        checkpoint = torch.load(os.path.join(log_dir, 'model.pt'), map_location='cpu')
        self.load_state_dict(checkpoint['model'])
