import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN(nn.Module):
    """CNN with an optional parallel MLP path for MNIST-style (1x28x28) input.

    Convolutional features and (optionally) MLP features computed from the
    flattened raw image are concatenated and fed to fully connected layers
    that produce class logits.
    """

    def __init__(self, num_conv_layers=3, base_channels=32, leaky_relu_alpha=0.1,
                 dropout_rate=0.3, use_batch_norm=True, num_classes=10, alpha=None,
                 device=None, use_gmp=False, mlp_hidden_dims=(256, 128), use_mlp=True):
        """
        Build the CNN(+MLP) model.

        Args:
            num_conv_layers: Number of convolutional layers (clamped to 1-5).
            base_channels: Channel count of the first conv layer; doubled at
                each subsequent layer.
            leaky_relu_alpha: Negative slope of all LeakyReLU activations.
            dropout_rate: Dropout probability; 0 disables dropout layers.
            use_batch_norm: Whether to insert batch-normalization layers.
            num_classes: Number of output classes.
            alpha: Alias for ``leaky_relu_alpha`` (kept for backward
                compatibility); takes precedence when not None.
            device: Unused here — callers are expected to move the model
                with ``.to(device)``.
            use_gmp: If True, finish the conv stack with adaptive max
                pooling instead of adaptive average pooling.
            mlp_hidden_dims: Hidden-layer sizes of the MLP path.
            use_mlp: Whether to build the parallel MLP path.
        """
        super().__init__()

        # Clamp the conv-layer count to the supported range [1, 5].
        self.num_conv_layers = max(1, min(5, num_conv_layers))
        self.base_channels = base_channels

        # `alpha` is a compatibility alias and wins when provided.
        self.leaky_relu_alpha = alpha if alpha is not None else leaky_relu_alpha
        self.dropout_rate = dropout_rate
        self.use_batch_norm = use_batch_norm
        self.num_classes = num_classes
        self.use_gmp = use_gmp
        self.use_mlp = use_mlp

        # Convolutional feature extractor.
        self.conv_layers = self._build_conv_layers()

        # Optional MLP path over the flattened raw image.
        if self.use_mlp:
            self.mlp_layers = self._build_mlp_layers(mlp_hidden_dims)

        # Classifier head over the (possibly fused) features.
        self.fc_layers = self._build_fc_layers(use_mlp=use_mlp)

        # Initialize all weights.
        self._initialize_weights()

    def _build_conv_layers(self):
        """
        Build the convolutional stack.

        Returns:
            nn.Sequential containing all convolutional layers, ending with
            an adaptive pooling layer that yields a fixed 4x4 spatial map.
        """
        layers = []

        # Input channel count: MNIST images are grayscale.
        in_channels = 1

        # Channel-growth strategy: double the channels at every layer.
        for i in range(self.num_conv_layers):
            out_channels = self.base_channels * (2 ** i)

            # 3x3 "same" convolution (padding=1, stride=1 preserves H/W).
            layers.append(nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                padding=1,
                stride=1
            ))

            # Optional batch normalization.
            if self.use_batch_norm:
                layers.append(nn.BatchNorm2d(out_channels))

            # Activation.
            layers.append(nn.LeakyReLU(negative_slope=self.leaky_relu_alpha))

            # Downsample after every conv layer except the last; the final
            # spatial size is fixed by the adaptive pooling below.
            if i < self.num_conv_layers - 1:
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))

            # Spatial dropout, also skipped after the last conv layer.
            if self.dropout_rate > 0 and i < self.num_conv_layers - 1:
                layers.append(nn.Dropout2d(p=self.dropout_rate))

            in_channels = out_channels

        # Adaptive pooling gives a fixed 4x4 output regardless of depth.
        if self.use_gmp:
            layers.append(nn.AdaptiveMaxPool2d((4, 4)))
        else:
            layers.append(nn.AdaptiveAvgPool2d((4, 4)))

        return nn.Sequential(*layers)

    def _build_mlp_layers(self, hidden_dims):
        """
        Build the MLP path that processes the flattened input image directly.

        Args:
            hidden_dims: Iterable of hidden-layer sizes.

        Returns:
            nn.Sequential containing all MLP layers.
        """
        layers = []

        # MLP input dimension: a flattened 28x28 image.
        mlp_input_dim = 28 * 28

        prev_dim = mlp_input_dim
        for dim in hidden_dims:
            layers.append(nn.Linear(prev_dim, dim))
            if self.use_batch_norm:
                layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.LeakyReLU(negative_slope=self.leaky_relu_alpha))
            if self.dropout_rate > 0:
                layers.append(nn.Dropout(p=self.dropout_rate))
            prev_dim = dim

        return nn.Sequential(*layers)

    def _build_fc_layers(self, use_mlp=True):
        """
        Build the fully connected classifier head.

        Args:
            use_mlp: Whether the MLP path exists, which enlarges the input
                dimension of the first FC layer.

        Returns:
            nn.Sequential containing all fully connected layers.
        """
        layers = []

        # Feature count produced by the conv path: last layer's channels
        # times the fixed 4x4 spatial map from the adaptive pooling.
        last_conv_channels = self.base_channels * (2 ** (self.num_conv_layers - 1))
        conv_output_dim = last_conv_channels * 4 * 4

        # When fusing with the MLP path, add its output dimension.
        if use_mlp and hasattr(self, 'mlp_layers'):
            # The MLP output dimension is the out_features of its last
            # Linear layer.
            mlp_output_dim = 0
            for layer in reversed(self.mlp_layers):
                if isinstance(layer, nn.Linear):
                    mlp_output_dim = layer.out_features
                    break
            fc_input_dim = conv_output_dim + mlp_output_dim
        else:
            fc_input_dim = conv_output_dim

        # First FC layer: processes the fused feature vector.
        layers.append(nn.Linear(fc_input_dim, 256))
        if self.use_batch_norm:
            layers.append(nn.BatchNorm1d(256))
        layers.append(nn.LeakyReLU(negative_slope=self.leaky_relu_alpha))
        if self.dropout_rate > 0:
            layers.append(nn.Dropout(p=self.dropout_rate))

        # Second FC layer.
        layers.append(nn.Linear(256, 128))
        if self.use_batch_norm:
            layers.append(nn.BatchNorm1d(128))
        layers.append(nn.LeakyReLU(negative_slope=self.leaky_relu_alpha))
        if self.dropout_rate > 0:
            layers.append(nn.Dropout(p=self.dropout_rate))

        # Output layer producing class logits.
        layers.append(nn.Linear(128, self.num_classes))

        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """
        Initialize model weights.

        Conv and Linear weights use Kaiming (He) initialization matched to
        the LeakyReLU activation; batch-norm layers start as identity.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Pass the actual negative slope so the Kaiming gain matches
                # the LeakyReLU used in the network (omitting `a` would use
                # the default slope of 0, i.e. the plain-ReLU gain).
                nn.init.kaiming_normal_(m.weight, a=self.leaky_relu_alpha,
                                        mode='fan_out', nonlinearity='leaky_relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                # Start batch norm as the identity transform.
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, a=self.leaky_relu_alpha,
                                        mode='fan_out', nonlinearity='leaky_relu')
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x: Input tensor of shape [batch_size, 1, 28, 28].

        Returns:
            Logits tensor of shape [batch_size, num_classes].
        """
        # Keep the raw input around for the MLP path.
        original_x = x

        # Convolutional feature extraction.
        conv_features = self.conv_layers(x)

        # Flatten conv features to [batch, C*4*4].
        conv_features_flat = torch.flatten(conv_features, 1)

        if self.use_mlp and hasattr(self, 'mlp_layers'):
            # Flatten the raw input: [batch, 1, 28, 28] -> [batch, 784].
            mlp_input = torch.flatten(original_x, 1)

            # MLP feature extraction.
            mlp_features = self.mlp_layers(mlp_input)

            # Fuse conv and MLP features along the feature dimension.
            combined_features = torch.cat([conv_features_flat, mlp_features], dim=1)
        else:
            combined_features = conv_features_flat

        # Classify with the fully connected head.
        x = self.fc_layers(combined_features)

        return x