"""
深度学习模型基础类
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List, Optional, Tuple


class BaseModel(nn.Module):
    """Base class for all mahjong AI models.

    Stores the input/output dimensionality and a preferred device, and
    provides common save/load/predict helpers. Subclasses must implement
    :meth:`forward`.
    """

    def __init__(self, input_dim: int, output_dim: int):
        super().__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        # Prefer GPU when available; used by load() and to_device().
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward pass; must be implemented by subclasses."""
        raise NotImplementedError("必须在子类中实现forward方法")

    def predict(self, x: torch.Tensor) -> torch.Tensor:
        """Run inference on `x` without tracking gradients.

        Temporarily switches to eval mode and restores the previous
        train/eval mode afterwards — the original implementation left the
        model in eval mode permanently, which silently disabled dropout /
        batch-norm updates if predict() was called during training.
        """
        was_training = self.training
        self.eval()
        try:
            with torch.no_grad():
                return self.forward(x)
        finally:
            # Restore whichever mode the model was in before predict().
            self.train(was_training)

    def save(self, filepath: str):
        """Save model weights and dimensions to `filepath`."""
        torch.save({
            'model_state_dict': self.state_dict(),
            'input_dim': self.input_dim,
            'output_dim': self.output_dim
        }, filepath)

    def load(self, filepath: str):
        """Load weights from `filepath` and move the model to self.device."""
        checkpoint = torch.load(filepath, map_location=self.device)
        self.load_state_dict(checkpoint['model_state_dict'])
        self.to(self.device)

    def to_device(self):
        """Move the model to the configured device; returns self for chaining."""
        self.to(self.device)
        return self


class MLP(BaseModel):
    """Multi-layer perceptron model.

    Args:
        input_dim: Size of the flat input feature vector.
        output_dim: Size of the output layer (raw logits, no activation).
        hidden_dims: Widths of the hidden layers (defaults to [128, 64]).
        activation: Activation module inserted after each hidden linear
            layer (defaults to a fresh ``nn.ReLU()`` per instance).
        dropout_rate: Dropout probability after each activation; 0 disables.
    """

    def __init__(self,
                 input_dim: int,
                 output_dim: int,
                 hidden_dims: Optional[List[int]] = None,
                 activation: Optional[nn.Module] = None,
                 dropout_rate: float = 0.2):
        super().__init__(input_dim, output_dim)

        # Avoid mutable/shared default arguments: a list default is shared
        # across calls, and a module default built at def time would be
        # shared across every MLP instance.
        if hidden_dims is None:
            hidden_dims = [128, 64]
        if activation is None:
            activation = nn.ReLU()

        # Build hidden layers: Linear -> activation [-> Dropout].
        layers = []
        prev_dim = input_dim
        for dim in hidden_dims:
            layers.append(nn.Linear(prev_dim, dim))
            layers.append(activation)
            if dropout_rate > 0:
                layers.append(nn.Dropout(dropout_rate))
            prev_dim = dim

        # Output layer: raw logits, no activation.
        layers.append(nn.Linear(prev_dim, output_dim))

        self.model = nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)


class CNN(BaseModel):
    """Convolutional neural network model.

    The flat input vector is reshaped to (batch, channels, 4, 9) —
    presumably 4 suits x 9 ranks of mahjong tiles; TODO confirm against
    the upstream feature encoder.

    Args:
        input_channels: Number of channels after reshaping the input.
        output_dim: Size of the output layer (raw logits).
        num_filters: Output channels of each conv layer (default [64, 128]).
        kernel_sizes: Kernel size of each conv layer (default [3, 3]).
        strides: Stride of each conv layer (default [1, 1]).
        hidden_dims: Widths of the fully-connected head (default [256, 128]).

    Raises:
        ValueError: If num_filters, kernel_sizes and strides do not all
            have the same length.
    """

    def __init__(self,
                 input_channels: int,
                 output_dim: int,
                 num_filters: Optional[List[int]] = None,
                 kernel_sizes: Optional[List[int]] = None,
                 strides: Optional[List[int]] = None,
                 hidden_dims: Optional[List[int]] = None):
        # Input dimension assumes a fixed 4x9 spatial grid (simplification).
        input_dim = input_channels * 4 * 9
        super().__init__(input_dim, output_dim)

        # Replace mutable default arguments (shared across calls) with
        # None sentinels resolved per call.
        num_filters = [64, 128] if num_filters is None else num_filters
        kernel_sizes = [3, 3] if kernel_sizes is None else kernel_sizes
        strides = [1, 1] if strides is None else strides
        hidden_dims = [256, 128] if hidden_dims is None else hidden_dims

        # Fail fast instead of IndexError-ing mid-construction.
        if not (len(num_filters) == len(kernel_sizes) == len(strides)):
            raise ValueError(
                "num_filters, kernel_sizes and strides must have equal lengths"
            )

        # Convolutional feature extractor; padding = kernel // 2 keeps the
        # spatial size unchanged when stride == 1.
        self.conv_layers = nn.ModuleList()
        prev_channels = input_channels
        for filters, kernel, stride in zip(num_filters, kernel_sizes, strides):
            self.conv_layers.append(
                nn.Conv2d(
                    in_channels=prev_channels,
                    out_channels=filters,
                    kernel_size=kernel,
                    stride=stride,
                    padding=kernel // 2
                )
            )
            self.conv_layers.append(nn.ReLU())
            prev_channels = filters

        # Probe the conv stack with a dummy (1, C, 4, 9) input to determine
        # the flattened feature size feeding the fully-connected head.
        dummy_input = torch.zeros(1, input_channels, 4, 9)
        conv_output_dim = self._forward_conv(dummy_input).numel()

        # Fully-connected head ending in a linear output layer (raw logits).
        fc = []
        prev_dim = conv_output_dim
        for dim in hidden_dims:
            fc.append(nn.Linear(prev_dim, dim))
            fc.append(nn.ReLU())
            prev_dim = dim
        fc.append(nn.Linear(prev_dim, output_dim))
        self.fc_layers = nn.Sequential(*fc)

    def _forward_conv(self, x: torch.Tensor) -> torch.Tensor:
        """Apply only the convolutional layers (with their ReLUs)."""
        for layer in self.conv_layers:
            x = layer(x)
        return x

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        batch_size = x.size(0)
        # Reshape the flat (batch, input_dim) input to (batch, C, 4, 9);
        # adjust here if the upstream feature format ever changes.
        x = x.view(batch_size, -1, 4, 9)

        x = self._forward_conv(x)

        # Flatten for the fully-connected head.
        x = x.view(batch_size, -1)
        return self.fc_layers(x)


class ResidualBlock(nn.Module):
    """Basic two-convolution residual block (ResNet style).

    Computes conv-BN-ReLU-conv-BN on the input, adds a shortcut branch of
    the original input, and applies a final ReLU. The shortcut is the
    identity unless the stride or channel count changes, in which case a
    1x1 convolution + BatchNorm projects the input to match.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()

        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)

        # Projection is only needed when the main branch changes the
        # tensor shape; otherwise an empty Sequential acts as identity.
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride),
                nn.BatchNorm2d(out_channels)
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Main branch: conv1 -> bn1 -> relu -> conv2 -> bn2.
        main = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(x)))))
        # Residual addition, then the final activation.
        return self.relu(main + self.shortcut(x))


class ResNet(BaseModel):
    """Residual network model.

    The flat input vector is reshaped to (batch, channels, 4, 9) —
    presumably 4 suits x 9 ranks; TODO confirm against the feature
    encoder.

    Args:
        input_channels: Number of channels after reshaping the input.
        output_dim: Size of the output layer (raw logits).
        block_sizes: Number of residual blocks per stage; exactly 4 stages.
        channels: Output channel count of each of the 4 stages.

    Raises:
        ValueError: If block_sizes or channels does not have 4 entries.
    """

    # Channel count produced by the stem convolution. If channels[0]
    # differs, the first ResidualBlock's projection shortcut adapts.
    STEM_CHANNELS = 64

    def __init__(self,
                 input_channels: int,
                 output_dim: int,
                 block_sizes: Optional[List[int]] = None,
                 channels: Optional[List[int]] = None):
        # Input dimension assumes a fixed 4x9 spatial grid (simplification).
        input_dim = input_channels * 4 * 9
        super().__init__(input_dim, output_dim)

        # Replace mutable default arguments (shared across calls) with
        # None sentinels resolved per call.
        block_sizes = [3, 4, 6, 3] if block_sizes is None else block_sizes
        channels = [64, 128, 256, 512] if channels is None else channels

        # Fail fast instead of IndexError-ing mid-construction.
        if len(block_sizes) != 4 or len(channels) != 4:
            raise ValueError("block_sizes and channels must each have 4 entries")

        # Stem: 7x7 conv + BN + ReLU + max-pool, as in standard ResNet.
        self.in_channels = self.STEM_CHANNELS
        self.conv1 = nn.Conv2d(input_channels, self.STEM_CHANNELS,
                               kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(self.STEM_CHANNELS)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(channels[0], block_sizes[0], stride=1)
        self.layer2 = self._make_layer(channels[1], block_sizes[1], stride=2)
        self.layer3 = self._make_layer(channels[2], block_sizes[2], stride=2)
        self.layer4 = self._make_layer(channels[3], block_sizes[3], stride=2)

        # Global average pooling collapses whatever spatial extent remains.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        self.fc = nn.Linear(channels[-1], output_dim)

    def _make_layer(self, out_channels: int, num_blocks: int, stride: int) -> nn.Sequential:
        """Build one stage: `num_blocks` ResidualBlocks, the first using `stride`."""
        strides = [stride] + [1] * (num_blocks - 1)
        blocks = []
        for s in strides:
            blocks.append(ResidualBlock(self.in_channels, out_channels, s))
            self.in_channels = out_channels
        return nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        batch_size = x.size(0)
        # Reshape the flat input to (batch, channels, 4, 9).
        x = x.view(batch_size, -1, 4, 9)

        # Stem processing.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        # Residual stages.
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # Pool, flatten, and project to the output logits.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x


def create_model(model_type: str,
                 input_dim: int,
                 output_dim: int,
                 config: Optional[Dict] = None) -> BaseModel:
    """
    Factory function for creating models.

    Args:
        model_type: Model type ('mlp', 'cnn', 'resnet'), case-insensitive.
        input_dim: Input dimension (used by 'mlp'; CNN/ResNet derive
            theirs from input_channels).
        output_dim: Output dimension.
        config: Extra keyword arguments forwarded to the model
            constructor. For 'cnn' and 'resnet' it may contain
            'input_channels' (default 1). The caller's dict is never
            modified.

    Returns:
        The created model instance.

    Raises:
        ValueError: If model_type is not one of the supported types.
    """
    # Work on a shallow copy so we never mutate the caller's dict — the
    # original popped 'input_channels' out of `config`, which silently
    # broke reuse of the same config across multiple calls.
    config = dict(config) if config else {}

    kind = model_type.lower()
    if kind == 'mlp':
        return MLP(input_dim, output_dim, **config)
    elif kind == 'cnn':
        # CNN derives its input_dim from input_channels.
        input_channels = config.pop('input_channels', 1)
        return CNN(input_channels, output_dim, **config)
    elif kind == 'resnet':
        # ResNet derives its input_dim from input_channels.
        input_channels = config.pop('input_channels', 1)
        return ResNet(input_channels, output_dim, **config)
    else:
        raise ValueError(f"不支持的模型类型: {model_type}")