"""
四川麻将专用的深度学习模型
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, List, Optional, Tuple
from sichuanmajiang.model.base_model import BaseModel, MLP, CNN, ResNet


class MahjongPolicyModel(BaseModel):
    """
    Mahjong policy model - predicts which tile should be discarded.

    The forward pass returns a softmax probability distribution over the
    27 tile types (Sichuan mahjong uses no honor tiles).
    """
    def __init__(self,
                 input_dim: int = 147,  # standard feature dimension
                 output_dim: int = 27,  # 27 distinct tiles (no honors)
                 model_type: str = 'mlp',
                 **kwargs):
        super().__init__(input_dim, output_dim)

        # Select the backbone network according to model_type.
        if model_type == 'mlp':
            hidden_dims = kwargs.get('hidden_dims', [256, 128, 64])
            self.model = MLP(input_dim, output_dim, hidden_dims=hidden_dims)
        elif model_type == 'cnn':
            input_channels = kwargs.get('input_channels', 5)
            num_filters = kwargs.get('num_filters', [64, 128])
            self.model = CNN(input_channels, output_dim, num_filters=num_filters)
        elif model_type == 'resnet':
            input_channels = kwargs.get('input_channels', 5)
            self.model = ResNet(input_channels, output_dim)
        else:
            raise ValueError(f"不支持的模型类型: {model_type}")

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Returns the per-tile discard probability distribution
        (softmax over the backbone's logits, along dim 1).
        """
        logits = self.model(x)
        # Softmax turns the logits into a probability distribution.
        probabilities = F.softmax(logits, dim=1)
        return probabilities

    def predict_action(self, state: torch.Tensor, valid_actions: Optional[torch.Tensor] = None) -> Tuple[int, torch.Tensor]:
        """
        Predict the best action for a game state.

        Args:
            state: game-state feature tensor (batch of size 1 assumed).
            valid_actions: optional action mask (1 = valid, 0 = invalid).

        Returns:
            (index of the best action, action probability distribution)
        """
        self.eval()
        with torch.no_grad():
            probabilities = self.forward(state)

            # If a validity mask is given, restrict to valid actions
            # and renormalize the remaining probability mass.
            if valid_actions is not None:
                masked = probabilities * valid_actions
                total = masked.sum(dim=1, keepdim=True)
                # Guard against an all-invalid mask: clamping the
                # denominator avoids 0/0 -> NaN.
                probabilities = masked / total.clamp_min(1e-12)

            # Pick the highest-probability action.
            # NOTE: .item() assumes a batch of size 1.
            best_action = torch.argmax(probabilities, dim=1).item()

            return best_action, probabilities


class MahjongValueModel(BaseModel):
    """
    Mahjong value model - estimates the win probability of the current state.
    """
    def __init__(self,
                 input_dim: int = 147,
                 model_type: str = 'mlp',
                 **kwargs):
        # The value head produces a single scalar per state.
        super().__init__(input_dim, 1)

        # Build the backbone network for the requested model type.
        if model_type == 'mlp':
            dims = kwargs.get('hidden_dims', [256, 128, 64])
            backbone = MLP(input_dim, 1, hidden_dims=dims)
        elif model_type == 'cnn':
            channels = kwargs.get('input_channels', 5)
            filters = kwargs.get('num_filters', [64, 128])
            backbone = CNN(channels, 1, num_filters=filters)
        elif model_type == 'resnet':
            channels = kwargs.get('input_channels', 5)
            backbone = ResNet(channels, 1)
        else:
            raise ValueError(f"不支持的模型类型: {model_type}")
        self.model = backbone

        # Sigmoid squashes the raw output into [0, 1] so it reads as a win rate.
        self.output_activation = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Returns the estimated value (win rate) of the given state.
        """
        return self.output_activation(self.model(x))

    def predict_value(self, state: torch.Tensor) -> float:
        """
        Predict the value of a single game state.

        Args:
            state: game-state feature tensor.

        Returns:
            State value as a Python float in [0, 1].
        """
        self.eval()
        with torch.no_grad():
            return self.forward(state).item()


class DuelingDQN(BaseModel):
    """
    Dueling DQN - estimates the state value V(s) and the per-action
    advantage A(s, a) in separate streams, then combines them into Q-values.
    """
    def __init__(self,
                 input_dim: int = 147,
                 output_dim: int = 27,
                 hidden_dims: Optional[List[int]] = None):
        super().__init__(input_dim, output_dim)

        # Use None instead of a mutable default list argument
        # (a shared list default would leak state across instances).
        if hidden_dims is None:
            hidden_dims = [256, 128]

        # Shared feature extraction layers.
        self.feature_layers = nn.Sequential()
        prev_dim = input_dim
        for dim in hidden_dims:
            self.feature_layers.add_module(f'fc_{prev_dim}_{dim}', nn.Linear(prev_dim, dim))
            self.feature_layers.add_module(f'relu_{dim}', nn.ReLU())
            prev_dim = dim

        # Value stream - estimates the scalar state value V(s).
        self.value_stream = nn.Linear(prev_dim, 1)

        # Advantage stream - estimates the advantage A(s, a) of each action.
        self.advantage_stream = nn.Linear(prev_dim, output_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Combines the value and advantage streams into Q-values:
        Q(s,a) = V(s) + A(s,a) - mean(A(s,a))
        (subtracting the mean makes the decomposition identifiable).
        """
        # Shared feature extraction.
        features = self.feature_layers(x)

        # Per-stream estimates.
        value = self.value_stream(features)
        advantage = self.advantage_stream(features)

        # Combine into Q-values.
        advantage_mean = advantage.mean(dim=1, keepdim=True)
        q_values = value + advantage - advantage_mean

        return q_values

    def predict_action(self, state: torch.Tensor, valid_actions: Optional[torch.Tensor] = None) -> Tuple[int, torch.Tensor]:
        """
        Predict the best action for a game state.

        Args:
            state: game-state feature tensor (batch of size 1 assumed).
            valid_actions: optional action mask (1 = valid, 0 = invalid).

        Returns:
            (index of the best action, Q-value tensor)
        """
        self.eval()
        with torch.no_grad():
            q_values = self.forward(state)

            # Push invalid actions' Q-values to a very large negative
            # number so argmax never selects them.
            if valid_actions is not None:
                q_values = q_values + (1 - valid_actions) * (-1e9)

            # Pick the action with the highest Q-value.
            # NOTE: .item() assumes a batch of size 1.
            best_action = torch.argmax(q_values, dim=1).item()

            return best_action, q_values


class ActorCriticModel(BaseModel):
    """
    Actor-Critic model - outputs both a policy (actor) and a state value
    (critic) from a shared feature trunk.
    """
    def __init__(self,
                 input_dim: int = 147,
                 action_dim: int = 27,
                 shared_hidden_dims: Optional[List[int]] = None,
                 actor_hidden_dims: Optional[List[int]] = None,
                 critic_hidden_dims: Optional[List[int]] = None):
        super().__init__(input_dim, action_dim)

        # Use None instead of mutable default list arguments
        # (shared list defaults would leak state across instances).
        if shared_hidden_dims is None:
            shared_hidden_dims = [256, 128]
        if actor_hidden_dims is None:
            actor_hidden_dims = [64]
        if critic_hidden_dims is None:
            critic_hidden_dims = [64]

        # Shared feature extraction trunk.
        self.shared_layers = nn.Sequential()
        prev_dim = input_dim
        for dim in shared_hidden_dims:
            self.shared_layers.add_module(f'fc_shared_{prev_dim}_{dim}', nn.Linear(prev_dim, dim))
            self.shared_layers.add_module(f'relu_shared_{dim}', nn.ReLU())
            prev_dim = dim

        # Actor head (policy).
        self.actor_layers = nn.Sequential()
        actor_dim = prev_dim
        for dim in actor_hidden_dims:
            self.actor_layers.add_module(f'fc_actor_{actor_dim}_{dim}', nn.Linear(actor_dim, dim))
            self.actor_layers.add_module(f'relu_actor_{dim}', nn.ReLU())
            actor_dim = dim
        self.actor_output = nn.Linear(actor_dim, action_dim)

        # Critic head (value).
        self.critic_layers = nn.Sequential()
        critic_dim = prev_dim
        for dim in critic_hidden_dims:
            self.critic_layers.add_module(f'fc_critic_{critic_dim}_{dim}', nn.Linear(critic_dim, dim))
            self.critic_layers.add_module(f'relu_critic_{dim}', nn.ReLU())
            critic_dim = dim
        self.critic_output = nn.Linear(critic_dim, 1)
        # Sigmoid keeps the critic's value in [0, 1] (a win-rate estimate).
        self.critic_activation = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Forward pass.

        Returns:
            (policy probability distribution, state value)
        """
        # Shared feature extraction.
        features = self.shared_layers(x)

        # Actor output: softmax over action logits.
        actor_features = self.actor_layers(features)
        actor_logits = self.actor_output(actor_features)
        policy = F.softmax(actor_logits, dim=1)

        # Critic output: scalar value squashed into [0, 1].
        critic_features = self.critic_layers(features)
        critic_logits = self.critic_output(critic_features)
        value = self.critic_activation(critic_logits)

        return policy, value

    def predict_action(self, state: torch.Tensor, valid_actions: Optional[torch.Tensor] = None) -> Tuple[int, torch.Tensor]:
        """
        Predict an action from the actor head.

        Args:
            state: game-state feature tensor (batch of size 1 assumed).
            valid_actions: optional action mask (1 = valid, 0 = invalid).

        Returns:
            (index of the best action, policy distribution)
        """
        self.eval()
        with torch.no_grad():
            policy, _ = self.forward(state)

            # Restrict to valid actions and renormalize.
            if valid_actions is not None:
                masked = policy * valid_actions
                total = masked.sum(dim=1, keepdim=True)
                # Guard against an all-invalid mask: clamping the
                # denominator avoids 0/0 -> NaN.
                policy = masked / total.clamp_min(1e-12)

            # Pick the highest-probability action.
            # NOTE: .item() assumes a batch of size 1.
            best_action = torch.argmax(policy, dim=1).item()

            return best_action, policy

    def predict_value(self, state: torch.Tensor) -> float:
        """
        Predict the state value from the critic head.

        Returns:
            State value as a Python float in [0, 1].
        """
        self.eval()
        with torch.no_grad():
            _, value = self.forward(state)
            return value.item()


class AttentionModel(BaseModel):
    """
    Attention-based mahjong model built on a Transformer encoder.

    The state vector is embedded, passed through the encoder as a
    length-1 sequence, and projected to a per-tile distribution.
    """
    def __init__(self,
                 input_dim: int = 147,
                 output_dim: int = 27,
                 embedding_dim: int = 64,
                 num_heads: int = 4,
                 num_layers: int = 2):
        super().__init__(input_dim, output_dim)

        # Project raw features into the embedding space.
        self.input_embedding = nn.Linear(input_dim, embedding_dim)

        # Stack of Transformer encoder layers (batch-first layout).
        self.transformer_encoder = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(
                d_model=embedding_dim,
                nhead=num_heads,
                batch_first=True,
            ),
            num_layers=num_layers,
        )

        # Final projection from embedding space to per-tile scores.
        self.output_layer = nn.Linear(embedding_dim, output_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Forward pass.

        Returns a softmax probability distribution over the output tiles.
        """
        embedded = self.input_embedding(x)
        # The encoder expects a 3D (batch, seq, dim) input, so treat each
        # state as a length-1 sequence and drop that axis afterwards.
        encoded = self.transformer_encoder(embedded.unsqueeze(1)).squeeze(1)
        return F.softmax(self.output_layer(encoded), dim=1)


def create_mahjong_model(model_type: str,
                         model_class: str = 'policy',
                         **kwargs) -> BaseModel:
    """
    Factory function for mahjong AI models.

    Args:
        model_type: backbone type ('mlp', 'cnn', 'resnet', 'attention')
        model_class: model category ('policy', 'value', 'dueling_dqn', 'actor_critic')
        **kwargs: extra parameters forwarded to the model constructor

    Returns:
        The constructed mahjong AI model instance.

    Raises:
        ValueError: if the (model_class, model_type) combination is unsupported.
    """
    # Guard-clause dispatch: return as soon as a category matches.
    if model_class == 'policy':
        return MahjongPolicyModel(model_type=model_type, **kwargs)
    if model_class == 'value':
        return MahjongValueModel(model_type=model_type, **kwargs)
    if model_class == 'dueling_dqn':
        return DuelingDQN(**kwargs)
    if model_class == 'actor_critic':
        return ActorCriticModel(**kwargs)
    if model_class == 'attention' and model_type == 'attention':
        return AttentionModel(**kwargs)
    raise ValueError(f"不支持的模型组合: {model_class} with {model_type}")