import torch
import torch.nn as nn
import torch.nn.functional as F
import math

class ResidualBlock(nn.Module):
    """Residual MLP block: two linear layers with a skip connection,
    followed by LayerNorm and a ReLU. Improves expressiveness and
    training stability of deeper fully-connected stacks.
    """

    def __init__(self, hidden_dim, dropout_rate=0.1):
        """
        Args:
            hidden_dim: width of the block (input and output dimension).
            dropout_rate: dropout probability; <= 0 disables dropout entirely.
        """
        super(ResidualBlock, self).__init__()
        self.linear1 = nn.Linear(hidden_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        # Identity stand-in keeps the module graph uniform when dropout is off.
        self.dropout = nn.Identity() if dropout_rate <= 0 else nn.Dropout(dropout_rate)
        self.layer_norm = nn.LayerNorm(hidden_dim)

    def forward(self, x):
        """Return relu(layer_norm(x + linear2(dropout(relu(linear1(x))))))."""
        h = self.dropout(F.relu(self.linear1(x)))
        h = self.linear2(h)
        # Skip connection, then post-norm as in the original design.
        return F.relu(self.layer_norm(h + x))

class NoisyLinear(nn.Module):
    """Noisy linear layer (NoisyNet) — replaces epsilon-greedy exploration
    with learned, factorized Gaussian parameter noise.

    In training mode the effective weight is ``mu + sigma * epsilon``;
    in eval mode only the deterministic ``mu`` parameters are used.
    """

    def __init__(self, in_features, out_features, std_init=0.5):
        """
        Args:
            in_features: input dimension.
            out_features: output dimension.
            std_init: initial scale of the sigma (noise std-dev) parameters.
        """
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init

        # Learnable mean/std of the weights; epsilon buffers hold the sampled
        # noise and move with the module (.to/.cuda) but receive no gradients.
        # torch.empty replaces the deprecated uninitialized FloatTensor ctor.
        self.weight_mu = nn.Parameter(torch.empty(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.empty(out_features, in_features))
        self.register_buffer('weight_epsilon', torch.empty(out_features, in_features))

        # Bias parameters, same mu/sigma/epsilon triplet.
        self.bias_mu = nn.Parameter(torch.empty(out_features))
        self.bias_sigma = nn.Parameter(torch.empty(out_features))
        self.register_buffer('bias_epsilon', torch.empty(out_features))

        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        """Initialize mu uniformly and sigma to a constant (NoisyNet scheme)."""
        mu_range = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-mu_range, mu_range)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-mu_range, mu_range)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def reset_noise(self):
        """Resample the factorized noise buffers."""
        epsilon_in = self._scale_noise(self.in_features)
        epsilon_out = self._scale_noise(self.out_features)
        # Factorized noise: one outer product instead of out*in independent
        # samples (torch.outer replaces the deprecated Tensor.ger).
        self.weight_epsilon.copy_(torch.outer(epsilon_out, epsilon_in))
        self.bias_epsilon.copy_(epsilon_out)

    def _scale_noise(self, size):
        """Return f(x) = sign(x) * sqrt(|x|) Gaussian noise.

        Sampled directly on this module's device so reset_noise avoids a
        CPU->GPU copy when the layer lives on an accelerator.
        """
        x = torch.randn(size, device=self.weight_mu.device)
        return x.sign().mul_(x.abs().sqrt_())

    def forward(self, x):
        """Apply the affine map: noisy in train mode, deterministic in eval."""
        if self.training:
            weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
            bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        else:
            weight = self.weight_mu
            bias = self.bias_mu
        return F.linear(x, weight, bias)

class QNetwork(nn.Module):
    """Feed-forward Q-network mapping a flat state vector to one Q-value
    per action, with optional residual blocks and a NoisyNet output head.
    """

    def __init__(self, state_dim, action_dim, hidden_dim=128,
                 use_residual=False, dropout_rate=0.1, use_noisy=False):
        """
        Args:
            state_dim: size of the flat input state.
            action_dim: number of discrete actions.
            hidden_dim: hidden-layer width.
            use_residual: replace the plain hidden layer with ResidualBlocks.
            dropout_rate: dropout probability; <= 0 disables dropout.
            use_noisy: use a NoisyLinear output layer instead of nn.Linear.
        """
        super(QNetwork, self).__init__()
        self.use_residual = use_residual
        self.use_noisy = use_noisy

        # Input projection.
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.dropout1 = nn.Identity() if dropout_rate <= 0 else nn.Dropout(dropout_rate)

        # Hidden stage: stack of residual blocks, or one extra linear layer.
        if use_residual:
            self.residual_blocks = nn.ModuleList(
                ResidualBlock(hidden_dim, dropout_rate) for _ in range(2)
            )
        else:
            self.fc2 = nn.Linear(hidden_dim, hidden_dim)
            self.dropout2 = nn.Identity() if dropout_rate <= 0 else nn.Dropout(dropout_rate)

        # Output head.
        if use_noisy:
            self.fc_out = NoisyLinear(hidden_dim, action_dim)
        else:
            self.fc_out = nn.Linear(hidden_dim, action_dim)

        self._init_weights()

    def _init_weights(self):
        """He-initialize plain linear layers (NoisyLinear initializes itself)."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.kaiming_normal_(module.weight, nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, state):
        """Return Q-values of shape (batch, action_dim)."""
        h = self.dropout1(F.relu(self.fc1(state)))
        if self.use_residual:
            for block in self.residual_blocks:
                h = block(h)
        else:
            h = self.dropout2(F.relu(self.fc2(h)))
        return self.fc_out(h)

    def reset_noise(self):
        """Resample the NoisyNet output head, when one is in use."""
        if self.use_noisy and hasattr(self.fc_out, 'reset_noise'):
            self.fc_out.reset_noise()

    def count_parameters(self):
        """Number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

class DuelingQNetwork(nn.Module):
    """Dueling DQN: a shared feature trunk feeds separate state-value and
    action-advantage streams, recombined as Q = V + A - mean(A).
    """

    def __init__(self, state_dim, action_dim, hidden_dim=128,
                 use_residual=False, dropout_rate=0.1, use_noisy=False):
        """
        Args:
            state_dim: size of the flat input state vector.
            action_dim: number of discrete actions (Q-values emitted).
            hidden_dim: width of the shared feature layers.
            use_residual: add a stack of ResidualBlocks after the input layer.
            dropout_rate: dropout probability; <= 0 disables dropout.
            use_noisy: use NoisyLinear output layers instead of nn.Linear.
        """
        super(DuelingQNetwork, self).__init__()
        self.use_residual = use_residual
        self.use_noisy = use_noisy
        self.action_dim = action_dim

        # Shared feature extractor; forward() applies these strictly in order.
        self.feature_layers = nn.ModuleList([
            nn.Linear(state_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
        ])

        if use_residual:
            self.residual_blocks = nn.ModuleList([
                ResidualBlock(hidden_dim, dropout_rate) for _ in range(3)
            ])
        else:
            # Two extra plain layers when residual blocks are disabled.
            self.feature_layers.extend([
                nn.Linear(hidden_dim, hidden_dim),
                nn.ReLU(),
                nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
                nn.Linear(hidden_dim, hidden_dim),
                nn.ReLU(),
            ])
        feature_output_dim = hidden_dim

        # Both heads share the same two-layer shape; the builder keeps the
        # nn.Sequential child indices identical to the previous layout, so
        # existing state_dicts still load.
        stream_hidden = hidden_dim // 2

        def _build_stream(out_dim):
            # Hidden layer -> ReLU -> (Dropout|Identity) -> final projection.
            layers = [
                nn.Linear(feature_output_dim, stream_hidden),
                nn.ReLU(),
                nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity(),
            ]
            if use_noisy:
                layers.append(NoisyLinear(stream_hidden, out_dim))
            else:
                layers.append(nn.Linear(stream_hidden, out_dim))
            return nn.Sequential(*layers)

        self.value_stream = _build_stream(1)                # V(s)
        self.advantage_stream = _build_stream(action_dim)   # A(s, a)

        self._init_weights()

    def _init_weights(self):
        """He-initialize linear layers; LayerNorm gets identity affine params."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                # He initialization suits the ReLU activations used throughout.
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def forward(self, state):
        """Return Q-values of shape (batch, action_dim).

        The previous grouped-by-3 iteration over feature_layers was an
        obfuscated equivalent of sequential application; this applies the
        layers in order directly.
        """
        x = state
        for layer in self.feature_layers:
            x = layer(x)

        if self.use_residual:
            for block in self.residual_blocks:
                x = block(x)

        value = self.value_stream(x)
        advantage = self.advantage_stream(x)

        # Dueling aggregation with the mean-subtraction identifiability
        # constraint: Q(s,a) = V(s) + A(s,a) - mean_a A(s,a).
        return value + advantage - advantage.mean(dim=1, keepdim=True)

    def reset_noise(self):
        """Resample every NoisyLinear layer's noise buffers."""
        if self.use_noisy:
            for module in self.modules():
                if isinstance(module, NoisyLinear):
                    module.reset_noise()

    def count_parameters(self):
        """Number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

class ConvolutionalDuelingQNetwork(nn.Module):
    """Dueling DQN with a 2D convolutional trunk, intended for board games.

    Accepts a flat board (batch, board_size**2), a single-plane board
    (batch, board_size, board_size), or an already multi-channel tensor
    (batch, 2 + history_steps, board_size, board_size).
    """

    def __init__(self, board_size, action_dim=None, hidden_dim=256,
                 dropout_rate=0.1, use_noisy=False, history_steps=4):
        """
        Args:
            board_size: side length of the square board.
            action_dim: number of actions; defaults to board_size ** 2.
            hidden_dim: channel count of the last conv layer / FC width.
            dropout_rate: dropout probability used in the FC layers.
            use_noisy: use NoisyLinear heads instead of nn.Linear.
            history_steps: number of extra history input planes.
        """
        super(ConvolutionalDuelingQNetwork, self).__init__()
        self.board_size = board_size
        self.action_dim = action_dim if action_dim else board_size * board_size
        self.use_noisy = use_noisy
        self.history_steps = history_steps

        # Channels: current player's stones + opponent's stones + history planes.
        input_channels = 2 + history_steps

        # Convolutional feature extractor suited to the board's 2D structure.
        self.conv_layers = nn.Sequential(
            # Stage 1: basic local patterns.
            nn.Conv2d(input_channels, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(32),

            # Stage 2: composite patterns.
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(64),

            # Stage 3: larger-scale patterns.
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(128),

            # Stage 4: deepest features.
            nn.Conv2d(128, hidden_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.BatchNorm2d(hidden_dim),

            # Fixed 4x4 spatial output regardless of board size.
            nn.AdaptiveAvgPool2d((4, 4))
        )

        # Flattened size after the adaptive pool.
        conv_output_dim = hidden_dim * 4 * 4

        # Fully connected feature layers.
        self.fc_features = nn.Sequential(
            nn.Linear(conv_output_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout_rate),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )

        # Value stream V(s).
        if use_noisy:
            self.value_stream = nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim // 2),
                nn.ReLU(),
                nn.Dropout(dropout_rate),
                NoisyLinear(hidden_dim // 2, 1)
            )
        else:
            self.value_stream = nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim // 2),
                nn.ReLU(),
                nn.Dropout(dropout_rate),
                nn.Linear(hidden_dim // 2, 1)
            )

        # Advantage stream A(s, a).
        if use_noisy:
            self.advantage_stream = nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim // 2),
                nn.ReLU(),
                nn.Dropout(dropout_rate),
                NoisyLinear(hidden_dim // 2, self.action_dim)
            )
        else:
            self.advantage_stream = nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim // 2),
                nn.ReLU(),
                nn.Dropout(dropout_rate),
                nn.Linear(hidden_dim // 2, self.action_dim)
            )

        self._init_weights()

        print(f"网络参数量: {self.count_parameters():,}")

    def _init_weights(self):
        """He-initialize conv/linear layers; BatchNorm gets identity affine."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _expand_to_channels(self, state, batch_size):
        """Lift a flat or single-plane board to the expected channel layout.

        The board becomes channel 0; the opponent plane and all history
        planes are zero-filled, matching input_channels = 2 + history_steps.
        NOTE(review): these padded planes carry no information — presumably
        real multi-channel input comes from StateProcessor during training.
        """
        board = state.view(batch_size, 1, self.board_size, self.board_size)
        padding = torch.zeros(
            batch_size, 1 + self.history_steps, self.board_size, self.board_size,
            dtype=board.dtype, device=board.device,
        )
        return torch.cat([board, padding], dim=1)

    def forward(self, state):
        """Return Q-values of shape (batch, action_dim)."""
        batch_size = state.shape[0]

        # Flat (batch, N*N) or single-plane (batch, N, N) inputs get the same
        # zero-padded channel expansion; 4D input is assumed pre-formatted.
        if state.dim() in (2, 3):
            state = self._expand_to_channels(state, batch_size)

        # Convolutional features, flattened for the FC layers.
        x = self.conv_layers(state)
        x = x.view(batch_size, -1)

        features = self.fc_features(x)

        value = self.value_stream(features)
        advantage = self.advantage_stream(features)

        # Dueling aggregation: Q = V + A - mean(A).
        advantage_mean = advantage.mean(dim=1, keepdim=True)
        return value + advantage - advantage_mean

    def reset_noise(self):
        """Resample every NoisyLinear layer's noise buffers."""
        if self.use_noisy:
            for module in self.modules():
                if isinstance(module, NoisyLinear):
                    module.reset_noise()

    def count_parameters(self):
        """Number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

class StateProcessor:
    """Converts raw board states into the multi-channel tensor format the
    convolutional network expects: own-stones plane, opponent plane, and
    history_steps additional planes.
    """

    def __init__(self, board_size=15, history_steps=4):
        self.board_size = board_size
        self.history_steps = history_steps

    def process_state(self, board_batch, action_history_batch=None, cur_player_batch=None):
        """Build a (batch, 2 + history_steps, N, N) state tensor.

        Args:
            board_batch: board(s) as a list or tensor; accepted shapes are
                (N*N,), (N, N), (batch, N*N) or (batch, N, N).
            action_history_batch: historical action sequences.
                NOTE(review): currently unused — the history planes below
                always stay zero; confirm whether they were meant to encode
                these actions.
            cur_player_batch: per-sample current player (+1 or -1);
                defaults to all +1.

        Returns:
            Processed state tensor of shape (batch, 2 + history_steps, N, N).
        """
        if isinstance(board_batch, list):
            board_batch = torch.tensor(board_batch, dtype=torch.float32)

        n = self.board_size

        # Normalize input to (batch, N, N).
        if board_batch.dim() == 1:
            # One flattened board.
            board_batch = board_batch.view(1, n, n)
        elif board_batch.dim() == 2:
            if board_batch.shape[0] == n:
                # One 2D board -> add a batch dimension.
                board_batch = board_batch.unsqueeze(0)
            elif board_batch.shape[1] == n * n:
                # A batch of flattened boards -> reshape each one.
                board_batch = board_batch.view(board_batch.shape[0], n, n)

        batch_size = board_batch.shape[0]

        # Defaults when no history / player info is supplied.
        if action_history_batch is None:
            action_history_batch = [[-1] * self.history_steps for _ in range(batch_size)]
        if cur_player_batch is None:
            cur_player_batch = torch.ones(batch_size)

        # Add the channel dimension: (batch, 1, N, N).
        if board_batch.dim() == 3:
            board_batch = board_batch.unsqueeze(1)

        # Plane 0: current player's stones; plane 1: opponent's stones.
        own = (board_batch > 0).float()
        opp = (board_batch < 0).float()

        # History planes (all zero — see NOTE above).
        history = [
            torch.zeros((batch_size, 1, n, n)).float()
            for _ in range(self.history_steps)
        ]

        # If a sample's current player is -1, its two stone planes swap roles.
        for idx in range(batch_size):
            if cur_player_batch[idx] == -1:
                swap = own[idx].clone()
                own[idx].copy_(opp[idx])
                opp[idx].copy_(swap)

        return torch.cat([own, opp] + history, dim=1)

    def create_sample_batch(self, batch_size=32):
        """Return random (boards, action histories, players) for testing."""
        boards = torch.randint(-1, 2, (batch_size, self.board_size, self.board_size)).float()

        histories = [
            [torch.randint(0, self.board_size ** 2, (1,)).item()
             for _ in range(self.history_steps)]
            for _ in range(batch_size)
        ]

        # Each entry is -1 or +1.
        players = torch.randint(0, 2, (batch_size,)) * 2 - 1

        return boards, histories, players

# 工厂函数
def create_gomoku_network(board_size=15, config='large', **kwargs):
    """Factory for Gomoku Q-networks.

    Builds a ConvolutionalDuelingQNetwork from a named preset; any keyword
    arguments override the preset's values. Unknown config names fall back
    to passing only the explicit keyword arguments through.
    """
    presets = {
        'small':  {'hidden_dim': 128, 'history_steps': 2, 'dropout_rate': 0.1},
        'medium': {'hidden_dim': 192, 'history_steps': 3, 'dropout_rate': 0.1},
        'large':  {'hidden_dim': 256, 'history_steps': 4, 'dropout_rate': 0.1},
        'xlarge': {'hidden_dim': 320, 'history_steps': 4, 'dropout_rate': 0.1},
    }

    # Copy so caller overrides never touch the preset table.
    params = dict(presets.get(config, {}))
    params.update(kwargs)
    return ConvolutionalDuelingQNetwork(board_size=board_size, **params)

# 示例使用和测试
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Smoke-test every preset of the convolutional network.
    for config in ('small', 'medium', 'large', 'xlarge'):
        print(f"\n{config.upper()} Network:")
        net = create_gomoku_network(config=config, use_noisy=True)

        # Build a small random batch in the network's expected format.
        processor = StateProcessor(history_steps=net.history_steps)
        boards, histories, players = processor.create_sample_batch(batch_size=4)
        state = processor.process_state(boards, histories, players)

        # Forward pass without gradients.
        net.eval()
        with torch.no_grad():
            q_values = net(state)
            print(f"参数量: {net.count_parameters():,}")
            print(f"输入形状: {state.shape}")
            print(f"输出形状: {q_values.shape}")
            print(f"Q值范围: [{q_values.min().item():.3f}, {q_values.max().item():.3f}]")

        # Exercise the NoisyNet resampling path.
        if net.use_noisy:
            net.reset_noise()
            print("噪声已重置")

    # Smoke-test the fully-connected variants on flat input.
    print("\n基础网络测试:")
    basic_net = QNetwork(state_dim=225, action_dim=225, hidden_dim=256, use_residual=True, use_noisy=True)
    print(f"基础Q网络参数量: {basic_net.count_parameters():,}")

    dueling_net = DuelingQNetwork(state_dim=225, action_dim=225, hidden_dim=256, use_residual=True, use_noisy=True)
    print(f"Dueling Q网络参数量: {dueling_net.count_parameters():,}")

    sample = torch.randn(4, 225)
    with torch.no_grad():
        basic_out = basic_net(sample)
        dueling_out = dueling_net(sample)
        print(f"基础网络输出形状: {basic_out.shape}")
        print(f"Dueling网络输出形状: {dueling_out.shape}")