"""
五子棋神经网络模型 - AlphaGo Zero简化版
包含：策略网络（预测落子概率）+ 价值网络（评估局面胜率）
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class GomokuNet(nn.Module):
    """
    Gomoku policy/value network (simplified AlphaGo Zero).

    Input:  (batch, 3, board_size, board_size) feature planes
            [current player's stones, opponent's stones, empty points]
    Output: policy — log-probabilities over the board_size**2 moves
            value  — scalar position evaluation in [-1, 1]
    """

    def __init__(self, board_size=15, num_channels=128):
        """
        Args:
            board_size: side length of the (square) board.
            num_channels: width of the shared convolutional trunk.
        """
        super(GomokuNet, self).__init__()
        self.board_size = board_size

        # Shared convolutional trunk (feature extraction).
        self.conv1 = nn.Conv2d(3, num_channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)

        # Batch normalization, one per trunk convolution.
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)
        self.bn3 = nn.BatchNorm2d(num_channels)
        self.bn4 = nn.BatchNorm2d(num_channels)

        # Policy head: 1x1 conv down to 4 planes, then one logit per board point.
        self.policy_conv = nn.Conv2d(num_channels, 4, kernel_size=1)
        self.policy_bn = nn.BatchNorm2d(4)
        self.policy_fc = nn.Linear(4 * board_size * board_size, board_size * board_size)

        # Value head: 1x1 conv down to 2 planes, then two linear layers to a scalar.
        self.value_conv = nn.Conv2d(num_channels, 2, kernel_size=1)
        self.value_bn = nn.BatchNorm2d(2)
        self.value_fc1 = nn.Linear(2 * board_size * board_size, 64)
        self.value_fc2 = nn.Linear(64, 1)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x: (batch, 3, board_size, board_size) float tensor.
        Returns:
            (policy, value): policy is (batch, board_size**2) log-probabilities
            (log_softmax — pair with a cross-entropy-style loss on log-probs);
            value is (batch, 1) in [-1, 1] via tanh.
        """
        # Shared feature extraction.
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.relu(self.bn4(self.conv4(x)))

        # Policy head.
        policy = F.relu(self.policy_bn(self.policy_conv(x)))
        policy = policy.view(-1, 4 * self.board_size * self.board_size)
        policy = self.policy_fc(policy)
        policy = F.log_softmax(policy, dim=1)  # log-probabilities

        # Value head.
        value = F.relu(self.value_bn(self.value_conv(x)))
        value = value.view(-1, 2 * self.board_size * self.board_size)
        value = F.relu(self.value_fc1(value))
        value = torch.tanh(self.value_fc2(value))  # squash to [-1, 1]

        return policy, value

    def predict(self, board_state):
        """
        Evaluate a single position (inference mode, no gradients).

        Args:
            board_state: numpy array, either a raw (board_size, board_size)
                board (1 = black, -1 = white, 0 = empty) or an already encoded
                (3, board_size, board_size) plane stack.
        Returns:
            (policy, value): policy is a flat (board_size**2,) numpy array of
            move probabilities (sums to 1); value is a Python float in [-1, 1].
        """
        self.eval()
        with torch.no_grad():
            # Encode a raw 2-D board into the 3-plane representation.
            if board_state.shape == (self.board_size, self.board_size):
                board_state = self._encode_board(board_state)

            # Fix: build the input on the same device as the model's parameters,
            # so predict() keeps working after model.to('cuda').
            device = next(self.parameters()).device
            x = torch.as_tensor(board_state, dtype=torch.float32,
                                device=device).unsqueeze(0)  # (1, 3, H, W)

            policy, value = self.forward(x)

            # Convert log-probabilities back to probabilities; move to CPU first
            # (required before .numpy() when the model lives on a GPU).
            policy = torch.exp(policy).squeeze(0).cpu().numpy()  # (board_size**2,)
            value = value.item()

            return policy, value

    def _encode_board(self, board):
        """
        Encode a raw board as a 3-plane float image.

        Args:
            board: (board_size, board_size) with 1 = black, -1 = white, 0 = empty.
        Returns:
            (3, board_size, board_size) float32 array: [black, white, empty].
            NOTE(review): plane 0 is always black, not "side to move" — callers
            must flip stone signs themselves when white is to play; confirm
            against the self-play pipeline.
        """
        encoded = np.zeros((3, self.board_size, self.board_size), dtype=np.float32)
        encoded[0] = (board == 1).astype(np.float32)   # black stones
        encoded[1] = (board == -1).astype(np.float32)  # white stones
        encoded[2] = (board == 0).astype(np.float32)   # empty points
        return encoded


class AlphaGomokuTrainer:
    """Training helper: optimizer setup, single-step updates, checkpointing."""

    def __init__(self, model, lr=0.001, device='cpu'):
        """
        Args:
            model: network whose forward returns (log-prob policy, value).
            lr: Adam learning rate.
            device: torch device string, e.g. 'cpu' or 'cuda'.
        """
        self.model = model.to(device)
        self.device = device
        # weight_decay provides the L2 regularization term of the AlphaGo Zero loss.
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr,
                                          weight_decay=1e-4)

    def train_step(self, states, policies, values):
        """
        Run a single optimization step.

        Args:
            states: (batch, 3, 15, 15) encoded boards.
            policies: (batch, 225) target move distributions (probabilities).
            values: (batch, 1) or (batch,) target outcomes in [-1, 1].
        Returns:
            dict with 'total_loss', 'policy_loss', 'value_loss' as Python floats.
        """
        self.model.train()

        # as_tensor avoids an extra copy when the inputs are already float32
        # arrays and places them directly on the training device in one step.
        states = torch.as_tensor(states, dtype=torch.float32, device=self.device)
        target_policies = torch.as_tensor(policies, dtype=torch.float32, device=self.device)
        target_values = torch.as_tensor(values, dtype=torch.float32, device=self.device)

        pred_policies, pred_values = self.model(states)

        # Cross-entropy between target distribution and predicted log-probs
        # (model outputs log_softmax).
        policy_loss = -torch.mean(torch.sum(target_policies * pred_policies, dim=1))
        # Flatten both sides to (batch,) explicitly: robust to targets arriving
        # as (batch,) or (batch, 1), and avoids silent broadcasting in mse_loss.
        value_loss = F.mse_loss(pred_values.reshape(-1), target_values.reshape(-1))
        total_loss = policy_loss + value_loss

        self.optimizer.zero_grad()
        total_loss.backward()
        self.optimizer.step()

        return {
            'total_loss': total_loss.item(),
            'policy_loss': policy_loss.item(),
            'value_loss': value_loss.item()
        }

    def save_model(self, path):
        """Save model and optimizer state to *path*."""
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
        }, path)
        print(f"✅ 模型已保存到: {path}")

    def load_model(self, path):
        """Load model and optimizer state from *path*.

        NOTE(review): torch.load unpickles arbitrary objects — only load
        checkpoints from trusted sources (consider weights_only=True on
        torch >= 1.13).
        """
        checkpoint = torch.load(path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        print(f"✅ 模型已加载: {path}")

def create_model(board_size=15, device='cpu'):
    """Convenience factory: build a GomokuNet and place it on *device*."""
    return GomokuNet(board_size=board_size).to(device)


if __name__ == "__main__":
    # 测试代码
    print("🧪 测试神经网络...")
    
    model = create_model()
    print(f"✅ 模型创建成功")
    print(f"📊 参数量: {sum(p.numel() for p in model.parameters()):,}")
    
    # 测试前向传播
    dummy_input = torch.randn(2, 3, 15, 15)
    policy, value = model(dummy_input)
    print(f"✅ 策略输出形状: {policy.shape}")  # (2, 225)
    print(f"✅ 价值输出形状: {value.shape}")    # (2, 1)
    
    # 测试预测
    board = np.zeros((15, 15))
    board[7, 7] = 1  # 天元
    policy_probs, value_est = model.predict(board)
    print(f"✅ 预测成功 - 价值评估: {value_est:.3f}")
    print(f"✅ 策略概率和: {policy_probs.sum():.3f}")
