# gomoku_ai/model/gomoku_net.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os


class GomokuNet(nn.Module):
    """AlphaZero-style policy/value network for Gomoku.

    Input is a board of shape (board_size, board_size) — or a batch of
    them — and the network produces:
      * log-probabilities over every action (policy head), and
      * a scalar position evaluation in [-1, 1] (value head).

    Layers are registered under the names ``conv1..conv10`` / ``bn1..bn10``
    (rather than an ``nn.ModuleList``) so ``state_dict`` keys stay
    compatible with previously saved checkpoints.
    """

    NUM_CONV_LAYERS = 10   # depth of the shared convolutional trunk
    NUM_CHANNELS = 256     # width of every trunk layer

    def __init__(self, game, learning_rate=0.001, device='cpu'):
        """
        Args:
            game: object exposing ``get_board_size() -> (h, w)`` and
                ``get_action_size() -> int``. Only square boards are
                supported; the first element of ``get_board_size()`` is used.
            learning_rate: Adam learning rate.
            device: torch device string the model is moved to.
        """
        super(GomokuNet, self).__init__()
        self.board_size = game.get_board_size()[0]
        self.action_size = game.get_action_size()
        self.device = device

        # Shared trunk: NUM_CONV_LAYERS conv+BN blocks. The first layer maps
        # the single input plane up to NUM_CHANNELS; the rest keep the width.
        for i in range(1, self.NUM_CONV_LAYERS + 1):
            in_channels = 1 if i == 1 else self.NUM_CHANNELS
            setattr(self, f'conv{i}',
                    nn.Conv2d(in_channels, self.NUM_CHANNELS, kernel_size=3, padding=1))
            setattr(self, f'bn{i}', nn.BatchNorm2d(self.NUM_CHANNELS))

        # Policy head: 1x1 conv down to 2 planes, flattened into a linear
        # map over all actions.
        self.policy_conv = nn.Conv2d(self.NUM_CHANNELS, 2, kernel_size=1)
        self.policy_bn = nn.BatchNorm2d(2)
        self.policy_fc = nn.Linear(2 * self.board_size * self.board_size, self.action_size)

        # Value head: 1x1 conv to a single plane, two linear layers, tanh
        # squashes the output into [-1, 1].
        self.value_conv = nn.Conv2d(self.NUM_CHANNELS, 1, kernel_size=1)
        self.value_bn = nn.BatchNorm2d(1)
        self.value_fc1 = nn.Linear(1 * self.board_size * self.board_size, 256)
        self.value_fc2 = nn.Linear(256, 1)

        self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)

        self.to(device)

    def forward(self, s):
        """Run the network.

        Args:
            s: tensor reshapeable to (batch, board_size, board_size).

        Returns:
            Tuple ``(log_pi, v)``: log-probabilities of shape
            (batch, action_size) and values of shape (batch, 1).
        """
        # Add the single channel dimension expected by the conv trunk.
        s = s.view(-1, 1, self.board_size, self.board_size)
        for i in range(1, self.NUM_CONV_LAYERS + 1):
            conv = getattr(self, f'conv{i}')
            bn = getattr(self, f'bn{i}')
            s = F.relu(bn(conv(s)))

        # Policy head — return log-probabilities; callers exponentiate.
        p = F.relu(self.policy_bn(self.policy_conv(s)))
        p = p.view(p.size(0), -1)
        log_pi = F.log_softmax(self.policy_fc(p), dim=1)

        # Value head — scalar evaluation in [-1, 1].
        v = F.relu(self.value_bn(self.value_conv(s)))
        v = v.view(v.size(0), -1)
        v = F.relu(self.value_fc1(v))
        v = torch.tanh(self.value_fc2(v))

        return log_pi, v

    def predict(self, board):
        """Evaluate a single board position.

        Args:
            board: numpy array of shape (board_size, board_size).

        Returns:
            Tuple ``(pi, v)``: ``pi`` is a numpy array of action
            probabilities (not log-probabilities) of shape (action_size,);
            ``v`` is a numpy array of shape (1,) holding the value estimate.
        """
        board = torch.tensor(board, dtype=torch.float32, device=self.device)
        board = board.view(1, self.board_size, self.board_size)

        # eval() so BatchNorm uses running statistics for the single sample.
        self.eval()
        with torch.no_grad():
            log_pi, v = self.forward(board)

        # Convert log-probabilities back to probabilities for the caller.
        pi = torch.exp(log_pi).cpu().numpy()[0]
        v = v.cpu().numpy()[0]
        return pi, v

    def train_step(self, examples, batch_size=64):
        """Run one optimization step on a random mini-batch.

        Args:
            examples: list of training examples, each of the form
                ``(board, pi, v)`` — board array, target policy
                distribution, and target value.
            batch_size: number of examples sampled (with replacement).

        Returns:
            Tuple of floats ``(total_loss, policy_loss, value_loss)``.
        """
        self.optimizer.zero_grad()

        # Sample a mini-batch with replacement and unpack the
        # (board, pi, v) triples into device tensors.
        batch_indices = np.random.randint(len(examples), size=batch_size)
        boards = torch.tensor(np.array([examples[i][0] for i in batch_indices]), dtype=torch.float32).to(self.device)
        target_pis = torch.tensor(np.array([examples[i][1] for i in batch_indices]), dtype=torch.float32).to(
            self.device)
        target_vs = torch.tensor(np.array([examples[i][2] for i in batch_indices]), dtype=torch.float32).to(self.device)

        # Forward pass in training mode (BatchNorm uses batch statistics).
        self.train()
        policy_logits, out_v = self.forward(boards)

        # Policy loss: KL divergence between the target distribution and the
        # network output. F.kl_div expects log-probabilities as input and
        # probabilities as target, which is exactly what we have.
        loss_pi = F.kl_div(policy_logits, target_pis, reduction='batchmean')

        # Value loss: MSE against the game outcome. view(-1) (not squeeze)
        # keeps shapes aligned even for a batch of size 1.
        loss_v = F.mse_loss(out_v.view(-1), target_vs)

        total_loss = loss_pi + loss_v

        total_loss.backward()
        self.optimizer.step()

        return total_loss.item(), loss_pi.item(), loss_v.item()

    def save(self, filename):
        """Save weights plus board/action sizes to ``filename``."""
        # dirname is '' for a bare filename; os.makedirs('') would raise,
        # so only create a directory when one is actually specified.
        model_dir = os.path.dirname(filename)
        if model_dir:
            os.makedirs(model_dir, exist_ok=True)
        torch.save({
            'state_dict': self.state_dict(),
            'board_size': self.board_size,
            'action_size': self.action_size
        }, filename)

    def load(self, filename):
        """Load model weights from ``filename``.

        Accepts both the current checkpoint format (a dict with a
        'state_dict' key) and legacy formats (a raw state_dict, either as
        a dict or a plain object). An architecture mismatch is reported
        but deliberately not re-raised, so training can restart fresh.

        Raises:
            FileNotFoundError: if ``filename`` does not exist.
            RuntimeError: if the checkpoint dict has an unknown layout.
        """
        if not os.path.exists(filename):
            raise FileNotFoundError(f"No model found at {filename}")

        checkpoint = torch.load(filename, map_location=self.device)

        state_dict_to_load = None

        if isinstance(checkpoint, dict):
            # Case 1: current format with an explicit 'state_dict' key.
            if 'state_dict' in checkpoint:
                state_dict_to_load = checkpoint['state_dict']
            # Case 2: the dict itself is a state_dict (keys are layer names;
            # a cheap heuristic is that the first key contains a '.').
            elif len(checkpoint) > 0 and '.' in list(checkpoint.keys())[0]:
                state_dict_to_load = checkpoint
            else:
                # Case 3: unknown layout — print the keys for debugging.
                print(f"警告: 模型文件 {filename} 是一个未知格式的字典。")
                print(f"可用的键: {list(checkpoint.keys())}")
                raise RuntimeError(f"无法在 {filename} 中找到 state_dict。")
        else:
            # Case 4: very old format — the checkpoint is the state_dict.
            state_dict_to_load = checkpoint

        if state_dict_to_load is not None:
            try:
                self.load_state_dict(state_dict_to_load)
                print(f"✅ 成功加载模型: {filename}")
            except RuntimeError as e:
                # Architecture mismatch (checkpoint from an older version of
                # the code). Deliberately swallowed so training starts fresh.
                print("\n" + "=" * 50)
                print("⚠️ 警告: 模型加载失败！")
                print(f"原因: 模型文件 '{filename}' 的架构与当前代码不匹配。")
                print("这通常是因为模型文件是用旧版本的代码生成的。")
                print("程序将跳过加载，从头开始训练一个新的模型。")
                print("=" * 50 + "\n")
        else:
            # Technically unreachable: every branch above either assigns
            # state_dict_to_load or raises.
            raise RuntimeError(f"无法从 {filename} 加载模型。")
