import numpy as np
import random
import os
import pygame
from config import CLASSIC_AI_DEPTH, USE_SELF_PLAY

class Trainer:
    """Training driver for a Gomoku policy-value network.

    Collects ``(canonical_board, player, action_probs, value)`` examples by
    playing episodes (pure self-play or network-vs-classic-AI, optionally
    rendered via pygame), then trains ``self.net`` on shuffled mini-batches
    and checkpoints the model after every iteration.
    """

    def __init__(self, game, net, mcts_simulations=200, checkpoint_dir="./checkpoints"):
        """
        Args:
            game: game adapter providing board transitions, canonical forms,
                terminal detection and (optionally) pygame rendering.
            net: policy-value network exposing ``train_step``, ``save``,
                ``state_dict``/``load_state_dict`` and ``device``.
            mcts_simulations: number of MCTS simulations per move.
            checkpoint_dir: directory where model checkpoints are written.
        """
        self.game = game
        self.net = net
        self.mcts_simulations = mcts_simulations
        self.checkpoint_dir = checkpoint_dir

        # exist_ok avoids the check-then-create race of os.path.exists().
        os.makedirs(checkpoint_dir, exist_ok=True)

    def evaluate_board(self, board, player):
        """Heuristic local reward: own chains + blocking opponent + center control.

        Scans every cell for 2- and 3-in-a-row patterns along the horizontal,
        vertical and main-diagonal directions, rewarding the given player's
        chains (+0.05 / +0.1) and penalizing the opponent's (-0.07 / -0.15),
        plus a small bonus/penalty for occupying the center.

        NOTE(review): the anti-diagonal direction (1, -1) is not scored —
        confirm this asymmetry is intentional.

        Args:
            board: square 2-D grid holding ``player`` / ``-player`` / 0 values.
            player: 1 or -1; perspective from which the reward is computed.

        Returns:
            float reward, positive when the position favours ``player``.
        """
        reward = 0.0
        board_size = len(board)
        opponent = -player
        # Direction order (horizontal, vertical, main diagonal) matches the
        # original per-cell check order so float summation is identical.
        directions = ((0, 1), (1, 0), (1, 1))

        for i in range(board_size):
            for j in range(board_size):
                stone = board[i][j]
                if stone == player:
                    pair_score, triple_score = 0.05, 0.1
                elif stone == opponent:
                    # Opponent chains are penalized, i.e. blocking them pays off.
                    pair_score, triple_score = -0.07, -0.15
                else:
                    continue
                for di, dj in directions:
                    if i + di < board_size and j + dj < board_size \
                            and board[i + di][j + dj] == stone:
                        reward += pair_score
                    if i + 2 * di < board_size and j + 2 * dj < board_size \
                            and board[i + di][j + dj] == stone \
                            and board[i + 2 * di][j + 2 * dj] == stone:
                        reward += triple_score

        # Center-control bonus.
        center = board_size // 2
        if board[center][center] == player:
            reward += 0.05
        if board[center][center] == opponent:
            reward -= 0.05

        return reward

    def _pump_events(self):
        """Drain pending pygame events so the window stays responsive;
        hard-exits the process on window close."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.game.close()
                os._exit(0)  # immediate exit; no cleanup beyond game.close() needed
            if event.type == pygame.VIDEORESIZE:
                self.game.visual_board.handle_resize(event)

    @staticmethod
    def _assign_values(train_examples, game_result):
        """Back-fill the terminal result into each pending example.

        Converts 5-tuples ``(board, player, probs, None, local_reward)`` into
        final 4-tuples ``(board, player, probs, value)`` where the value is the
        game result with a sign alternating backwards from the last move, plus
        the example's local heuristic reward.

        NOTE(review): the ``(-1) ** (n - 1 - i)`` alternation assumes examples
        strictly alternate players; in vs-classic mode only the network's own
        moves are recorded, so the stored player field may be the correct sign
        source instead — verify against ``get_game_ended`` semantics.
        """
        n = len(train_examples)
        return [
            (cb, player, probs, game_result * ((-1) ** (n - 1 - i)) + local_r)
            for i, (cb, player, probs, _, local_r) in enumerate(train_examples)
        ]

    def execute_episode(self):
        """Play one training episode, dispatching on visual mode and opponent.

        Returns:
            list of ``(canonical_board, player, action_probs, value)`` examples.
        """
        if self.game.visual_mode:
            if USE_SELF_PLAY:
                return self._execute_episode_self_play_visual()
            return self._execute_episode_vs_classic_visual()
        if USE_SELF_PLAY:
            return self._execute_episode_self_play()
        return self._execute_episode_vs_classic()

    def _execute_episode_vs_classic(self):
        """Headless episode: network+MCTS (player 1) vs classic AI (player -1).

        Only the network's own moves are recorded as training examples,
        mirroring the visual variant of this mode.
        """
        from ai.classic_ai import ClassicAI
        from mcts.mcts import MCTS

        classic_ai = ClassicAI(self.game.board_size, difficulty=CLASSIC_AI_DEPTH)
        board = self.game.get_init_board()
        current_player = 1
        mcts = MCTS(self.game, self.net, self.mcts_simulations, self.net.device, trainer=self)
        train_examples = []

        while True:
            if current_player == 1:
                canonical_board = self.game.get_canonical_form(board, current_player)
                # High temperature early for exploration, near-greedy later.
                temp = 1.0 if len(train_examples) < 20 else 0.1
                action_probs = mcts.get_action_prob(canonical_board, temp)
                local_reward = self.evaluate_board(board, current_player)
                train_examples.append((canonical_board, current_player, action_probs, None, local_reward))
                action = np.random.choice(len(action_probs), p=action_probs)
            else:
                # BUG FIX: classic_ai was instantiated but never consulted, so
                # both sides were silently played by MCTS. Player -1 now moves
                # via the classic AI, matching the visual variant.
                action = classic_ai.get_action(board)

            board, current_player = self.game.get_next_state(board, current_player, action)

            game_result = self.game.get_game_ended(board)
            if game_result != 0:
                return self._assign_values(train_examples, game_result)

    def _execute_episode_vs_classic_visual(self):
        """Rendered episode: network+MCTS (black) vs classic AI (white)."""
        from ai.classic_ai import ClassicAI
        from mcts.mcts import MCTS

        classic_ai = ClassicAI(self.game.board_size, difficulty=CLASSIC_AI_DEPTH)
        board = self.game.get_init_board()
        current_player = 1
        mcts = MCTS(self.game, self.net, self.mcts_simulations, self.net.device, trainer=self)
        train_examples = []
        move_count = 0

        self.game.render(board, current_player=current_player,
                         info_text=f"AI(黑) vs 经典AI(白) - 第{move_count}手")

        while True:
            self._pump_events()

            if current_player == 1:
                canonical_board = self.game.get_canonical_form(board, current_player)
                # High temperature early for exploration, near-greedy later.
                temp = 1.0 if len(train_examples) < 20 else 0.1
                action_probs = mcts.get_action_prob(canonical_board, temp)
                local_reward = self.evaluate_board(board, current_player)
                train_examples.append((canonical_board, current_player, action_probs, None, local_reward))
                action = np.random.choice(len(action_probs), p=action_probs)
            else:
                action = classic_ai.get_action(board)
            row, col = divmod(action, self.game.board_size)

            board, current_player = self.game.get_next_state(board, current_player, action)
            move_count += 1

            if self.game.visual_board:
                self.game.visual_board.set_last_move((row, col))
            self.game.render(board, current_player=current_player,
                             info_text=f"第{move_count}手 - {'AI' if current_player==-1 else '经典AI'}走了({row},{col})")

            game_result = self.game.get_game_ended(board)
            if game_result != 0:
                return self._assign_values(train_examples, game_result)

    def _execute_episode_self_play(self):
        """Headless self-play: current net (player 1) vs a frozen copy (player -1)."""
        from model.gomoku_net import GomokuNet
        from mcts.mcts import MCTS

        # Frozen snapshot of the current weights serves as the opponent.
        opponent_net = GomokuNet(self.game, device=self.net.device)
        opponent_net.load_state_dict(self.net.state_dict())

        board = self.game.get_init_board()
        current_player = 1
        mcts_player1 = MCTS(self.game, self.net, self.mcts_simulations, self.net.device, trainer=self)
        mcts_player2 = MCTS(self.game, opponent_net, self.mcts_simulations, self.net.device, trainer=self)
        train_examples = []

        while True:
            canonical_board = self.game.get_canonical_form(board, current_player)
            mcts = mcts_player1 if current_player == 1 else mcts_player2
            # High temperature early for exploration, near-greedy later.
            temp = 1.0 if len(train_examples) < 20 else 0.1
            action_probs = mcts.get_action_prob(canonical_board, temp)
            local_reward = self.evaluate_board(board, current_player)
            train_examples.append((canonical_board, current_player, action_probs, None, local_reward))
            action = np.random.choice(len(action_probs), p=action_probs)
            board, current_player = self.game.get_next_state(board, current_player, action)

            game_result = self.game.get_game_ended(board)
            if game_result != 0:
                return self._assign_values(train_examples, game_result)

    def _execute_episode_self_play_visual(self):
        """Rendered self-play: current net (black) vs a frozen copy (white)."""
        from model.gomoku_net import GomokuNet
        from mcts.mcts import MCTS

        # Frozen snapshot of the current weights serves as the opponent.
        opponent_net = GomokuNet(self.game, device=self.net.device)
        opponent_net.load_state_dict(self.net.state_dict())

        board = self.game.get_init_board()
        current_player = 1
        mcts_player1 = MCTS(self.game, self.net, self.mcts_simulations, self.net.device, trainer=self)
        mcts_player2 = MCTS(self.game, opponent_net, self.mcts_simulations, self.net.device, trainer=self)
        train_examples = []
        move_count = 0

        self.game.render(board, current_player=current_player,
                         info_text=f"自我对弈 - 玩家1(黑) vs 玩家2(白) - 第{move_count}手")

        while True:
            self._pump_events()

            canonical_board = self.game.get_canonical_form(board, current_player)
            mcts = mcts_player1 if current_player == 1 else mcts_player2
            # High temperature early for exploration, near-greedy later.
            temp = 1.0 if len(train_examples) < 20 else 0.1
            action_probs = mcts.get_action_prob(canonical_board, temp)
            local_reward = self.evaluate_board(board, current_player)
            train_examples.append((canonical_board, current_player, action_probs, None, local_reward))
            action = np.random.choice(len(action_probs), p=action_probs)

            row, col = divmod(action, self.game.board_size)
            board, current_player = self.game.get_next_state(board, current_player, action)
            move_count += 1

            if self.game.visual_board:
                self.game.visual_board.set_last_move((row, col))

            self.game.render(board, current_player=current_player,
                             info_text=f"第{move_count}手 - {'玩家1' if current_player==1 else '玩家2'}走了({row},{col})")

            game_result = self.game.get_game_ended(board)
            if game_result != 0:
                return self._assign_values(train_examples, game_result)

    def learn(self, num_iterations, num_episodes_per_iter, batch_size, epochs_per_iter, model_filename_template):
        """Main loop: collect episodes, train on mini-batches, checkpoint.

        Args:
            num_iterations: number of collect-train-save cycles.
            num_episodes_per_iter: episodes played per iteration.
            batch_size: mini-batch size passed to ``net.train_step``.
            epochs_per_iter: passes over the collected examples per iteration.
            model_filename_template: filename with an ``{iteration}`` placeholder.
        """
        for i in range(1, num_iterations + 1):
            mode_text = "自我对弈" if USE_SELF_PLAY else "对战经典AI"
            print(f"--- 第 {i}/{num_iterations} 次迭代 (模式: {mode_text}) ---")

            train_examples = []
            for episode in range(num_episodes_per_iter):
                if self.game.visual_mode:
                    self._pump_events()

                print(f"  正在进行第 {episode + 1}/{num_episodes_per_iter} 局对弈...")
                train_examples.extend(self.execute_episode())

            print("  开始训练神经网络...")
            random.shuffle(train_examples)

            for epoch in range(epochs_per_iter):
                for j in range(0, len(train_examples), batch_size):
                    # BUG FIX: the full example list was passed to every
                    # train_step call, ignoring batch_size entirely; train on
                    # the current mini-batch slice instead.
                    batch = train_examples[j:j + batch_size]
                    loss, p_loss, v_loss = self.net.train_step(batch)
                    if j == 0:
                        print(f"    训练轮次 {epoch + 1}/{epochs_per_iter} - 损失: {loss:.4f}, 策略损失: {p_loss:.4f}, 价值损失: {v_loss:.4f}")

            model_path = os.path.join(self.checkpoint_dir, model_filename_template.format(iteration=i))
            self.net.save(model_path)
            print(f"  模型已保存至 {model_path}")
