"""
自我对弈训练器 - AlphaGo Zero风格
AI自己和自己下棋，生成训练数据
"""

import numpy as np
import pickle
import os
from datetime import datetime
from board import Board
from mcts_alpha import AlphaMCTS
from neural_network import GomokuNet, AlphaGomokuTrainer
import torch


class SelfPlayTrainer:
    """Self-play trainer (AlphaGo Zero style).

    Plays games of the model against itself using MCTS guided by a
    policy/value network, collects (state, policy, value) examples into a
    replay buffer, and periodically trains the network on random minibatches.
    """

    def __init__(self, model_path=None, device='cpu'):
        """
        Initialize the trainer.

        model_path: optional path to a pretrained model checkpoint to load
        device: torch device string ('cpu' or 'cuda')
        """
        self.device = device
        self.board_size = 15  # standard Gomoku board

        # Create the network and its training wrapper (optimizer + losses).
        self.model = GomokuNet(board_size=self.board_size).to(device)
        self.trainer = AlphaGomokuTrainer(self.model, lr=0.001, device=device)

        if model_path and os.path.exists(model_path):
            self.trainer.load_model(model_path)
            print(f"✅ 加载预训练模型: {model_path}")
        else:
            print("✅ 创建新模型")

        # Replay buffer of [state, policy, value] training examples.
        self.training_data = []
        self.max_buffer_size = 10000

        # Aggregate statistics over all self-play games.
        self.stats = {
            'games_played': 0,
            'black_wins': 0,
            'white_wins': 0,
            'draws': 0,
            'avg_game_length': 0
        }

    def self_play_game(self, num_simulations=100, temperature=1.0, show_progress=False):
        """
        Play one self-play game.

        num_simulations: MCTS simulations per move
        temperature: exploration temperature for move selection
        show_progress: print periodic progress and the game result

        Returns: training data [[state, policy, value], ...] where value is
        from the perspective of the player to move at that step.
        """
        board = Board(size=self.board_size)
        game_data = []
        step_players = []  # player to move at each recorded step
        move_count = 0

        while True:
            move_count += 1

            # Fresh MCTS search guided by the current network.
            mcts = AlphaMCTS(
                board=board,
                neural_net=self.model,
                num_simulations=num_simulations,
                c_puct=1.5
            )

            # Visit-count-derived move distribution (the policy target).
            action_probs = mcts.get_action_probs(temperature=temperature)

            # Record state/policy now; the value target is filled at game end.
            state = self._encode_state(board.board, board.current_player)
            policy = self._action_probs_to_policy(action_probs)
            game_data.append([state, policy, None])
            # FIX: record the actual player to move rather than inferring it
            # later from move parity — robust even if the move order ever
            # deviates from strict black-first alternation.
            step_players.append(board.current_player)

            move = mcts.get_best_move(board.current_player, temperature=temperature)

            if show_progress and move_count % 10 == 0:
                print(f"  第{move_count}步: {move}")

            board.place_stone(move[0], move[1])

            # Terminal check: a winner ends the game immediately.
            winner = board.check_winner()
            if winner is not None:
                # Label each step from the perspective of its player to move:
                # +1 if that player went on to win, -1 otherwise.
                for i in range(len(game_data)):
                    game_data[i][2] = 1.0 if winner == step_players[i] else -1.0

                self.stats['games_played'] += 1
                if winner == 1:
                    self.stats['black_wins'] += 1
                elif winner == -1:
                    self.stats['white_wins'] += 1

                if show_progress:
                    winner_name = "黑棋" if winner == 1 else "白棋"
                    print(f"✅ 游戏结束！{winner_name}获胜，共{move_count}步")

                break

            # Draw: the board is full with no winner.
            if move_count >= self.board_size * self.board_size:
                for i in range(len(game_data)):
                    game_data[i][2] = 0.0
                # BUG FIX: draws must also count toward games_played;
                # otherwise the running-average update below divides by zero
                # when the first game is a draw, and win rates are computed
                # over a base that silently excludes drawn games.
                self.stats['games_played'] += 1
                self.stats['draws'] += 1
                if show_progress:
                    print(f"⚖️ 游戏结束！平局，共{move_count}步")
                break

        # Incrementally update the running average game length.
        total_games = self.stats['games_played']
        self.stats['avg_game_length'] = (
            (self.stats['avg_game_length'] * (total_games - 1) + move_count) / total_games
        )

        return game_data

    def train(self, num_games=100, num_simulations=100, batch_size=32,
              save_interval=10, model_save_path='models/best_model.pth'):
        """
        Main training loop.

        num_games: number of self-play games to run
        num_simulations: MCTS simulations per move
        batch_size: training minibatch size
        save_interval: save the model every this many games
        model_save_path: checkpoint path for the trained model
        """
        print("=" * 60)
        print("🚀 开始自我对弈训练")
        print("=" * 60)
        print(f"📊 训练参数:")
        print(f"  - 对弈局数: {num_games}")
        print(f"  - MCTS模拟次数: {num_simulations}")
        print(f"  - 批次大小: {batch_size}")
        print(f"  - 保存间隔: {save_interval}局")
        print("=" * 60)

        # Ensure output directories exist. FIX: guard against a bare filename
        # (empty dirname), for which os.makedirs('') would raise.
        model_dir = os.path.dirname(model_save_path)
        if model_dir:
            os.makedirs(model_dir, exist_ok=True)
        os.makedirs('training_data', exist_ok=True)

        for game_num in range(1, num_games + 1):
            print(f"\n🎮 第 {game_num}/{num_games} 局自我对弈...")

            # Self-play; lower the temperature in the second half of training
            # to shift from exploration toward stronger play.
            game_data = self.self_play_game(
                num_simulations=num_simulations,
                temperature=1.0 if game_num < num_games * 0.5 else 0.5,
                show_progress=(game_num % 10 == 0)
            )

            # Append to the replay buffer, keeping only the newest examples.
            self.training_data.extend(game_data)
            if len(self.training_data) > self.max_buffer_size:
                self.training_data = self.training_data[-self.max_buffer_size:]

            print(f"  📦 训练数据: {len(self.training_data)} 条")

            # Train once we have at least one full batch of examples.
            if len(self.training_data) >= batch_size:
                print(f"  🧠 训练神经网络...")
                losses = self._train_network(batch_size, epochs=5)
                print(f"  📉 损失: 总={losses['total']:.4f}, "
                      f"策略={losses['policy']:.4f}, 价值={losses['value']:.4f}")

            # Periodic checkpoint + stats report.
            if game_num % save_interval == 0:
                self.trainer.save_model(model_save_path)
                self._save_training_data()
                self._print_stats()

        # Final checkpoint.
        print("\n" + "=" * 60)
        print("✅ 训练完成！")
        self.trainer.save_model(model_save_path)
        self._save_training_data()
        self._print_stats()
        print("=" * 60)

    def _train_network(self, batch_size, epochs=1):
        """Run `epochs` training steps on random minibatches from the buffer.

        Returns a dict of average losses: {'total', 'policy', 'value'}.
        """
        total_losses = {'total': 0, 'policy': 0, 'value': 0}
        num_batches = 0

        # FIX: np.random.choice with replace=False requires the sample size
        # to be at most the population size; clamp defensively so a direct
        # call with a small buffer cannot raise.
        batch_size = min(batch_size, len(self.training_data))

        for _ in range(epochs):
            # Sample a minibatch without replacement.
            indices = np.random.choice(len(self.training_data), batch_size, replace=False)
            batch = [self.training_data[i] for i in indices]

            # Stack into arrays for the trainer.
            states = np.array([item[0] for item in batch])
            policies = np.array([item[1] for item in batch])
            values = np.array([item[2] for item in batch])

            # One optimization step; trainer returns per-component losses.
            losses = self.trainer.train_step(states, policies, values)

            for key in total_losses:
                total_losses[key] += losses[key]
            num_batches += 1

        # Average losses over all batches.
        for key in total_losses:
            total_losses[key] /= num_batches

        return total_losses

    def _encode_state(self, board, current_player):
        """Encode the board as 3 planes from the current player's perspective.

        Plane 0: current player's stones, plane 1: opponent's stones,
        plane 2: empty cells. Returns a float32 array of shape
        (3, board_size, board_size).
        """
        encoded = np.zeros((3, self.board_size, self.board_size), dtype=np.float32)

        if current_player == 1:
            encoded[0] = (board == 1).astype(np.float32)   # current player (black)
            encoded[1] = (board == -1).astype(np.float32)  # opponent (white)
        else:
            encoded[0] = (board == -1).astype(np.float32)  # current player (white)
            encoded[1] = (board == 1).astype(np.float32)   # opponent (black)

        encoded[2] = (board == 0).astype(np.float32)  # empty cells

        return encoded

    def _action_probs_to_policy(self, action_probs):
        """Convert [(move, prob), ...] into a flat policy vector.

        Index layout is row-major: idx = row * board_size + col.
        """
        policy = np.zeros(self.board_size * self.board_size, dtype=np.float32)

        for move, prob in action_probs:
            idx = move[0] * self.board_size + move[1]
            policy[idx] = prob

        return policy

    def _save_training_data(self):
        """Pickle the current replay buffer to a timestamped file."""
        data_path = f'training_data/games_{datetime.now().strftime("%Y%m%d_%H%M%S")}.pkl'
        with open(data_path, 'wb') as f:
            pickle.dump(self.training_data, f)
        print(f"💾 训练数据已保存: {data_path}")

    def _print_stats(self):
        """Print aggregate self-play statistics."""
        print("\n" + "=" * 60)
        print("📊 训练统计")
        print("=" * 60)
        print(f"总对局数: {self.stats['games_played']}")
        print(f"黑棋胜: {self.stats['black_wins']} "
              f"({self.stats['black_wins']/max(1, self.stats['games_played'])*100:.1f}%)")
        print(f"白棋胜: {self.stats['white_wins']} "
              f"({self.stats['white_wins']/max(1, self.stats['games_played'])*100:.1f}%)")
        print(f"平局: {self.stats['draws']}")
        print(f"平均步数: {self.stats['avg_game_length']:.1f}")
        print(f"训练数据量: {len(self.training_data)}")
        print("=" * 60)


if __name__ == "__main__":
    # 训练示例
    print("🎯 五子棋自我对弈训练")
    print("=" * 60)
    
    # 检查是否有GPU
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"🖥️  使用设备: {device}")
    
    # 创建训练器
    trainer = SelfPlayTrainer(device=device)
    
    # 开始训练（小规模测试）
    trainer.train(
        num_games=20,           # 对弈20局
        num_simulations=50,     # 每步50次模拟（快速训练）
        batch_size=32,
        save_interval=5,
        model_save_path='models/best_model.pth'
    )
    
    print("\n✅ 训练完成！可以使用 play_trained.py 测试训练好的模型")
