import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import os
import time
from datetime import datetime
from torch.utils.data import DataLoader, TensorDataset, random_split
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
import threading
import glob
import re
from gomoku_net import GomokuNet
from gomoku_game import play_game_worker, state_to_tensor, check_win, init_worker

class EloRating:
    """Minimal Elo rating tracker keyed by arbitrary hashable model ids."""

    def __init__(self, k=32, default_rating=1500):
        # k: update step size; default_rating: rating assumed for unseen players.
        self.k = k
        self.default_rating = default_rating
        self.ratings = {}

    def update_ratings(self, model_a, model_b, result):
        """Update both players' ratings after a single game.

        Args:
            model_a: id of the first player.
            model_b: id of the second player.
            result: 1 if model_a won, -1 if model_b won, anything else = draw.
        """
        rating_a = self.ratings.get(model_a, self.default_rating)
        rating_b = self.ratings.get(model_b, self.default_rating)

        # Expected score of A under the logistic Elo model; B's is the complement.
        expect_a = 1 / (1 + 10 ** ((rating_b - rating_a) / 400))
        expect_b = 1 - expect_a

        # Actual scores: win = 1, loss = 0, draw = 0.5 for each side.
        if result == 1:
            score_a, score_b = 1, 0
        elif result == -1:
            score_a, score_b = 0, 1
        else:
            score_a, score_b = 0.5, 0.5

        self.ratings[model_a] = rating_a + self.k * (score_a - expect_a)
        self.ratings[model_b] = rating_b + self.k * (score_b - expect_b)

    def get_rating(self, model):
        """Return the current rating, or the default for unseen models."""
        return self.ratings.get(model, self.default_rating)

class AdvancedTrainer:
    def __init__(self, config=None, resume_model_path=None):
        """Set up model, optimizer, schedulers, AMP scaler and bookkeeping.

        Args:
            config: optional dict of hyper-parameter overrides, merged over
                the defaults below.
            resume_model_path: optional checkpoint path consumed later by
                _load_training_state().
        """
        self.config = {
            'res_blocks': 10,
            'batch_size': 1024,
            'num_games': 5000,
            'patience': 7,
            'initial_lr': 0.005,
            'weight_decay': 2e-4,
            'warmup_epochs': 3,
            'data_refresh_interval': 5,
            'total_epochs': 100,
            **(config or {})
        }
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = GomokuNet(self.config['res_blocks']).to(self.device)
        self.optimizer = optim.AdamW(
            self.model.parameters(),
            lr=self.config['initial_lr'],
            weight_decay=self.config['weight_decay']
        )
        # Single AMP scaler.  (The original built a throwaway default scaler
        # first and then immediately replaced it with this configured one.)
        self.scaler = torch.cuda.amp.GradScaler(
            enabled=True,
            init_scale=2.**16,
            growth_factor=2.0,
            backoff_factor=0.5,
            growth_interval=2000
        )
        # Scheduler combo: linear warmup, cosine decay, plus a plateau
        # fallback driven by the validation loss.  (The original also built
        # a OneCycleLR that this dict assignment immediately discarded.)
        self.scheduler = {
            'warmup': torch.optim.lr_scheduler.LinearLR(
                self.optimizer,
                start_factor=0.1,
                end_factor=1.0,
                total_iters=self.config['warmup_epochs']
            ),
            'main': torch.optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=self.config['total_epochs'] - self.config['warmup_epochs'],
                eta_min=1e-6
            ),
            'reduce_on_plateau': torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                mode='min',
                factor=0.5,
                patience=3,
                min_lr=1e-6
            )
        }
        self.pause_requested = False
        current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
        self.checkpoint_dir = os.path.join("checkpoint", current_time)
        self.data_dir = "training_data"
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        os.makedirs(self.data_dir, exist_ok=True)
        self.best_loss = float('inf')
        self.early_stop_counter = 0
        self.current_epoch = 0
        self.policy_weight = 1.0   # weight of the policy-head loss term
        self.value_weight = 1.0    # weight of the value-head loss term
        self.resume_model_path = resume_model_path
        self.train_losses = []
        self.val_losses = []
        self.evaluation_results = []
        self.baseline_model = None
        self.elo = EloRating(k=24)
        # NOTE: the 9x9 -> 15x15 action mapping is created lazily in
        # _process_and_save_data.  Pre-setting it to None here defeated the
        # hasattr() lazy-init check there, so it is intentionally not set.
        self.config.update({
            'use_cosine_annealing': True,
            'grad_clip_norm': 1.0,
            'accumulation_steps': 4,
            'autocast_enabled': True
        })
        
    def _run_training_epoch(self, loader):
        """Run one mixed-precision training epoch and return the mean loss.

        NOTE(review): this definition is shadowed by a second
        `_run_training_epoch` defined later in this class, so this version
        is dead code at runtime.
        """
        self.model.train()
        total_loss = 0
        accumulation_steps = 4  # gradient-accumulation window
        max_grad_norm = 1.0     # gradient clipping threshold
        device = self.device
        
        # Mixed-precision configuration: prefer bfloat16 where the GPU supports it.
        autocast_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
        
        for i, batch in enumerate(tqdm(loader, desc="混合精度训练")):
            # Move the batch to the training device.
            states, policy_targets, value_targets = self._prepare_batch(batch)
            
            # Forward pass under autocast (mixed precision).
            with torch.autocast(device_type=device.type, dtype=autocast_dtype):
                policy_pred, value_pred = self.model(states)
                
                # Policy loss (KL divergence + entropy regularization).
                # NOTE(review): KLDivLoss expects log-probabilities as input —
                # assumes the model's policy head emits log-probs; confirm.
                policy_loss = nn.KLDivLoss(reduction='batchmean')(
                    policy_pred, 
                    policy_targets
                )
                policy_entropy = -(policy_pred.exp() * policy_pred).sum(dim=1).mean()
                policy_loss -= 0.01 * policy_entropy  # encourage exploration
                
                # Value loss (smooth L1, robust to outliers).
                value_loss = nn.SmoothL1Loss()(value_pred, value_targets)
                
                # Total loss with per-head weights, scaled down for
                # gradient accumulation.
                loss = (
                    self.policy_weight * policy_loss + 
                    self.value_weight * value_loss
                ) / accumulation_steps
            
            # Mixed-precision backward pass.
            self.scaler.scale(loss).backward()
            
            # Only step the optimizer at the end of each accumulation window.
            if (i + 1) % accumulation_steps == 0:
                # Unscale first so clipping sees real gradient magnitudes.
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
                
                # Optimizer step through the scaler.
                self.scaler.step(self.optimizer)
                self.scaler.update()
                
                # Reset gradients.
                self.optimizer.zero_grad(set_to_none=True)
            
            # Per-batch LR schedule.
            # NOTE(review): __init__ makes self.scheduler a dict of
            # schedulers, which has no .step(); this line would raise if
            # this (shadowed) version were ever called.
            if self.config.get('use_cosine_annealing', False):
                self.scheduler.step()
            
            # Free per-batch tensors to keep GPU memory in check.
            total_loss += loss.item()
            del states, policy_targets, value_targets, policy_pred, value_pred
            torch.cuda.empty_cache()
        
        return total_loss / len(loader)

    def train(self, total_epochs=100, resume=False):
        """Run the full training loop.

        Orchestrates data generation, warmup/cosine/plateau LR scheduling,
        train/validation epochs, periodic evaluation, checkpointing and
        early stopping.  On interrupt or failure the current state is
        checkpointed before the exception propagates.

        Args:
            total_epochs: total number of epochs for this run.
            resume: restore a previous run's state before training.
        """
        try:
            # Rebuild the LR scheduler combination so the cosine phase spans
            # exactly this run's epoch budget (replaces the dict from __init__).
            self.scheduler = {
                'warmup': torch.optim.lr_scheduler.LinearLR(
                    self.optimizer,
                    start_factor=0.1,
                    end_factor=1.0,
                    total_iters=self.config['warmup_epochs']
                ),
                'main': torch.optim.lr_scheduler.CosineAnnealingLR(
                    self.optimizer,
                    T_max=total_epochs - self.config['warmup_epochs'],
                    eta_min=1e-6
                ),
                'reduce_on_plateau': torch.optim.lr_scheduler.ReduceLROnPlateau(
                    self.optimizer,
                    mode='min',
                    factor=0.5,
                    patience=3,
                    min_lr=1e-6
                )
            }
            
            # Restore training state when resuming.
            if resume:
                self._load_training_state()
            
            # Main training loop.
            for epoch in range(self.current_epoch, total_epochs):
                self.current_epoch = epoch
                print(f"\n🚀 当前训练轮次 {epoch + 1}/{total_epochs}")
                
                # Regenerate self-play data periodically, or when none exists yet.
                if (epoch % self.config['data_refresh_interval'] == 0 or 
                    not os.path.exists(os.path.join(self.data_dir, "states.npy"))):
                    self.generate_training_data()
                
                # Warmup phase: step the linear warmup scheduler once per epoch.
                if epoch < self.config['warmup_epochs']:
                    self.scheduler['warmup'].step()
                
                # Build fresh loaders over the current data files.
                train_loader, val_loader = self._prepare_dataloaders()
                
                # One training pass and one validation pass.
                train_loss = self._run_training_epoch(train_loader)
                val_loss = self._run_validation_epoch(val_loader)
                
                # Cosine decay once warmup has completed.
                if epoch >= self.config['warmup_epochs']:
                    self.scheduler['main'].step()
                
                # Plateau scheduler reacts to the validation loss.
                self.scheduler['reduce_on_plateau'].step(val_loss)
                
                # Early-stopping check (also snapshots the best model).
                if self._check_early_stopping(val_loss):
                    break
                
                # Periodic checkpointing and evaluation.
                if (epoch + 1) % 5 == 0:
                    self._save_checkpoint(epoch)
                if epoch % 2 == 0:
                    self.evaluate_model(num_games=50, baseline='random')
                
                # Learning-rate / loss monitoring.
                current_lr = self.optimizer.param_groups[0]['lr']
                print(f"🔧 当前学习率: {current_lr:.2e} | 训练损失: {train_loss:.4f} | 验证损失: {val_loss:.4f}")
                
                # Safety stop once the LR has collapsed.
                if current_lr < 1e-5:
                    print("⚠️ 学习率过低，提前终止训练")
                    break

        except KeyboardInterrupt:
            print("\n🛑 用户中断训练，保存当前状态...")
            self._save_checkpoint(self.current_epoch)
            raise
        except Exception as e:
            print(f"\n⚠️ 训练异常终止: {str(e)}")
            self._save_checkpoint(self.current_epoch)
            raise
        finally:
            # NOTE(review): this finally block also runs after an exception
            # or interrupt, so the completion message, the final save, and
            # generate_animation (not defined in the visible portion of this
            # file) all execute even on failure — confirm that is intended.
            print("✅ 训练完成，保存最终模型")
            self._save_final_model()
            self.generate_animation()

    def evaluate_model(self, num_games=100, baseline='random'):
        """Play `num_games` evaluation games against `baseline` and report results.

        Accumulates net score (+1 win / -1 loss / 0 draw per game), updates
        the Elo tracker after every game, and records the epoch's result in
        self.evaluation_results.

        Returns:
            (win_rate, elo_rating) tuple for the current model.
        """
        rating_tracker = self.elo
        net_key = id(self.model)
        opponent_key = id(self.baseline_model) if self.baseline_model else "random"

        score_total = 0
        accuracy_total = 0
        with torch.no_grad():
            for _ in tqdm(range(num_games), desc="评估进度"):
                outcome, acc = self._evaluate_game(baseline)
                score_total += outcome
                accuracy_total += acc
                rating_tracker.update_ratings(net_key, opponent_key, outcome)

        win_rate = score_total / num_games
        avg_accuracy = accuracy_total / num_games  # kept for parity with original
        current_rating = rating_tracker.get_rating(net_key)
        self.evaluation_results.append({
            'epoch': self.current_epoch,
            'win_rate': win_rate,
            'elo_rating': current_rating
        })
        print(f"✅ 评估结果 - 胜率: {win_rate:.2%} | Elo Rating: {current_rating:.0f}")
        return win_rate, current_rating

    def _evaluate_game(self, baseline):
        """Play one evaluation game: the current model is player 1, the
        baseline is player 2.

        Args:
            baseline: 'random' for a uniform-random legal opponent, or
                'previous' to use self.baseline_model.

        Returns:
            (result, accuracy): result is 1 if the model wins, -1 if the
            baseline wins, 0 for a draw; accuracy is always 0.0 (placeholder
            kept for interface compatibility).

        Raises:
            ValueError: for an unknown baseline — note this also fires for
                'previous' when no baseline model is loaded.
        """
        state = np.zeros((15, 15), dtype=np.float32)
        current_player = 1
        max_steps = 15 * 15  # hard cap on moves (full board)
        step_count = 0

        while True:
            # Encode the board from the current player's perspective.
            state_tensor, _ = state_to_tensor(state, current_player)
            state_tensor = state_tensor.unsqueeze(0).to(self.device)

            with torch.no_grad():
                if current_player == 1:
                    policy, _ = self.model(state_tensor)
                    policy = torch.softmax(policy, dim=1)  # ensure a valid distribution
                else:
                    if baseline == 'random':
                        # Random policy supported only on empty squares.
                        valid_actions = np.where(state.reshape(-1) == 0)[0]
                        policy = torch.zeros(225).to(self.device)
                        policy[valid_actions] = torch.rand(len(valid_actions))
                        policy = policy.unsqueeze(0)
                    elif baseline == 'previous' and self.baseline_model:
                        policy, _ = self.baseline_model(state_tensor)
                    else:
                        raise ValueError(f"未知基准类型: {baseline}")

            # Greedy action; flat index decodes row-major as (y, x).
            action = torch.argmax(policy).item()
            y, x = divmod(action, 15)

            # If the greedy move is illegal, fall back to a random legal one.
            if state[y, x] != 0:
                valid_actions = np.where(state.reshape(-1) == 0)[0]
                action = np.random.choice(valid_actions)
                y, x = divmod(action, 15)

            # Place the stone.
            state[y, x] = current_player
            step_count += 1

            # Win check at the last move's (y, x).  (Removed the unused
            # `winner` local from the original.)
            if check_win(state, y, x):
                return (1 if current_player == 1 else -1), 0.0

            # Draw when the board is full.
            if step_count >= max_steps or (state != 0).all():
                return 0, 0.0

            # Alternate players (1 <-> 2).
            current_player = 3 - current_player

    def generate_training_data(self):
        """Generate self-play training data with a multiprocessing pool.

        Early epochs (< 10) use a 9x9 board and the game count ramps up with
        the epoch number.  Model weights are snapshotted on CPU and shipped
        to each worker via `init_worker`.

        NOTE(review): `board_size` is chosen here but is not visibly passed
        to `play_game_worker`; confirm the workers actually generate games
        of the matching board size.
        """
        self.model.eval()
        original_device = next(self.model.parameters()).device
        
        # Curriculum: small board and fewer games early, scaling up later.
        if self.current_epoch < 10:
            board_size = 9
            num_games = self.config['num_games'] * (1 + self.current_epoch // 5)
        else:
            board_size = 15
            num_games = self.config['num_games'] * (1 + self.current_epoch // 10)
        
        print(f"生成 {num_games} 局 {board_size}x{board_size} 训练数据...")
        
        # Snapshot weights on CPU (picklable for the pool), then restore the
        # model to its original device.
        model_state = self.model.cpu().state_dict()
        self.model.to(original_device)
        
        # Multi-process generation with explicit pool lifetime management.
        with Pool(
            processes=cpu_count()-1,
            initializer=init_worker,
            initargs=(model_state, torch.cuda.is_available())
        ) as pool:
            results = list(tqdm(
                pool.imap_unordered(
                    play_game_worker,
                    [(i, np.random.choice([1, 2])) for i in range(num_games)]
                ),
                total=num_games,
                desc="数据生成"
            ))
        
        # Post-process and persist, then ensure device placement is restored.
        self._process_and_save_data(results, board_size)
        self.model.to(original_device)
        torch.cuda.empty_cache()
    def _process_and_save_data(self, results, board_size):
        """处理并保存生成的训练数据（优化版）"""
        # 数据预处理阶段 =====================================================
        valid_results = [r for r in results if r and len(r[0]) > 0]
        if not valid_results:
            raise ValueError("所有生成的对局数据均为空，请检查游戏逻辑")
        
        # 提取并合并数据
        all_states = np.concatenate([s for s, _, _ in valid_results])
        all_policies = np.concatenate([p for _, p, _ in valid_results])
        all_values = np.concatenate([v for _, _, v in valid_results])
        
        # 数据增强处理 =======================================================
        if board_size == 9:
            # 9x9到15x15坐标映射
            if not hasattr(self, '_action_mapping_9x9'):
                original_actions = np.arange(81)
                mapped_actions = []
                for a in original_actions:
                    y, x = divmod(a, 9)
                    mapped_actions.append((y + 3) * 15 + (x + 3))
                self._action_mapping_9x9 = np.array(mapped_actions)
            
            # 状态数据扩展（9x9 -> 15x15中心区域）
            padded_states = np.zeros((all_states.shape[0], 15, 15), dtype=np.float32)
            padded_states[:, 3:12, 3:12] = all_states.reshape(-1, 9, 9)
            all_states = padded_states
            
            # 策略数据映射与归一化
            new_policies = np.zeros((len(all_policies), 225), dtype=np.float32)
            new_policies[:, self._action_mapping_9x9] = all_policies[:, :81]
            
            # 批量归一化处理（处理零向量情况）
            row_sums = new_policies.sum(axis=1, keepdims=True)
            row_sums[row_sums == 0] = 1
            new_policies /= row_sums
            all_policies = new_policies
            
        else:
            # 确保15x15数据维度正确
            if all_states.shape[1:] != (15, 15):
                raise ValueError(f"无效状态维度：{all_states.shape}")
        
        # 数据格式转换 =======================================================
        # 转换为双通道格式（当前玩家/对手）
        all_states = np.stack([
            (all_states == 1).astype(np.float32),
            (all_states == 2).astype(np.float32)
        ], axis=1).transpose(0, 2, 3, 1)  # 调整为(N, 15, 15, 2)
        
        # 数据验证 ===========================================================
        assert all_states.shape[0] == all_policies.shape[0] == len(all_values), "数据维度不匹配"
        assert np.allclose(all_policies.sum(axis=1), 1, atol=1e-5), "策略分布未正确归一化"
        assert all_states.shape[1:] == (15, 15, 2), f"状态维度错误：{all_states.shape}"
        
        # 保存数据 ===========================================================
        save_options = {
            'allow_pickle': False,
            'fix_imports': False
        }
        np.save(os.path.join(self.data_dir, "states.npy"), 
                all_states.astype(np.float16), **save_options)
        np.save(os.path.join(self.data_dir, "policies.npy"), 
                all_policies.astype(np.float16), **save_options)
        np.save(os.path.join(self.data_dir, "values.npy"), 
                all_values.astype(np.float16), **save_options)
        
        print(f"✅ 成功保存 {len(all_states)} 条训练数据")

    def _prepare_dataloaders(self):
        """准备训练和验证数据加载器"""
        try:
            # 加载状态数据
            states = np.load(os.path.join(self.data_dir, "states.npy"))
            if states.ndim not in (3, 4):
                raise ValueError(f"无效数据维度: {states.ndim}，应为3或4维")
            if states.ndim == 3:
                states = np.stack([(states==1).astype(np.float32), 
                                (states==2).astype(np.float32)], axis=1)
            if states.shape[1:] != (2, 15, 15):
                raise ValueError(f"无效数据形状: {states.shape}，应为 (N, 2, 15, 15)")
            states = torch.from_numpy(states).float()

            # 加载策略和目标值
            policies = torch.from_numpy(np.load(os.path.join(self.data_dir, "policies.npy"))).float()
            values = torch.from_numpy(np.load(os.path.join(self.data_dir, "values.npy"))).float()

            # 创建数据集
            dataset = TensorDataset(states, policies, values)
            train_size = int(0.8 * len(dataset))
            val_size = len(dataset) - train_size
            train_dataset, val_dataset = random_split(dataset, [train_size, val_size])

            # 创建数据加载器
            return (
                DataLoader(train_dataset, batch_size=self.config['batch_size'], shuffle=True, num_workers=4),
                DataLoader(val_dataset, batch_size=self.config['batch_size'], num_workers=4)
            )
        except FileNotFoundError as e:
            raise RuntimeError("训练数据文件缺失，请先运行generate_training_data()") from e

    def _load_training_state(self):
        if self.resume_model_path:
            checkpoint_path = self.resume_model_path
        else:
            checkpoint_path = os.path.join(self.checkpoint_dir, "latest_checkpoint.pth")
        if os.path.exists(checkpoint_path):
            checkpoint = torch.load(checkpoint_path)
            self.model.load_state_dict(checkpoint['model_state'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state'])
            self.scheduler.load_state_dict(checkpoint['scheduler_state'])
            self.best_loss = checkpoint['best_loss']
            self.current_epoch = checkpoint['epoch']
            self.early_stop_counter = checkpoint['early_stop_counter']
            self.train_losses = checkpoint.get('train_losses', [])
            self.val_losses = checkpoint.get('val_losses', [])
            print(f"⏩ 从第{self.current_epoch + 1}轮恢复训练")
        else:
            print("⚠️ 未找到检查点，开始新训练")

    def _adjust_learning_rate(self, epoch):
        lr = self.config['initial_lr'] * (epoch + 1) / self.config['warmup_epochs']
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def _run_training_epoch(self, loader):
        """Train for one epoch with AMP and gradient accumulation.

        Fixes over the previous version:
          * gradients are no longer zeroed at the top of every batch (which
            defeated gradient accumulation) — only after each optimizer step;
          * gradients are unscaled before clipping so the norm is measured
            on real gradient values;
          * autocast uses the actual training device and falls back to
            float16 when CUDA bfloat16 is unavailable;
          * the reported loss is the un-scaled per-batch loss;
          * a trailing partial accumulation window is flushed at epoch end.

        Returns:
            Mean loss per batch over the epoch.
        """
        self.model.train()
        total_loss = 0.0
        accumulation_steps = 4
        max_grad_norm = 1.0
        device_type = self.device.type
        # Prefer bfloat16; on CUDA devices without bf16 support use float16.
        if device_type == 'cuda' and not torch.cuda.is_bf16_supported():
            autocast_dtype = torch.float16
        else:
            autocast_dtype = torch.bfloat16
        self.optimizer.zero_grad(set_to_none=True)
        num_batches = len(loader)
        for i, batch in enumerate(tqdm(loader, desc="训练")):
            states, policy_targets, value_targets = self._prepare_batch(batch)
            with torch.autocast(device_type=device_type, dtype=autocast_dtype):
                policy_pred, value_pred = self.model(states)
                # KLDivLoss expects log-probabilities from the policy head.
                policy_loss = nn.KLDivLoss(reduction='batchmean')(policy_pred, policy_targets)
                value_loss = nn.MSELoss()(value_pred, value_targets)
                loss = self.policy_weight * policy_loss + self.value_weight * value_loss
            # Scale down for accumulation so the effective gradient matches
            # one large batch of accumulation_steps * batch_size samples.
            self.scaler.scale(loss / accumulation_steps).backward()
            if (i + 1) % accumulation_steps == 0 or (i + 1) == num_batches:
                # Unscale before clipping, then step through the scaler.
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
                self.scaler.step(self.optimizer)
                self.scaler.update()
                self.optimizer.zero_grad(set_to_none=True)
            # Track the true (un-scaled) loss and free per-batch tensors.
            total_loss += loss.item()
            del states, policy_targets, value_targets, policy_pred, value_pred
            torch.cuda.empty_cache()
        return total_loss / len(loader)

    def _run_validation_epoch(self, loader):
        self.model.eval()
        total_loss = 0
        with torch.no_grad():
            for states, policy_targets, value_targets in loader:
                states = states.to(self.device)
                policy_targets = policy_targets.to(self.device)
                value_targets = value_targets.to(self.device).unsqueeze(1)
                policy_pred, value_pred = self.model(states)
                loss = nn.KLDivLoss()(policy_pred, policy_targets) + nn.MSELoss()(value_pred, value_targets)
                total_loss += loss.item()
                del states, policy_targets, value_targets, policy_pred, value_pred
                torch.cuda.empty_cache()
        return total_loss / len(loader)

    def _check_early_stopping(self, val_loss):
        current_lr = self.optimizer.param_groups[0]['lr']
        if current_lr < 1e-5:
            print("学习率过低，提前终止训练")
            return True
        if val_loss < self.best_loss:
            self.best_loss = val_loss
            self.early_stop_counter = 0
            torch.save(self.model.state_dict(),
                       os.path.join(self.checkpoint_dir, "best_model.pth"))
        else:
            self.early_stop_counter += 1
            if self.early_stop_counter >= self.config['patience']:
                print("⏹️ 早停机制触发，停止训练")
                return True
        return False

    def _save_checkpoint(self, epoch):
        checkpoint = {
            'epoch': epoch,
            'model_state': self.model.state_dict(),
            'optimizer_state': self.optimizer.state_dict(),
            'scheduler_state': self.scheduler.state_dict(),
            'best_loss': self.best_loss,
            'early_stop_counter': self.early_stop_counter,
            'train_losses': self.train_losses,
            'val_losses': self.val_losses,
            'evaluation_results': self.evaluation_results,
            'config': self.config
        }
        for old_file in glob.glob(os.path.join(self.checkpoint_dir, "checkpoint_epoch_*.pth")):
            epoch_num = int(re.search(r"epoch_(\d+)", old_file).group(1))
            if epoch_num < (self.current_epoch - 2):
                os.remove(old_file)
        torch.save(checkpoint, os.path.join(self.checkpoint_dir, f"checkpoint_epoch_{epoch + 1}.pth"))
        print(f"💾 已保存第 {epoch + 1} 轮检查点")

    def _save_final_model(self):
        torch.save({
            'model_state': self.model.state_dict(),
            'optimizer_state': self.optimizer.state_dict(),
            'scheduler_state': self.scheduler.state_dict(),
            'config': self.config
        }, os.path.join(self.checkpoint_dir, "final_model.pth"))

    def _prepare_batch(self, batch):
        states, policy_targets, value_targets = batch
        states = states.to(self.device)
        policy_targets = policy_targets.to(self.device)
        value_targets = value_targets.to(self.device).unsqueeze(1)
        return states, policy_targets, value_targets
    
    @staticmethod
    def map_action_9x9_to_15x15(action):
        y, x = divmod(action, 9)
        return (y + 3) * 15 + (x + 3)  # 将9x9坐标映射到15x15中心区域