"""
模型训练管理器
"""
import os
import time
import logging
import json
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import numpy as np

from sichuanmajiang.model.config import TrainingConfig
from sichuanmajiang.model.mahjong_model import create_mahjong_model
from sichuanmajiang.data.data_loader import load_and_split_data
from sichuanmajiang.data.preprocessor import DataPreprocessor


class TrainingManager:
    """
    Training manager.

    Owns the full training lifecycle for a mahjong model: building the
    model/optimizer/scheduler/criterion, loading data, running the
    train/eval loops, checkpointing, early stopping, testing and export.
    """
    def __init__(self, config: TrainingConfig):
        """
        Initialize the training manager.

        Args:
            config: training configuration
        """
        self.config = config

        # Logging first, so every later setup step can report progress.
        self.logger = self._setup_logging()
        self.logger.info(f"初始化训练管理器: {config.experiment_name}")

        # Compute device, taken verbatim from config (e.g. 'cpu' / 'cuda:0').
        self.device = config.device
        self.logger.info(f"使用设备: {self.device}")

        # Reproducibility.
        self._set_seed(config.seed)

        # Model.
        self.model = None
        self._build_model()

        # Optimizer.
        self.optimizer = None
        self._build_optimizer()

        # Learning-rate scheduler (stays None for unrecognized types).
        self.scheduler = None
        self._build_scheduler()

        # Loss function.
        self.criterion = None
        self._build_criterion()

        # Data loaders are created lazily by _load_data().
        self.train_loader = None
        self.val_loader = None
        self.test_loader = None

        # Feature preprocessor shared by all loaders.
        self.preprocessor = DataPreprocessor()

        # Optional TensorBoard writer.
        self.writer = None
        if config.use_tensorboard:
            self.writer = SummaryWriter(log_dir=os.path.join(config.log_dir, config.experiment_name))

        # Training state.
        self.current_epoch = 0
        self.global_step = 0
        # best_score starts at the worst possible value for the chosen mode.
        self.best_score = float('inf') if config.early_stopping_mode == 'min' else -float('inf')
        self.early_stopping_counter = 0

        # Per-epoch history, serialized by _save_history().
        self.history = {
            'train_loss': [],
            'val_loss': [],
            'train_metrics': [],
            'val_metrics': []
        }

        # Resuming a checkpoint takes precedence over a pretrained model.
        if config.resume_from_checkpoint:
            self._resume_from_checkpoint(config.resume_from_checkpoint)
        elif config.pretrained_model:
            self._load_pretrained_model(config.pretrained_model)

        # Optionally freeze everything except the output layers.
        if config.freeze_layers:
            self._freeze_layers()

    def _setup_logging(self) -> logging.Logger:
        """
        Set up a console + file logger for this experiment.

        Returns:
            Configured logger instance.
        """
        logger = logging.getLogger(f"TrainingManager_{self.config.experiment_name}")
        logger.setLevel(logging.INFO)

        # Creating two managers with the same experiment name in one process
        # must not attach duplicate handlers (which would double every line).
        if logger.handlers:
            return logger

        # Console handler.
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)

        # File handler -- the directory must exist first, otherwise
        # logging.FileHandler raises FileNotFoundError on a fresh run.
        log_dir = os.path.join(self.config.log_dir, self.config.experiment_name)
        os.makedirs(log_dir, exist_ok=True)
        file_handler = logging.FileHandler(os.path.join(log_dir, 'training.log'))
        file_handler.setLevel(logging.INFO)

        # Shared formatter.
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        console_handler.setFormatter(formatter)
        file_handler.setFormatter(formatter)

        logger.addHandler(console_handler)
        logger.addHandler(file_handler)

        return logger

    def _set_seed(self, seed: int) -> None:
        """
        Seed every RNG used during training for reproducibility.

        Args:
            seed: random seed
        """
        torch.manual_seed(seed)
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # multi-GPU case
        np.random.seed(seed)
        # Deterministic cuDNN kernels; disables the autotuner.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

        self.logger.info(f"设置随机种子: {seed}")

    def _build_model(self) -> None:
        """
        Build the model, move it to the target device, and wrap it in
        DataParallel when several GPU ids are configured.
        """
        self.logger.info(f"构建模型: {self.config.model_type}")

        self.model = create_mahjong_model(
            model_type=self.config.model_type,
            **self.config.model_config
        )

        self.model.to(self.device)

        # Multi-GPU data parallelism.
        if len(self.config.gpu_ids) > 1 and torch.cuda.is_available():
            self.model = nn.DataParallel(self.model, device_ids=self.config.gpu_ids)

        self.logger.info(f"模型构建完成，参数数量: {self._count_parameters()}")

    def _count_parameters(self) -> int:
        """
        Count trainable model parameters.

        Returns:
            Number of parameters with requires_grad=True.
        """
        return sum(p.numel() for p in self.model.parameters() if p.requires_grad)

    def _build_optimizer(self) -> None:
        """
        Build the optimizer over the trainable parameters.

        Raises:
            ValueError: for an unknown optimizer type.
        """
        self.logger.info(f"构建优化器: {self.config.optimizer_type}")

        # Only optimize parameters that are not frozen.
        parameters = [p for p in self.model.parameters() if p.requires_grad]

        if self.config.optimizer_type == 'adam':
            self.optimizer = optim.Adam(
                parameters,
                lr=self.config.learning_rate,
                weight_decay=self.config.weight_decay
            )
        elif self.config.optimizer_type == 'adamw':
            self.optimizer = optim.AdamW(
                parameters,
                lr=self.config.learning_rate,
                weight_decay=self.config.weight_decay
            )
        elif self.config.optimizer_type == 'sgd':
            self.optimizer = optim.SGD(
                parameters,
                lr=self.config.learning_rate,
                momentum=0.9,
                weight_decay=self.config.weight_decay
            )
        else:
            raise ValueError(f"未知的优化器类型: {self.config.optimizer_type}")

    def _build_scheduler(self) -> None:
        """
        Build the learning-rate scheduler.

        Unknown scheduler types silently leave self.scheduler as None
        (i.e. a constant learning rate).
        """
        self.logger.info(f"构建学习率调度器: {self.config.scheduler_type}")

        if self.config.scheduler_type == 'cosine':
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer,
                T_max=self.config.scheduler_config.get('T_max', 100),
                eta_min=1e-6
            )
        elif self.config.scheduler_type == 'step':
            self.scheduler = optim.lr_scheduler.StepLR(
                self.optimizer,
                step_size=self.config.scheduler_config.get('step_size', 30),
                gamma=self.config.scheduler_config.get('gamma', 0.1)
            )
        elif self.config.scheduler_type == 'plateau':
            # NOTE: the `verbose` argument was deprecated and later removed
            # from ReduceLROnPlateau in recent PyTorch, so it is not passed.
            self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                mode=self.config.early_stopping_mode,
                factor=self.config.scheduler_config.get('gamma', 0.1),
                patience=self.config.scheduler_config.get('patience', 10)
            )
        elif self.config.scheduler_type == 'linear':
            # train_loader is built later by _load_data(), so at construction
            # time the epochs * 100 fallback is the value actually used.
            total_steps = self.config.epochs * (len(self.train_loader) // self.config.gradient_accumulation_steps) if self.train_loader else self.config.epochs * 100
            self.scheduler = optim.lr_scheduler.LinearLR(
                self.optimizer,
                start_factor=1.0,
                end_factor=0.01,
                total_iters=total_steps
            )

    def _build_criterion(self) -> None:
        """
        Build the loss function.

        Raises:
            ValueError: for an unknown loss type.
        """
        self.logger.info(f"构建损失函数: {self.config.loss_type}")

        if self.config.loss_type == 'cross_entropy':
            self.criterion = nn.CrossEntropyLoss()
        elif self.config.loss_type == 'mse':
            self.criterion = nn.MSELoss()
        elif self.config.loss_type == 'huber':
            delta = self.config.loss_config.get('delta', 1.0)
            self.criterion = nn.HuberLoss(delta=delta)
        elif self.config.loss_type == 'smooth_l1':
            self.criterion = nn.SmoothL1Loss()
        else:
            raise ValueError(f"未知的损失函数类型: {self.config.loss_type}")

    def _freeze_layers(self) -> None:
        """
        Freeze all parameters except those whose name contains 'output' or
        'fc' (the head layers), leaving only the head trainable.
        """
        # Unwrap DataParallel to reach the real module.
        if hasattr(self.model, 'module'):  # DataParallel
            model = self.model.module
        else:
            model = self.model

        # Freeze every layer except the output/fc head.
        for name, param in model.named_parameters():
            if 'output' not in name and 'fc' not in name:
                param.requires_grad = False

        trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
        self.logger.info(f"冻结部分层后，可训练参数数量: {trainable_params}")

    def _load_data(self) -> None:
        """
        Build the train / validation / test data loaders.
        """
        self.logger.info("加载数据...")

        self.train_loader, self.val_loader, self.test_loader = load_and_split_data(
            data_path=self.config.train_data_path,
            preprocessor=self.preprocessor,
            batch_size=self.config.batch_size,
            num_workers=self.config.num_workers,
            use_balanced_sampler=self.config.use_balanced_sampler,
            load_in_memory=self.config.load_in_memory
        )

        self.logger.info(f"数据加载完成")
        self.logger.info(f"训练集批次数量: {len(self.train_loader)}")
        self.logger.info(f"验证集批次数量: {len(self.val_loader)}")
        self.logger.info(f"测试集批次数量: {len(self.test_loader)}")

    def _save_checkpoint(self, epoch: int, is_best: bool = False) -> str:
        """
        Save a training checkpoint.

        Args:
            epoch: current epoch index
            is_best: whether this is the best model so far

        Returns:
            Path of the written checkpoint file.
        """
        checkpoint = {
            'epoch': epoch,
            'global_step': self.global_step,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict() if self.scheduler else None,
            'best_score': self.best_score,
            'config': self.config.to_dict(),
            'history': self.history
        }

        # Best models get a distinct prefix so _clean_old_checkpoints
        # never deletes them.
        if is_best:
            checkpoint_name = f"best_model_epoch_{epoch}.pth"
        else:
            checkpoint_name = f"checkpoint_epoch_{epoch}.pth"

        # Ensure the target directory exists before writing.
        os.makedirs(self.config.checkpoint_dir, exist_ok=True)
        checkpoint_path = os.path.join(self.config.checkpoint_dir, checkpoint_name)

        torch.save(checkpoint, checkpoint_path)
        self.logger.info(f"检查点保存到: {checkpoint_path}")

        # Enforce the retention limit.
        self._clean_old_checkpoints()

        return checkpoint_path

    def _clean_old_checkpoints(self) -> None:
        """
        Delete the oldest regular checkpoints, keeping at most
        config.max_checkpoints of them (best-model files are untouched).
        """
        if self.config.max_checkpoints <= 0:
            return

        # Collect regular checkpoints, tolerating stray files whose name
        # does not end in an integer epoch.
        checkpoint_files = []
        for f in os.listdir(self.config.checkpoint_dir):
            if f.startswith('checkpoint_') and f.endswith('.pth'):
                try:
                    epoch = int(f.split('_')[-1].split('.')[0])
                except ValueError:
                    continue  # not one of ours; leave it alone
                checkpoint_files.append((epoch, f))

        # Newest first; pop() then removes the oldest.
        checkpoint_files.sort(key=lambda x: x[0], reverse=True)

        while len(checkpoint_files) > self.config.max_checkpoints:
            _, file_to_delete = checkpoint_files.pop()
            file_path = os.path.join(self.config.checkpoint_dir, file_to_delete)
            os.remove(file_path)
            self.logger.info(f"删除旧检查点: {file_path}")

    def _resume_from_checkpoint(self, checkpoint_path: str) -> None:
        """
        Restore model/optimizer/scheduler and training state from a checkpoint.

        Args:
            checkpoint_path: checkpoint file path
        """
        self.logger.info(f"从检查点恢复: {checkpoint_path}")

        # NOTE: torch.load unpickles arbitrary objects; only load
        # checkpoints from trusted sources.
        checkpoint = torch.load(checkpoint_path, map_location=self.device)

        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

        if checkpoint['scheduler_state_dict'] and self.scheduler:
            self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

        # Resume from the epoch after the one that was saved.
        self.current_epoch = checkpoint['epoch'] + 1
        self.global_step = checkpoint['global_step']
        self.best_score = checkpoint['best_score']

        if 'history' in checkpoint:
            self.history = checkpoint['history']

        self.logger.info(f"检查点恢复完成，当前轮数: {self.current_epoch}")

    def _load_pretrained_model(self, model_path: str) -> None:
        """
        Load pretrained model weights (either a full checkpoint or a bare
        state dict).

        Args:
            model_path: model file path
        """
        self.logger.info(f"加载预训练模型: {model_path}")

        # NOTE: torch.load unpickles arbitrary objects; only load trusted files.
        checkpoint = torch.load(model_path, map_location=self.device)

        if 'model_state_dict' in checkpoint:
            self.model.load_state_dict(checkpoint['model_state_dict'])
        else:
            # The file is a raw state dict.
            self.model.load_state_dict(checkpoint)

        self.logger.info("预训练模型加载完成")

    def _log_metrics(self, metrics: Dict[str, float], epoch: int, mode: str) -> None:
        """
        Log metrics to the logger and (if enabled) TensorBoard.

        Args:
            metrics: metric name -> value
            epoch: epoch index used as the TensorBoard x-axis
            mode: 'train' / 'val' / 'test'
        """
        metrics_str = ', '.join([f"{k}: {v:.4f}" for k, v in metrics.items()])
        self.logger.info(f"{mode.upper()} Epoch {epoch} - {metrics_str}")

        if self.writer:
            for name, value in metrics.items():
                self.writer.add_scalar(f"{mode}/{name}", value, epoch)

    def _update_history(self, loss: float, metrics: Dict[str, float], mode: str) -> None:
        """
        Append one epoch's results to the in-memory history.

        Args:
            loss: epoch loss
            metrics: metric name -> value
            mode: 'train' or 'val'
        """
        if mode == 'train':
            self.history['train_loss'].append(loss)
            self.history['train_metrics'].append(metrics)
        elif mode == 'val':
            self.history['val_loss'].append(loss)
            self.history['val_metrics'].append(metrics)

    def _save_history(self) -> None:
        """
        Persist the training history as JSON in the output directory.
        """
        # Ensure the output directory exists before writing.
        os.makedirs(self.config.output_dir, exist_ok=True)
        history_path = os.path.join(self.config.output_dir, f"{self.config.experiment_name}_history.json")
        with open(history_path, 'w', encoding='utf-8') as f:
            json.dump(self.history, f, ensure_ascii=False, indent=2)

    def _should_early_stop(self, val_score: float) -> bool:
        """
        Update best-score tracking and decide whether to stop training.

        Args:
            val_score: value of the monitored validation quantity

        Returns:
            True when the early-stopping patience is exhausted.
        """
        # Best-score tracking runs even when early stopping is disabled so
        # that "is best" checkpoint decisions elsewhere stay meaningful.
        if (self.config.early_stopping_mode == 'min' and val_score < self.best_score) or \
           (self.config.early_stopping_mode == 'max' and val_score > self.best_score):
            self.best_score = val_score
            self.early_stopping_counter = 0
            return False

        if not self.config.early_stopping:
            return False

        self.early_stopping_counter += 1
        self.logger.info(f"早停计数器: {self.early_stopping_counter}/{self.config.early_stopping_patience}")

        if self.early_stopping_counter >= self.config.early_stopping_patience:
            self.logger.info(f"触发早停条件，停止训练")
            return True

        return False

    def train_one_epoch(self, epoch: int) -> Tuple[float, Dict[str, float]]:
        """
        Run one training epoch with gradient accumulation.

        Args:
            epoch: epoch index (0-based)

        Returns:
            (average loss, metrics dict)
        """
        self.model.train()

        total_loss = 0.0
        total_samples = 0
        metric_values = {}

        pbar = tqdm(self.train_loader, desc=f"Epoch {epoch}/{self.config.epochs}")

        # Parameters are updated once per gradient_accumulation_steps batches.
        self.optimizer.zero_grad()
        pending_update = False  # accumulated gradients awaiting a step

        for batch_idx, batch in enumerate(pbar):
            # Move the whole batch to the target device.
            batch = {k: v.to(self.device) for k, v in batch.items()}

            # Forward pass.
            if self.config.model_type == 'actor_critic':
                action_probs, value = self.model(batch['features'])
                # Simplified actor-critic objective: policy term plus a
                # half-weighted value term. Reward defaults to zeros when the
                # batch carries none.
                action_loss = self.criterion(action_probs, batch['label'])
                value_loss = self.criterion(value.squeeze(), batch['reward'] if 'reward' in batch else torch.zeros_like(batch['label'], dtype=torch.float))
                loss = action_loss + 0.5 * value_loss
                # The policy head doubles as classification outputs for the
                # metric computation below (the original left `outputs`
                # unset here, causing a NameError).
                outputs = action_probs
            else:
                outputs = self.model(batch['features'])
                if self.config.loss_type == 'cross_entropy':
                    loss = self.criterion(outputs, batch['label'])
                else:
                    loss = self.criterion(outputs.squeeze(), batch['label'].float())

            # Scale down so the accumulated gradient matches one large batch.
            loss = loss / self.config.gradient_accumulation_steps
            loss.backward()
            pending_update = True

            # Step at the end of each accumulation window.
            if (batch_idx + 1) % self.config.gradient_accumulation_steps == 0:
                # Clip the *accumulated* gradient once, right before the
                # optimizer step (clipping every micro-batch over-constrains
                # the effective gradient).
                if self.config.clip_grad_norm > 0:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip_grad_norm)
                self.optimizer.step()
                self.optimizer.zero_grad()
                self.global_step += 1
                pending_update = False

            # Statistics (undo the accumulation scaling for reporting).
            batch_size = batch['features'].size(0)
            total_loss += loss.item() * self.config.gradient_accumulation_steps * batch_size
            total_samples += batch_size

            # Classification metrics.
            with torch.no_grad():
                if self.config.loss_type == 'cross_entropy':
                    _, predicted = torch.max(outputs, 1)
                    correct = (predicted == batch['label']).sum().item()

                    if 'accuracy' not in metric_values:
                        metric_values['accuracy'] = 0.0
                    metric_values['accuracy'] += correct

                    if 'f1' in self.config.eval_metrics:
                        from sklearn.metrics import f1_score
                        y_true = batch['label'].cpu().numpy()
                        y_pred = predicted.cpu().numpy()
                        f1 = f1_score(y_true, y_pred, average='macro', zero_division=0)

                        if 'f1' not in metric_values:
                            metric_values['f1'] = 0.0
                        # Weighted by batch size so the division below yields
                        # a per-sample mean.
                        metric_values['f1'] += f1 * batch_size

            # Periodic progress logging.
            if (batch_idx + 1) % self.config.log_interval == 0:
                avg_loss = total_loss / total_samples
                pbar.set_postfix({'loss': f'{avg_loss:.4f}'})

                if self.writer:
                    self.writer.add_scalar('train/batch_loss', loss.item() * self.config.gradient_accumulation_steps, self.global_step)
                    self.writer.add_scalar('train/learning_rate', self.optimizer.param_groups[0]['lr'], self.global_step)

            # Honour the global step budget.
            if self.config.max_steps and self.global_step >= self.config.max_steps:
                break

        # Flush a trailing partial accumulation window so its gradients are
        # not silently discarded at the epoch boundary.
        if pending_update:
            if self.config.clip_grad_norm > 0:
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.clip_grad_norm)
            self.optimizer.step()
            self.optimizer.zero_grad()
            self.global_step += 1

        # Per-sample averages; guard against an empty loader.
        denom = max(total_samples, 1)
        avg_loss = total_loss / denom
        for metric in metric_values:
            metric_values[metric] = metric_values[metric] / denom

        return avg_loss, metric_values

    def evaluate(self, data_loader: torch.utils.data.DataLoader) -> Tuple[float, Dict[str, float]]:
        """
        Evaluate the model on a data loader (no gradient updates).

        Args:
            data_loader: loader yielding batches with 'features' / 'label'
                         (and optionally 'reward' for actor-critic)

        Returns:
            (average loss, metrics dict)
        """
        self.model.eval()

        total_loss = 0.0
        total_samples = 0
        metric_values = {}

        with torch.no_grad():
            for batch in data_loader:
                batch = {k: v.to(self.device) for k, v in batch.items()}

                # Forward pass (mirrors train_one_epoch).
                if self.config.model_type == 'actor_critic':
                    action_probs, value = self.model(batch['features'])
                    action_loss = self.criterion(action_probs, batch['label'])
                    value_loss = self.criterion(value.squeeze(), batch['reward'] if 'reward' in batch else torch.zeros_like(batch['label'], dtype=torch.float))
                    loss = action_loss + 0.5 * value_loss
                    # Policy logits double as classification outputs (the
                    # original left `outputs` unset here -> NameError).
                    outputs = action_probs
                else:
                    outputs = self.model(batch['features'])
                    if self.config.loss_type == 'cross_entropy':
                        loss = self.criterion(outputs, batch['label'])
                    else:
                        loss = self.criterion(outputs.squeeze(), batch['label'].float())

                batch_size = batch['features'].size(0)
                total_loss += loss.item() * batch_size
                total_samples += batch_size

                # Classification metrics.
                if self.config.loss_type == 'cross_entropy':
                    _, predicted = torch.max(outputs, 1)
                    correct = (predicted == batch['label']).sum().item()

                    if 'accuracy' not in metric_values:
                        metric_values['accuracy'] = 0.0
                    metric_values['accuracy'] += correct

                    if 'f1' in self.config.eval_metrics:
                        from sklearn.metrics import f1_score
                        y_true = batch['label'].cpu().numpy()
                        y_pred = predicted.cpu().numpy()
                        f1 = f1_score(y_true, y_pred, average='macro', zero_division=0)

                        if 'f1' not in metric_values:
                            metric_values['f1'] = 0.0
                        metric_values['f1'] += f1 * batch_size

        # Per-sample averages; guard against an empty loader.
        denom = max(total_samples, 1)
        avg_loss = total_loss / denom
        for metric in metric_values:
            metric_values[metric] = metric_values[metric] / denom

        return avg_loss, metric_values

    def train(self) -> None:
        """
        Full training loop: load data, iterate epochs, periodically evaluate,
        checkpoint, early-stop, then run a final test pass.
        """
        self._load_data()

        self.logger.info(f"开始训练，共 {self.config.epochs} 轮")

        for epoch in range(self.current_epoch, self.config.epochs):
            start_time = time.time()

            train_loss, train_metrics = self.train_one_epoch(epoch)

            metrics = {'loss': train_loss, **train_metrics}
            self._log_metrics(metrics, epoch, 'train')
            self._update_history(train_loss, train_metrics, 'train')

            if epoch % self.config.evaluate_interval == 0:
                val_loss, val_metrics = self.evaluate(self.val_loader)

                val_metrics['loss'] = val_loss
                self._log_metrics(val_metrics, epoch, 'val')
                self._update_history(val_loss, val_metrics, 'val')

                # Resolve the monitored quantity once and reuse it for the
                # plateau scheduler, the best-model check AND early stopping
                # (the original early-stopped on val_loss even when a
                # different monitor was configured).
                if self.config.early_stopping_monitor == 'val_loss':
                    monitor_value = val_loss
                else:
                    monitor_value = val_metrics.get(self.config.early_stopping_monitor, val_loss)

                # Plateau schedulers need the monitored value; others step
                # unconditionally once per epoch.
                if self.scheduler and isinstance(self.scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                    self.scheduler.step(monitor_value)
                elif self.scheduler:
                    self.scheduler.step()

                # Must be computed BEFORE _should_early_stop updates best_score.
                is_best = (self.config.early_stopping_mode == 'min' and monitor_value < self.best_score) or \
                          (self.config.early_stopping_mode == 'max' and monitor_value > self.best_score)

                if self.config.save_checkpoint and (epoch % self.config.checkpoint_interval == 0 or epoch == self.config.epochs - 1):
                    if not self.config.save_best_only or is_best:
                        self._save_checkpoint(epoch, is_best)

                if self._should_early_stop(monitor_value):
                    break
            else:
                # No evaluation this epoch; still advance non-plateau schedulers.
                if self.scheduler and not isinstance(self.scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                    self.scheduler.step()

            elapsed_time = time.time() - start_time
            self.logger.info(f"Epoch {epoch} 耗时: {elapsed_time:.2f} 秒")

            # Honour the global step budget.
            if self.config.max_steps and self.global_step >= self.config.max_steps:
                self.logger.info(f"达到最大步数 {self.config.max_steps}，停止训练")
                break

            self._save_history()

        self.logger.info("训练完成")

        # Final test pass.
        if self.test_loader:
            self.logger.info("开始测试...")
            test_loss, test_metrics = self.evaluate(self.test_loader)
            test_metrics['loss'] = test_loss
            self._log_metrics(test_metrics, 0, 'test')

            # Ensure the output directory exists before writing results.
            os.makedirs(self.config.output_dir, exist_ok=True)
            test_result_path = os.path.join(self.config.output_dir, f"{self.config.experiment_name}_test_results.json")
            with open(test_result_path, 'w', encoding='utf-8') as f:
                json.dump(test_metrics, f, ensure_ascii=False, indent=2)

            self.logger.info(f"测试结果保存到: {test_result_path}")

    def test(self, checkpoint_path: Optional[str] = None) -> Dict[str, float]:
        """
        Evaluate on the test split, optionally loading weights first.

        Args:
            checkpoint_path: optional model/checkpoint file to load

        Returns:
            Test metrics (including 'loss').
        """
        if not self.test_loader:
            self._load_data()

        if checkpoint_path:
            self._load_pretrained_model(checkpoint_path)

        self.logger.info("开始测试...")
        test_loss, test_metrics = self.evaluate(self.test_loader)

        test_metrics['loss'] = test_loss
        self._log_metrics(test_metrics, 0, 'test')

        return test_metrics

    def export_model(self, export_path: str, format: str = 'pth') -> None:
        """
        Export the model for deployment.

        Args:
            export_path: destination file path
            format: 'pth' (state dict), 'onnx', or 'torchscript'

        Raises:
            ValueError: for an unknown export format.
        """
        self.logger.info(f"导出模型到: {export_path}, 格式: {format}")

        # Create the parent directory if the path has one; a bare filename
        # would make os.path.dirname return '' and makedirs('') raises.
        export_dir = os.path.dirname(export_path)
        if export_dir:
            os.makedirs(export_dir, exist_ok=True)

        if format == 'pth':
            # Plain PyTorch state dict (unwrap DataParallel if present).
            if hasattr(self.model, 'module'):  # DataParallel
                torch.save(self.model.module.state_dict(), export_path)
            else:
                torch.save(self.model.state_dict(), export_path)

        elif format == 'onnx':
            # ONNX export via tracing with a dummy input.
            dummy_input = torch.randn(1, self.config.model_config['input_dim']).to(self.device)

            if hasattr(self.model, 'module'):  # DataParallel
                model = self.model.module
            else:
                model = self.model

            torch.onnx.export(
                model,
                dummy_input,
                export_path,
                export_params=True,
                opset_version=11,
                do_constant_folding=True,
                input_names=['input'],
                output_names=['output'],
                dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}}
            )

        elif format == 'torchscript':
            # TorchScript export via tracing.
            dummy_input = torch.randn(1, self.config.model_config['input_dim']).to(self.device)

            if hasattr(self.model, 'module'):  # DataParallel
                model = self.model.module
            else:
                model = self.model

            traced_model = torch.jit.trace(model, dummy_input)
            traced_model.save(export_path)

        else:
            raise ValueError(f"未知的导出格式: {format}")

        self.logger.info("模型导出完成")


def train_model(config: Union[TrainingConfig, Dict], data_path: Optional[str] = None) -> TrainingManager:
    """
    Convenience entry point: build a TrainingManager from *config* and train.

    Args:
        config: a TrainingConfig instance, or a plain dict to build one from
        data_path: optional override for the training-data path

    Returns:
        The TrainingManager after training has completed.
    """
    # Accept either a ready-made config object or a raw dict.
    cfg = TrainingConfig(config) if isinstance(config, dict) else config

    # A caller-supplied data path wins over whatever the config carries.
    if data_path:
        cfg.train_data_path = data_path

    manager = TrainingManager(cfg)
    manager.train()
    return manager


def evaluate_model(model_path: str, 
                   data_path: str, 
                   model_type: str = 'policy_network',
                   batch_size: int = 64) -> Dict[str, float]:
    """
    Convenience entry point: evaluate a saved model on a dataset.

    Args:
        model_path: path of the saved model weights
        data_path: path of the evaluation data
        model_type: model architecture identifier
        batch_size: evaluation batch size

    Returns:
        Evaluation metrics (including 'loss').
    """
    # Minimal config: just enough to rebuild the model and point at the data.
    config = TrainingConfig({
        'model_type': model_type,
        'batch_size': batch_size,
        'pretrained_model': model_path,
        'test_data_path': data_path
    })

    return TrainingManager(config).test()


def export_trained_model(model_path: str,
                         export_path: str,
                         model_type: str = 'policy_network',
                         export_format: str = 'pth') -> None:
    """
    Convenience entry point: load trained weights and export them.

    Args:
        model_path: path of the saved model weights
        export_path: destination path for the exported model
        model_type: model architecture identifier
        export_format: one of 'pth' / 'onnx' / 'torchscript'
    """
    # Minimal config: just the architecture plus the weights to load.
    config = TrainingConfig({
        'model_type': model_type,
        'pretrained_model': model_path
    })

    TrainingManager(config).export_model(export_path, format=export_format)


def main() -> None:
    """
    Command-line entry point (for quick experiments).

    Builds a TrainingConfig either from a config file (--config) or from the
    individual CLI flags, then runs training via train_model().
    """
    import argparse

    parser = argparse.ArgumentParser(description='四川麻将AI训练')
    parser.add_argument('--config', type=str, help='配置文件路径')
    parser.add_argument('--data', type=str, help='数据路径')
    parser.add_argument('--model-type', type=str, default='policy_network', help='模型类型')
    parser.add_argument('--batch-size', type=int, default=64, help='批量大小')
    parser.add_argument('--epochs', type=int, default=100, help='训练轮数')
    parser.add_argument('--lr', type=float, default=1e-4, help='学习率')
    parser.add_argument('--output-dir', type=str, help='输出目录')
    args = parser.parse_args()

    if args.config:
        # A config file overrides the individual flags entirely.
        config = TrainingConfig.load(args.config)
    else:
        overrides = {
            'model_type': args.model_type,
            'batch_size': args.batch_size,
            'epochs': args.epochs,
            'learning_rate': args.lr,
        }
        if args.output_dir:
            overrides['output_dir'] = args.output_dir
        config = TrainingConfig(overrides)

    train_model(config, args.data)


if __name__ == '__main__':
    main()