"""
模型评估工具
"""
import os
import time
import logging
import json
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from typing import Dict, List, Optional, Tuple, Union
from collections import defaultdict
import torch
import torch.nn as nn
from sklearn.metrics import (
    accuracy_score,
    precision_recall_fscore_support,
    confusion_matrix,
    classification_report,
    roc_auc_score,
    auc,
    roc_curve
)
from sklearn.preprocessing import label_binarize

from sichuanmajiang.model.config import TrainingConfig
from sichuanmajiang.model.mahjong_model import create_mahjong_model
from sichuanmajiang.data.data_loader import load_data
from sichuanmajiang.data.preprocessor import DataPreprocessor


class ModelEvaluator:
    """
    Evaluator for trained mahjong models.

    Provides classification-metric evaluation over a DataLoader, game-play
    performance analysis, multi-model comparison with visualization, and
    plain batch inference.
    """
    def __init__(self, 
                 model_path: str, 
                 model_type: str = 'policy_network',
                 model_config: Optional[Dict] = None,
                 device: Optional[str] = None):
        """
        Build the model, load the checkpoint and switch to eval mode.

        Args:
            model_path: Path to the model checkpoint file.
            model_type: Model type name passed to the model factory.
            model_config: Extra keyword arguments for the model factory.
            device: Device string ('cuda' / 'cpu'); auto-detected when None.
        """
        # Logging
        self.logger = self._setup_logging()
        self.logger.info(f"初始化模型评估器，模型路径: {model_path}")
        
        # Prefer GPU when available unless the caller forced a device.
        if device is None:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = device
        self.logger.info(f"使用设备: {self.device}")
        
        # Model configuration
        if model_config is None:
            model_config = {}
        
        # Build model and load checkpoint weights.
        self.model = self._build_model(model_type, model_config)
        self._load_model(model_path)
        
        # Evaluation only: disable dropout / batch-norm updates.
        self.model.eval()
        
        # Preprocessor (kept for feature preparation by callers).
        self.preprocessor = DataPreprocessor()
        
        # Metrics from the most recent evaluate() call.
        self.evaluation_results: Dict[str, float] = {}
    
    def _setup_logging(self) -> logging.Logger:
        """
        Create (or reuse) the evaluator's console logger.

        Returns:
            Configured logger instance.
        """
        logger = logging.getLogger('ModelEvaluator')
        logger.setLevel(logging.INFO)
        
        # Attach a console handler only once — the logger is a process-wide
        # singleton keyed by name, so repeated construction must not
        # duplicate handlers.
        if not logger.handlers:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
            console_handler.setFormatter(formatter)
            logger.addHandler(console_handler)
        
        return logger
    
    def _build_model(self, model_type: str, model_config: Dict) -> nn.Module:
        """
        Instantiate the model via the project factory and move it to the
        evaluator's device.

        Args:
            model_type: Model type name.
            model_config: Keyword arguments forwarded to the factory.
            
        Returns:
            Model instance on ``self.device``.
        """
        self.logger.info(f"构建模型: {model_type}")
        
        model = create_mahjong_model(
            model_type=model_type,
            **model_config
        )
        model.to(self.device)
        
        return model
    
    def _load_model(self, model_path: str) -> None:
        """
        Load checkpoint weights into the model.

        Accepts either a training checkpoint dict (``model_state_dict`` key)
        or a raw state dict.

        Args:
            model_path: Path to the checkpoint file.

        Raises:
            Exception: Re-raises whatever ``torch.load`` / ``load_state_dict``
                raises after logging it.
        """
        self.logger.info(f"加载模型: {model_path}")
        
        try:
            # map_location keeps CUDA checkpoints loadable on CPU-only hosts.
            checkpoint = torch.load(model_path, map_location=self.device)
            
            if 'model_state_dict' in checkpoint:
                self.model.load_state_dict(checkpoint['model_state_dict'])
            else:
                # A directly-saved state dict.
                self.model.load_state_dict(checkpoint)
            
            self.logger.info("模型加载成功")
        except Exception as e:
            self.logger.error(f"模型加载失败: {str(e)}")
            raise
    
    def evaluate(self, 
                 data_loader: torch.utils.data.DataLoader,
                 metrics: Optional[List[str]] = None,
                 save_results: bool = False,
                 output_dir: Optional[str] = None) -> Dict[str, float]:
        """
        Evaluate the model over a data loader.

        Batches are expected to be dicts with 'features' and 'label'
        tensors (assumption from usage below — confirm against the loader).

        Args:
            data_loader: Source of evaluation batches.
            metrics: Metric names to compute; defaults to
                ['accuracy', 'precision', 'recall', 'f1', 'loss'].
            save_results: Whether to persist results/plots.
            output_dir: Output directory (required when save_results=True).
            
        Returns:
            Dict of metric name -> value (includes timing stats).

        Raises:
            ValueError: If the data loader yields no samples.
        """
        self.logger.info("开始评估模型...")
        
        if metrics is None:
            metrics = ['accuracy', 'precision', 'recall', 'f1', 'loss']
        
        # Accumulated predictions across all batches.
        y_true: List[int] = []
        y_pred: List[int] = []
        y_score: List[np.ndarray] = []
        total_loss = 0.0
        total_samples = 0
        
        # Stateless criterion — hoisted out of the batch loop (the original
        # rebuilt it every iteration).
        criterion = nn.CrossEntropyLoss()
        
        start_time = time.time()
        
        with torch.no_grad():
            for batch in data_loader:
                # Move all tensors to the evaluator's device.
                batch = {k: v.to(self.device) for k, v in batch.items()}
                
                outputs = self.model(batch['features'])
                loss = criterion(outputs, batch['label'])
                
                # Weight the mean batch loss by batch size so the final
                # average is per-sample even with a ragged last batch.
                batch_size = batch['features'].size(0)
                total_loss += loss.item() * batch_size
                total_samples += batch_size
                
                # Hard predictions = argmax over class logits.
                _, predicted = torch.max(outputs, 1)
                y_true.extend(batch['label'].cpu().numpy())
                y_pred.extend(predicted.cpu().numpy())
                
                # Class probabilities for ROC/AUC computation.
                probs = torch.nn.functional.softmax(outputs, dim=1)
                y_score.extend(probs.cpu().numpy())
        
        # Guard the divisions below: an empty loader would otherwise raise
        # a bare ZeroDivisionError.
        if total_samples == 0:
            raise ValueError("data_loader yielded no samples")
        
        avg_loss = total_loss / total_samples
        evaluation_metrics = {'loss': avg_loss}
        
        if 'accuracy' in metrics:
            evaluation_metrics['accuracy'] = accuracy_score(y_true, y_pred)
        
        # Precision / recall / F1 share a single sklearn call.
        if any(m in metrics for m in ['precision', 'recall', 'f1']):
            precision, recall, f1, _ = precision_recall_fscore_support(
                y_true, y_pred, average='macro', zero_division=0
            )
            if 'precision' in metrics:
                evaluation_metrics['precision'] = precision
            if 'recall' in metrics:
                evaluation_metrics['recall'] = recall
            if 'f1' in metrics:
                evaluation_metrics['f1'] = f1
        
        # ROC AUC needs at least two distinct classes in y_true.
        if 'roc_auc' in metrics and len(np.unique(y_true)) > 1:
            try:
                if len(np.unique(y_true)) == 2:
                    # Binary: score of the positive class.
                    auc_score = roc_auc_score(y_true, np.array(y_score)[:, 1])
                else:
                    # Multiclass: one-vs-rest over binarized labels.
                    y_true_bin = label_binarize(y_true, classes=np.unique(y_true))
                    auc_score = roc_auc_score(y_true_bin, np.array(y_score), multi_class='ovr')
                evaluation_metrics['roc_auc'] = auc_score
            except Exception as e:
                # AUC is best-effort; a failure should not abort evaluation.
                self.logger.warning(f"ROC AUC计算失败: {str(e)}")
        
        eval_time = time.time() - start_time
        evaluation_metrics['evaluation_time'] = eval_time
        # Clamp the denominator: on tiny datasets elapsed time can round to 0.
        evaluation_metrics['samples_per_second'] = total_samples / max(eval_time, 1e-9)
        
        self.evaluation_results = evaluation_metrics
        self._log_evaluation_results(evaluation_metrics)
        
        if save_results and output_dir:
            self._save_evaluation_results(evaluation_metrics, output_dir)
            
            if 'confusion_matrix' in metrics:
                self._generate_confusion_matrix(y_true, y_pred, output_dir)
            
            if 'classification_report' in metrics:
                self._generate_classification_report(y_true, y_pred, output_dir)
            
            # The ROC plot is strictly binary; the original also admitted the
            # degenerate single-class case (<= 2), which cannot produce a
            # meaningful curve.
            if 'roc_curve' in metrics and len(np.unique(y_true)) == 2:
                self._generate_roc_curve(y_true, np.array(y_score), output_dir)
        
        return evaluation_metrics
    
    def _log_evaluation_results(self, metrics: Dict[str, float]) -> None:
        """
        Log all metrics at INFO level.

        Args:
            metrics: Metric name -> float value.
        """
        self.logger.info("===== 评估结果 ====")
        # The original branched on timing metrics but both branches were
        # byte-identical; one format string suffices.
        for metric_name, metric_value in metrics.items():
            self.logger.info(f"{metric_name}: {metric_value:.4f}")
        self.logger.info("==================")
    
    def _save_evaluation_results(self, metrics: Dict[str, float], output_dir: str) -> None:
        """
        Persist evaluation metrics as JSON.

        Args:
            metrics: Metric name -> value.
            output_dir: Output directory (created if missing).
        """
        os.makedirs(output_dir, exist_ok=True)
        
        results_path = os.path.join(output_dir, 'evaluation_results.json')
        with open(results_path, 'w', encoding='utf-8') as f:
            json.dump(metrics, f, ensure_ascii=False, indent=2)
        
        self.logger.info(f"评估结果保存到: {results_path}")
    
    def _generate_confusion_matrix(self, 
                                  y_true: List[int], 
                                  y_pred: List[int], 
                                  output_dir: str) -> None:
        """
        Save the confusion matrix as a heatmap PNG and a CSV.

        Args:
            y_true: Ground-truth labels.
            y_pred: Predicted labels.
            output_dir: Output directory.
        """
        cm = confusion_matrix(y_true, y_pred)
        
        # Class indices double as display names.
        class_names = [str(i) for i in range(cm.shape[0])]
        df_cm = pd.DataFrame(cm, index=class_names, columns=class_names)
        
        plt.figure(figsize=(10, 8))
        sns.heatmap(df_cm, annot=True, fmt='d', cmap='Blues')
        plt.title('Confusion Matrix')
        plt.xlabel('Predicted Label')
        plt.ylabel('True Label')
        
        cm_path = os.path.join(output_dir, 'confusion_matrix.png')
        plt.savefig(cm_path, bbox_inches='tight')
        plt.close()
        
        self.logger.info(f"混淆矩阵保存到: {cm_path}")
        
        cm_csv_path = os.path.join(output_dir, 'confusion_matrix.csv')
        df_cm.to_csv(cm_csv_path)
        self.logger.info(f"混淆矩阵CSV保存到: {cm_csv_path}")
    
    def _generate_classification_report(self, 
                                       y_true: List[int], 
                                       y_pred: List[int], 
                                       output_dir: str) -> None:
        """
        Save sklearn's classification report as JSON and CSV.

        Args:
            y_true: Ground-truth labels.
            y_pred: Predicted labels.
            output_dir: Output directory.
        """
        report = classification_report(y_true, y_pred, output_dict=True)
        
        report_path = os.path.join(output_dir, 'classification_report.json')
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, ensure_ascii=False, indent=2)
        
        # Transpose so classes become rows in the CSV.
        df_report = pd.DataFrame(report).transpose()
        report_csv_path = os.path.join(output_dir, 'classification_report.csv')
        df_report.to_csv(report_csv_path)
        
        self.logger.info(f"分类报告保存到: {report_path}")
        self.logger.info(f"分类报告CSV保存到: {report_csv_path}")
    
    def _generate_roc_curve(self, 
                           y_true: List[int], 
                           y_score: np.ndarray,
                           output_dir: str) -> None:
        """
        Plot and save a binary ROC curve.

        Args:
            y_true: Ground-truth binary labels.
            y_score: Per-class probability scores; column 1 is taken as the
                positive-class score.
            output_dir: Output directory.
        """
        fpr, tpr, _ = roc_curve(y_true, y_score[:, 1])
        roc_auc = auc(fpr, tpr)
        
        plt.figure(figsize=(8, 8))
        plt.plot(fpr, tpr, color='blue', lw=2, label=f'ROC curve (area = {roc_auc:.3f})')
        # Chance-level diagonal for reference.
        plt.plot([0, 1], [0, 1], color='gray', lw=2, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver Operating Characteristic')
        plt.legend(loc="lower right")
        
        roc_path = os.path.join(output_dir, 'roc_curve.png')
        plt.savefig(roc_path, bbox_inches='tight')
        plt.close()
        
        self.logger.info(f"ROC曲线保存到: {roc_path}")
    
    def analyze_game_performance(self, 
                               game_data: List[Dict], 
                               output_dir: Optional[str] = None) -> Dict[str, float]:
        """
        Aggregate game-play statistics (win rate, scores, decision quality).

        Args:
            game_data: List of per-game dicts; expected keys (confirm against
                the producer): 'result' (with 'winner'/'score'), 'rounds',
                'decisions' (each with 'quality' and 'action_type').
            output_dir: Optional directory for JSON stats and plots.
            
        Returns:
            Dict of aggregate game-performance statistics.
        """
        self.logger.info("分析游戏性能...")
        
        stats = {
            'total_games': len(game_data),
            'win_rate': 0,
            'avg_score': 0,
            'avg_rounds': 0,
            'avg_decisions_per_game': 0,
            'correct_decisions': 0,
            'total_decisions': 0,
            'decision_accuracy': 0,
            'strategy_metrics': defaultdict(int)
        }
        
        wins = 0
        total_score = 0
        total_rounds = 0
        total_decisions = 0
        correct_decisions = 0
        
        for game in game_data:
            if game.get('result', {}).get('winner') == 'ai':
                wins += 1
            
            total_score += game.get('result', {}).get('score', 0)
            total_rounds += game.get('rounds', 0)
            
            decisions = game.get('decisions', [])
            total_decisions += len(decisions)
            
            # Simplified correctness proxy: expert annotation would be
            # better, but here quality > 0.7 counts as a correct decision.
            for decision in decisions:
                if decision.get('quality', 0) > 0.7:
                    correct_decisions += 1
                
                action_type = decision.get('action_type', 'unknown')
                stats['strategy_metrics'][action_type] += 1
        
        # Freeze the counter into a plain dict so the returned/serialized
        # stats have no defaultdict auto-insertion behavior.
        stats['strategy_metrics'] = dict(stats['strategy_metrics'])
        
        # Averages guard against zero-game / zero-decision inputs.
        stats['win_rate'] = wins / stats['total_games'] if stats['total_games'] > 0 else 0
        stats['avg_score'] = total_score / stats['total_games'] if stats['total_games'] > 0 else 0
        stats['avg_rounds'] = total_rounds / stats['total_games'] if stats['total_games'] > 0 else 0
        stats['avg_decisions_per_game'] = total_decisions / stats['total_games'] if stats['total_games'] > 0 else 0
        stats['correct_decisions'] = correct_decisions
        stats['total_decisions'] = total_decisions
        stats['decision_accuracy'] = correct_decisions / total_decisions if total_decisions > 0 else 0
        
        self.logger.info("===== 游戏性能分析 ====")
        self.logger.info(f"总游戏数: {stats['total_games']}")
        self.logger.info(f"胜率: {stats['win_rate']:.4f}")
        self.logger.info(f"平均得分: {stats['avg_score']:.4f}")
        self.logger.info(f"平均轮数: {stats['avg_rounds']:.4f}")
        self.logger.info(f"平均每局决策数: {stats['avg_decisions_per_game']:.4f}")
        self.logger.info(f"决策准确率: {stats['decision_accuracy']:.4f}")
        self.logger.info("策略统计:")
        for action, count in stats['strategy_metrics'].items():
            self.logger.info(f"  {action}: {count}")
        self.logger.info("====================")
        
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)
            
            stats_path = os.path.join(output_dir, 'game_performance_stats.json')
            with open(stats_path, 'w', encoding='utf-8') as f:
                json.dump(stats, f, ensure_ascii=False, indent=2)
            
            self.logger.info(f"游戏性能统计保存到: {stats_path}")
            
            self._visualize_game_performance(stats, output_dir)
        
        return stats
    
    def _visualize_game_performance(self, stats: Dict, output_dir: str) -> None:
        """
        Save win-rate and strategy-distribution bar charts.

        Args:
            stats: Output of analyze_game_performance().
            output_dir: Output directory (assumed to exist).
        """
        # Win/loss bar chart.
        plt.figure(figsize=(6, 6))
        plt.bar(['Win', 'Loss'], [stats['win_rate'], 1 - stats['win_rate']], color=['green', 'red'])
        plt.title('Win Rate')
        plt.ylabel('Rate')
        plt.ylim(0, 1)
        plt.savefig(os.path.join(output_dir, 'win_rate.png'), bbox_inches='tight')
        plt.close()
        
        # Action-type distribution.
        plt.figure(figsize=(10, 6))
        actions = list(stats['strategy_metrics'].keys())
        counts = list(stats['strategy_metrics'].values())
        plt.bar(actions, counts)
        plt.title('Strategy Distribution')
        plt.xlabel('Action Type')
        plt.ylabel('Count')
        plt.xticks(rotation=45)
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, 'strategy_distribution.png'), bbox_inches='tight')
        plt.close()
    
    def compare_models(self, 
                      model_paths: List[str],
                      data_loader: torch.utils.data.DataLoader,
                      model_type: str = 'policy_network',
                      model_config: Optional[Dict] = None,
                      output_dir: Optional[str] = None) -> pd.DataFrame:
        """
        Evaluate several checkpoints on the same data and tabulate results.

        Args:
            model_paths: Checkpoint paths to compare.
            data_loader: Shared evaluation data.
            model_type: Model type for every checkpoint.
            model_config: Model factory configuration.
            output_dir: Optional directory for the CSV and plots.
            
        Returns:
            One row per successfully-evaluated model; empty DataFrame when
            all evaluations failed.
        """
        self.logger.info("比较模型性能...")
        
        comparison_results = []
        
        for model_path in model_paths:
            try:
                # Fresh evaluator per checkpoint so weights never leak
                # between comparisons.
                temp_evaluator = ModelEvaluator(
                    model_path=model_path,
                    model_type=model_type,
                    model_config=model_config,
                    device=self.device
                )
                
                metrics = temp_evaluator.evaluate(data_loader, save_results=False)
                
                metrics['model_path'] = model_path
                metrics['model_name'] = os.path.basename(model_path)
                
                comparison_results.append(metrics)
                
            except Exception as e:
                # One bad checkpoint must not abort the whole comparison.
                self.logger.error(f"模型 {model_path} 评估失败: {str(e)}")
        
        df_comparison = pd.DataFrame(comparison_results)
        
        if not df_comparison.empty:
            # Put identity and headline metrics first.
            columns_order = ['model_name', 'model_path', 'loss', 'accuracy', 'precision', 'recall', 'f1', 'roc_auc']
            existing_columns = [col for col in columns_order if col in df_comparison.columns]
            other_columns = [col for col in df_comparison.columns if col not in existing_columns]
            df_comparison = df_comparison[existing_columns + other_columns]
            
            self.logger.info("===== 模型比较结果 ====")
            self.logger.info(f"\n{df_comparison.to_string(index=False)}")
            
            if output_dir:
                os.makedirs(output_dir, exist_ok=True)
                
                comparison_path = os.path.join(output_dir, 'model_comparison.csv')
                df_comparison.to_csv(comparison_path, index=False)
                self.logger.info(f"模型比较结果保存到: {comparison_path}")
                
                self._visualize_model_comparison(df_comparison, output_dir)
        
        return df_comparison
    
    def _visualize_model_comparison(self, df_comparison: pd.DataFrame, output_dir: str) -> None:
        """
        Save comparison plots: loss bar, accuracy bar, and (when at least
        three metrics are present) a radar chart.

        Args:
            df_comparison: Output of compare_models().
            output_dir: Output directory (assumed to exist).
        """
        if 'loss' in df_comparison.columns:
            plt.figure(figsize=(10, 6))
            sns.barplot(x='model_name', y='loss', data=df_comparison)
            plt.title('Loss Comparison')
            plt.xlabel('Model')
            plt.ylabel('Loss')
            plt.xticks(rotation=45)
            plt.tight_layout()
            plt.savefig(os.path.join(output_dir, 'loss_comparison.png'), bbox_inches='tight')
            plt.close()
        
        if 'accuracy' in df_comparison.columns:
            plt.figure(figsize=(10, 6))
            sns.barplot(x='model_name', y='accuracy', data=df_comparison)
            plt.title('Accuracy Comparison')
            plt.xlabel('Model')
            plt.ylabel('Accuracy')
            plt.xticks(rotation=45)
            plt.tight_layout()
            plt.savefig(os.path.join(output_dir, 'accuracy_comparison.png'), bbox_inches='tight')
            plt.close()
        
        # Radar chart needs >= 3 axes to be readable.
        metrics_to_compare = ['accuracy', 'precision', 'recall', 'f1']
        available_metrics = [m for m in metrics_to_compare if m in df_comparison.columns]
        
        if len(available_metrics) >= 3:
            plt.figure(figsize=(10, 8))
            
            # Evenly-spaced spokes; repeat the first point to close the polygon.
            angles = np.linspace(0, 2*np.pi, len(available_metrics), endpoint=False).tolist()
            angles += angles[:1]
            
            for i, row in df_comparison.iterrows():
                values = [row[m] for m in available_metrics]
                values += values[:1]
                
                plt.polar(angles, values, label=row['model_name'], linewidth=2)
                plt.fill(angles, values, alpha=0.1)
            
            plt.xticks(angles[:-1], available_metrics)
            plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
            plt.title('Model Performance Comparison')
            
            plt.tight_layout()
            plt.savefig(os.path.join(output_dir, 'model_performance_radar.png'), bbox_inches='tight')
            plt.close()
    
    def batch_inference(self, 
                       features: Union[np.ndarray, torch.Tensor],
                       batch_size: int = 64,
                       return_probs: bool = False) -> Union[List[int], List[float]]:
        """
        Run the model over features in fixed-size batches.

        Args:
            features: Input feature array/tensor; first dimension is samples.
            batch_size: Samples per forward pass.
            return_probs: Return softmax probability rows instead of argmax
                class indices.
            
        Returns:
            Predicted class indices, or probability arrays when
            return_probs=True.
        """
        if isinstance(features, np.ndarray):
            features = torch.tensor(features, dtype=torch.float32)
        
        features = features.to(self.device)
        
        all_predictions = []
        all_probs = []
        
        with torch.no_grad():
            for i in range(0, len(features), batch_size):
                batch = features[i:i+batch_size]
                
                outputs = self.model(batch)
                
                _, predicted = torch.max(outputs, 1)
                all_predictions.extend(predicted.cpu().numpy())
                
                # Only compute probabilities when they will be returned.
                if return_probs:
                    probs = torch.nn.functional.softmax(outputs, dim=1)
                    all_probs.extend(probs.cpu().numpy())
        
        return all_probs if return_probs else all_predictions


def evaluate_model(model_path: str,
                  data_path: str,
                  model_type: str = 'policy_network',
                  model_config: Optional[Dict] = None,
                  batch_size: int = 64,
                  output_dir: Optional[str] = None) -> Dict[str, float]:
    """
    Convenience entry point: evaluate one checkpoint against one dataset.

    Args:
        model_path: Checkpoint path.
        data_path: Dataset path.
        model_type: Model type name.
        model_config: Model factory configuration.
        batch_size: Evaluation batch size.
        output_dir: When given, results are also written here.
        
    Returns:
        Metric name -> value dict from ModelEvaluator.evaluate().
    """
    # Evaluator first (builds model + loads weights), then the data.
    evaluator = ModelEvaluator(
        model_path=model_path,
        model_type=model_type,
        model_config=model_config,
    )

    loader = load_data(
        data_path=data_path,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
    )

    # Persist results only when a destination directory was supplied.
    should_save = output_dir is not None
    return evaluator.evaluate(
        data_loader=loader,
        save_results=should_save,
        output_dir=output_dir,
    )


def compare_multiple_models(model_paths: List[str],
                           data_path: str,
                           model_type: str = 'policy_network',
                           model_config: Optional[Dict] = None,
                           batch_size: int = 64,
                           output_dir: Optional[str] = None) -> pd.DataFrame:
    """
    Convenience entry point: evaluate several checkpoints on one dataset.

    Args:
        model_paths: Checkpoint paths to compare.
        data_path: Dataset path.
        model_type: Model type name (shared by all checkpoints).
        model_config: Model factory configuration.
        batch_size: Evaluation batch size.
        output_dir: When given, the comparison table and plots go here.
        
    Returns:
        Comparison DataFrame from ModelEvaluator.compare_models().
    """
    # Seed the evaluator with the first checkpoint; compare_models() builds
    # a fresh evaluator per path anyway.
    evaluator = ModelEvaluator(
        model_path=model_paths[0],
        model_type=model_type,
        model_config=model_config,
    )

    loader = load_data(
        data_path=data_path,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
    )

    return evaluator.compare_models(
        model_paths=model_paths,
        data_loader=loader,
        model_type=model_type,
        model_config=model_config,
        output_dir=output_dir,
    )


def analyze_game_results(game_data_path: str,
                        output_dir: Optional[str] = None) -> Dict[str, float]:
    """
    Convenience entry point: analyze saved game data without a model.

    Args:
        game_data_path: Path to a JSON file of per-game records.
        output_dir: Optional directory for stats and plots.
        
    Returns:
        Game-performance statistics dict.
    """
    # Load game data.
    with open(game_data_path, 'r', encoding='utf-8') as f:
        game_data = json.load(f)
    
    # Bug fix: the original built ModelEvaluator(model_path='dummy_path'),
    # whose __init__ tries to torch.load that path and re-raises, so this
    # function could never run. analyze_game_performance() only needs the
    # logger, so construct a bare evaluator and attach one directly.
    evaluator = ModelEvaluator.__new__(ModelEvaluator)
    evaluator.logger = evaluator._setup_logging()
    
    return evaluator.analyze_game_performance(
        game_data=game_data,
        output_dir=output_dir
    )


def main() -> None:
    """
    Command-line entry point for evaluation / comparison / game analysis.
    """
    import argparse
    
    parser = argparse.ArgumentParser(description='模型评估工具')
    parser.add_argument('--model', type=str, help='模型路径')
    parser.add_argument('--data', type=str, help='数据路径')
    parser.add_argument('--model-type', type=str, default='policy_network', help='模型类型')
    parser.add_argument('--batch-size', type=int, default=64, help='批量大小')
    parser.add_argument('--output', type=str, help='输出目录')
    parser.add_argument('--compare', nargs='+', help='比较多个模型')
    parser.add_argument('--analyze-games', type=str, help='分析游戏数据')
    
    args = parser.parse_args()
    
    if args.compare:
        # Multi-model comparison needs a dataset.
        if not args.data:
            parser.error('--data is required with --compare')
        compare_multiple_models(
            model_paths=args.compare,
            data_path=args.data,
            model_type=args.model_type,
            batch_size=args.batch_size,
            output_dir=args.output
        )
    elif args.analyze_games:
        # Game-data analysis needs no model or dataset.
        analyze_game_results(
            game_data_path=args.analyze_games,
            output_dir=args.output
        )
    else:
        # Single-model evaluation: fail fast with a usage message instead of
        # passing None paths downstream (the original did exactly that).
        if not args.model or not args.data:
            parser.error('--model and --data are required for evaluation')
        evaluate_model(
            model_path=args.model,
            data_path=args.data,
            model_type=args.model_type,
            batch_size=args.batch_size,
            output_dir=args.output
        )


if __name__ == '__main__':
    main()