"""
专用于模型评估的脚本。
基于train.py的架构，但专门用于对所有保存的checkpoint进行评估，而不进行训练。
增强版本：支持单个checkpoint异常时继续评估其他checkpoint
"""

# 解决 Hugging Face tokenizers 的 fork 警告
import os
import glob
import re
import traceback
import logging
import time
from datetime import datetime
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:32,'
os.environ["GRPC_DEFAULT_AUTHORITY"] = "localhost"

from ragen.trainer.agent_trainer import RayAgentTrainer
from train import DummyRewardManager, get_custom_reward_fn

import ray
import hydra
import torch
import numpy as np
from ragen.utils import register_resolvers
register_resolvers()


# 配置日志记录
def setup_logging():
    """Configure root logging to a timestamped file plus the console.

    Returns:
        logging.Logger: the logger for this module.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    handlers = [
        logging.FileHandler(f'evaluation_{timestamp}.log'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=handlers,
    )
    return logging.getLogger(__name__)


def find_all_checkpoint_dirs(checkpoint_root_dir, start_from_step=None, end_at_step=None, step_interval=1):
    """
    Scan a checkpoint root directory for ``global_step_*`` sub-directories.

    Args:
        checkpoint_root_dir: Root directory containing checkpoints.
        start_from_step: Minimum step to include; ``None`` means start from
            the smallest step found. When set it also anchors the interval grid.
        end_at_step: Maximum step to include; ``None`` means no upper bound.
        step_interval: Keep every ``step_interval``-th step relative to the
            anchor (``start_from_step`` if given, otherwise the first
            surviving checkpoint). Must be >= 1.

    Returns:
        list: Full paths of the matching checkpoint directories, sorted by
        step number ascending.

    Raises:
        ValueError: If ``step_interval`` is smaller than 1.
    """
    if step_interval < 1:
        # Previously step_interval=0 surfaced as a bare ZeroDivisionError in
        # the modulo below; fail fast with an explicit message instead.
        raise ValueError(f"step_interval must be >= 1, got {step_interval}")

    def extract_step_number(path):
        # e.g. ".../global_step_240" -> 240; names without a step sort first.
        match = re.search(r'global_step_(\d+)', os.path.basename(path))
        return int(match.group(1)) if match else 0

    # Collect only real directories, ordered by extracted step number.
    pattern = os.path.join(checkpoint_root_dir, "global_step_*")
    checkpoint_dirs = sorted(
        (d for d in glob.glob(pattern) if os.path.isdir(d)),
        key=extract_step_number,
    )

    filtered_dirs = []
    # Anchor of the interval grid: the explicit start step, or (lazily) the
    # first checkpoint that survives the range filters. Hoisted here instead
    # of re-deriving it from filtered_dirs[0] on every iteration.
    anchor_step = start_from_step
    for checkpoint_dir in checkpoint_dirs:
        step_num = extract_step_number(checkpoint_dir)

        # Range filters.
        if start_from_step is not None and step_num < start_from_step:
            continue
        if end_at_step is not None and step_num > end_at_step:
            continue

        # Interval filter.
        if anchor_step is None:
            # The first surviving checkpoint is always kept and anchors the grid.
            anchor_step = step_num
        elif (step_num - anchor_step) % step_interval != 0:
            continue

        filtered_dirs.append(checkpoint_dir)

    return filtered_dirs


class RayAgentEvaluator(RayAgentTrainer):
    """
    Evaluator derived from RayAgentTrainer.

    Dedicated to model evaluation: it loads a specified checkpoint and runs
    the trainer's validation pass, with retries and broad exception handling
    so that one broken checkpoint does not abort a whole evaluation sweep.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tracking logger (swanlab/wandb-style) used to record evaluation
        # metrics; created lazily by init_logger().
        self.logger = None
        # Stdlib logger for progress/diagnostic messages.
        self.sys_logger = logging.getLogger(__name__)

    def init_logger(self):
        """Lazily initialize the tracking logger (swanlab etc.).

        Initialization failures are tolerated: ``self.logger`` stays None and
        evaluation proceeds without metric tracking.
        """
        from omegaconf import OmegaConf
        from verl.utils.tracking import Tracking

        if self.logger is None:
            try:
                self.logger = Tracking(
                    project_name=self.config.trainer.project_name,
                    # The "-evaluation" suffix separates eval runs from
                    # training runs of the same experiment.
                    experiment_name=self.config.trainer.experiment_name + "-evaluation",
                    default_backend=self.config.trainer.logger,
                    config=OmegaConf.to_container(self.config, resolve=True),
                )
                # Fixed: this message previously claimed a "-eval" suffixed
                # project name that was never actually passed above.
                self.sys_logger.info(f"✓ Logger初始化完成，项目名: {self.config.trainer.project_name}")
            except Exception as e:
                self.sys_logger.error(f"Logger初始化失败: {e}")
                # Keep evaluating even without a tracking backend.
                self.logger = None

    def load_specific_checkpoint(self, checkpoint_path):
        """
        Load the checkpoint stored at ``checkpoint_path``.

        Sets ``self.global_steps`` from the directory name, then loads the
        actor state (mandatory) and the critic state (when a critic is used).

        Args:
            checkpoint_path: Full checkpoint directory path, e.g.
                "/path/to/global_step_240".

        Raises:
            FileNotFoundError: If the checkpoint dir or its "actor" subdir
                is missing.
            ValueError: If a step number cannot be parsed from the path.
        """
        if not os.path.exists(checkpoint_path):
            raise FileNotFoundError(f"Checkpoint路径不存在: {checkpoint_path}")

        self.sys_logger.info(f"正在加载checkpoint: {checkpoint_path}")

        # Extract the global step from the directory name.
        basename = os.path.basename(checkpoint_path)
        match = re.search(r'global_step_(\d+)', basename)
        if not match:
            raise ValueError(f"无法从路径中提取步数: {checkpoint_path}")

        self.global_steps = int(match.group(1))
        self.sys_logger.info(f"设置全局步数为: {self.global_steps}")

        # Load the actor checkpoint (required).
        actor_path = os.path.join(checkpoint_path, "actor")
        if not os.path.exists(actor_path):
            raise FileNotFoundError(f"Actor checkpoint不存在: {actor_path}")

        self.actor_rollout_wg.load_checkpoint(
            actor_path,
            del_local_after_load=self.config.trainer.get("del_local_ckpt_after_load", False)
        )

        # When a critic is in use, load its checkpoint too; a missing critic
        # dir is only a warning, not an error.
        if self.use_critic:
            critic_path = os.path.join(checkpoint_path, "critic")
            if os.path.exists(critic_path):
                self.critic_wg.load_checkpoint(
                    critic_path,
                    del_local_after_load=self.config.trainer.get("del_local_ckpt_after_load", False)
                )
            else:
                self.sys_logger.warning(f"Critic checkpoint不存在: {critic_path}")

        self.sys_logger.info(f"成功加载checkpoint: {checkpoint_path}")

    def safe_evaluate_checkpoint(self, checkpoint_path, max_retries=20, retry_delay=30):
        """
        Evaluate one checkpoint with retries and full exception handling.

        Args:
            checkpoint_path: Checkpoint directory path.
            max_retries: Maximum number of retries after the first attempt.
            retry_delay: Seconds to wait before each retry.

        Returns:
            dict: Evaluation metrics on success (``eval_status="success"``);
            a dict with error details (``eval_status="failed"``) after the
            final failed attempt; or an ``eval_status="skipped"`` dict when
            no validation reward function is configured.
        """
        checkpoint_name = os.path.basename(checkpoint_path)

        self.sys_logger.info(f"\n{'='*60}")
        self.sys_logger.info(f"开始评估 Checkpoint: {checkpoint_name}")
        self.sys_logger.info(f"{'='*60}")

        # Wall-clock start, shared across all attempts.
        start_time = time.time()

        for attempt in range(max_retries + 1):
            try:
                if attempt > 0:
                    self.sys_logger.info(f"第 {attempt + 1} 次尝试评估 {checkpoint_name}")
                    time.sleep(retry_delay)  # back off before retrying

                # Best-effort GPU memory cleanup before loading weights.
                if torch.cuda.is_available():
                    torch.cuda.empty_cache()

                # Load the requested checkpoint into the workers.
                self.load_specific_checkpoint(checkpoint_path)

                # Make sure the tracking logger exists (lazy init).
                if self.logger is None:
                    self.init_logger()

                # Run the validation pass.
                if self.val_reward_fn is not None:
                    val_metrics = self._validate()

                    eval_duration = time.time() - start_time

                    self.sys_logger.info(f"评估完成，耗时: {eval_duration:.2f}秒，指标: {val_metrics}")

                    # Attach checkpoint metadata to the metrics before logging.
                    eval_metrics = {
                        **val_metrics,
                        "checkpoint_step": self.global_steps,
                        "checkpoint_name": checkpoint_name,
                        "eval_duration_seconds": eval_duration,
                        "eval_status": "success"
                    }

                    # Record to the tracking backend (swanlab etc.); logging
                    # failures must not fail the evaluation itself.
                    if self.logger is not None:
                        try:
                            self.logger.log(data=eval_metrics, step=self.global_steps)
                            self.sys_logger.info(f"✓ 评估结果已记录到日志系统，步数: {self.global_steps}")
                        except Exception as log_e:
                            self.sys_logger.error(f"记录日志失败: {log_e}")

                    return eval_metrics
                else:
                    self.sys_logger.warning("没有设置验证奖励函数，跳过评估")
                    return {
                        "checkpoint_step": self.global_steps,
                        "checkpoint_name": checkpoint_name,
                        "eval_status": "skipped",
                        "message": "no_validation_reward_function"
                    }

            except Exception as e:
                error_msg = str(e)
                error_traceback = traceback.format_exc()

                self.sys_logger.error(f"评估 {checkpoint_name} 失败 (尝试 {attempt + 1}/{max_retries + 1}): {error_msg}")
                self.sys_logger.error(f"错误堆栈: {error_traceback}")

                # Final attempt: give up and return a structured error result.
                if attempt == max_retries:
                    eval_duration = time.time() - start_time

                    error_result = {
                        "checkpoint_step": getattr(self, 'global_steps', 0),
                        "checkpoint_name": checkpoint_name,
                        "eval_status": "failed",
                        "error": error_msg,
                        "error_traceback": error_traceback,
                        "eval_duration_seconds": eval_duration,
                        "total_attempts": max_retries + 1
                    }

                    # Best-effort: record the failure to the tracking backend too.
                    if self.logger is not None:
                        try:
                            self.logger.log(data=error_result, step=getattr(self, 'global_steps', 0))
                        except Exception as log_e:
                            self.sys_logger.error(f"记录失败日志失败: {log_e}")

                    return error_result

                # Not the final attempt: log and retry after the delay.
                self.sys_logger.warning(f"将在 {retry_delay} 秒后重试...")

                # Best-effort resource cleanup between attempts.
                try:
                    if torch.cuda.is_available():
                        torch.cuda.empty_cache()
                except Exception as cleanup_e:
                    self.sys_logger.error(f"清理GPU内存失败: {cleanup_e}")

    def evaluate_checkpoint(self, checkpoint_path):
        """
        Backward-compatible alias; delegates to safe_evaluate_checkpoint.
        """
        return self.safe_evaluate_checkpoint(checkpoint_path)


def add_dependency_and_validate_config(config):
    """Validate the configuration and fill in derived fields.

    The consistency checks are copied from train.py. On success the same
    config object is returned with ``data.train_batch_size`` populated;
    any violated check raises an AssertionError.
    """
    # Hoist the repeatedly-used config expressions into locals.
    per_step_batch = config.micro_batch_size_per_gpu * config.trainer.n_gpus_per_node
    mini_batch = config.actor_rollout_ref.actor.ppo_mini_batch_size

    # Batch-size consistency checks.
    assert per_step_batch <= mini_batch, \
        f"micro_batch_size_per_gpu * n_gpus_per_node ({per_step_batch}) must be less than or equal to ppo_mini_batch_size ({mini_batch})"
    assert mini_batch % per_step_batch == 0, \
        f"ppo_mini_batch_size ({mini_batch}) must be divisible by micro_batch_size_per_gpu * n_gpus_per_node ({per_step_batch})"

    # Response masking is only implemented for Qwen models.
    assert "qwen" in config.model_path.lower() or (not config.enable_response_mask), \
        "response mask is currently only supported for qwen models"

    # The GPU list must match the declared per-node GPU count.
    assert len(str(config.system.CUDA_VISIBLE_DEVICES).split(',')) == config.trainer.n_gpus_per_node, \
        f"CUDA_VISIBLE_DEVICES ({config.system.CUDA_VISIBLE_DEVICES}) must have the same number of GPUs as n_gpus_per_node ({config.trainer.n_gpus_per_node})"

    # mid_turn_ratio must be a number in [0, 1).
    mid_turn_ratio = getattr(config.agent_proxy, 'mid_turn_ratio', None)
    assert mid_turn_ratio is not None, "agent_proxy.mid_turn_ratio 必须设置，取值范围为 [0, 1)；0 表示从第一回合开始引入可选的 print_result 提示，不能等于 1"
    try:
        mid_turn_ratio = float(mid_turn_ratio)
    except Exception:
        raise AssertionError(f"agent_proxy.mid_turn_ratio 必须是数值类型，当前值为: {mid_turn_ratio}")
    assert 0 <= mid_turn_ratio < 1, f"agent_proxy.mid_turn_ratio 必须位于 [0, 1) 区间，当前值为: {mid_turn_ratio}"

    # Derived field: total train batch size from environment-group layout.
    config.data.train_batch_size = config.es_manager.train.env_groups * config.es_manager.train.group_size

    return config


class EvaluationTaskRunner:
    """Evaluation task runner — enhanced version with robust error handling."""
    
    def __init__(self):
        # Stdlib logger for progress/diagnostic messages.
        self.sys_logger = logging.getLogger(__name__)
    
    def run(self, config, checkpoint_root_dir=None, specific_checkpoint=None):
        """
        Run the evaluation task.
        
        Args:
            config: Configuration object.
            checkpoint_root_dir: Checkpoint root directory; if None, the
                config's trainer.default_local_dir is used.
            specific_checkpoint: Path of a single checkpoint to evaluate; if
                None, all matching checkpoints are evaluated.

        Returns:
            A one-element list when ``specific_checkpoint`` is given,
            otherwise a dict mapping checkpoint name -> result dict.
        """
        from verl.utils.fs import copy_to_local
        from pprint import pprint  # NOTE(review): imported but never used here

        try:
            # Download the model checkpoint to local storage.
            self.sys_logger.info("正在下载模型到本地...")
            local_path = copy_to_local(config.actor_rollout_ref.model.path)
            self.sys_logger.info(f"模型下载完成: {local_path}")

            # Instantiate the tokenizer and processor.
            from verl.utils import hf_tokenizer, hf_processor
            self.sys_logger.info("正在初始化tokenizer和processor...")
            tokenizer = hf_tokenizer(local_path)
            processor = hf_processor(local_path, use_fast=True)
            self.sys_logger.info("✓ Tokenizer和processor初始化完成")

            # Select worker classes (only the FSDP strategy is supported).
            if config.actor_rollout_ref.actor.strategy == 'fsdp':
                assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
                from ragen.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker
                from verl.single_controller.ray import RayWorkerGroup
                ray_worker_group_cls = RayWorkerGroup
            else:
                raise NotImplementedError(f"不支持的strategy: {config.actor_rollout_ref.actor.strategy}")

            from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role

            # Map roles to Ray worker classes.
            role_worker_mapping = {
                Role.ActorRollout: ray.remote(ActorRolloutRefWorker),
                Role.Critic: ray.remote(CriticWorker),
            }
            if config.actor_rollout_ref.actor.use_ref:
                self.sys_logger.info("[DEBUG] 使用参考策略")
                role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker)
            else:
                self.sys_logger.info("[DEBUG] 不使用参考策略，设置use_kl_loss为False")
                config.actor_rollout_ref.actor.use_kl_loss = False

            # Resource-pool configuration: all roles share one global pool.
            global_pool_id = 'global_pool'
            resource_pool_spec = {
                global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
            }

            mapping = {
                Role.ActorRollout: global_pool_id,
                Role.Critic: global_pool_id,
            }
            if config.actor_rollout_ref.actor.use_ref:
                mapping[Role.RefPolicy] = global_pool_id

            # Optional reward-model worker.
            if config.reward_model.enable:
                if config.reward_model.strategy == 'fsdp':
                    from ragen.workers.fsdp_workers import RewardModelWorker
                elif config.reward_model.strategy == 'megatron':
                    from verl.workers.megatron_workers import RewardModelWorker
                else:
                    raise NotImplementedError(f"不支持的reward_model strategy: {config.reward_model.strategy}")
                role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)
                mapping[Role.RewardModel] = global_pool_id

            # Create reward managers (num_examine=1 prints one validation sample).
            self.sys_logger.info("正在创建奖励管理器...")
            reward_manager_cls = DummyRewardManager
            compute_score = get_custom_reward_fn(config)
            reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=0, compute_score=compute_score)
            val_reward_fn = reward_manager_cls(tokenizer=tokenizer, num_examine=1, compute_score=compute_score)
            self.sys_logger.info("✓ 奖励管理器创建完成")

            resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)

            # Build the evaluator.
            self.sys_logger.info("正在创建评估器...")
            evaluator = RayAgentEvaluator(
                config=config,
                tokenizer=tokenizer,
                processor=processor,
                role_worker_mapping=role_worker_mapping,
                resource_pool_manager=resource_pool_manager,
                ray_worker_group_cls=ray_worker_group_cls,
                reward_fn=reward_fn,
                val_reward_fn=val_reward_fn
            )
            
            # Initialize workers, agent proxy and logger (order matters).
            self.sys_logger.info("正在初始化workers...")
            evaluator.init_workers()
            self.sys_logger.info("正在初始化agent proxy...")
            evaluator.init_agent_proxy()
            self.sys_logger.info("正在初始化logger...")
            evaluator.init_logger()
            self.sys_logger.info("✓ 评估器初始化完成")

        except Exception as e:
            self.sys_logger.error(f"评估器初始化失败: {e}")
            self.sys_logger.error(f"错误堆栈: {traceback.format_exc()}")
            raise

        # Resolve the checkpoint root directory.
        if checkpoint_root_dir is None:
            checkpoint_root_dir = config.trainer.default_local_dir
            if not os.path.isabs(checkpoint_root_dir):
                working_dir = os.getcwd()
                checkpoint_root_dir = os.path.join(working_dir, checkpoint_root_dir)

        # If a specific checkpoint was requested, evaluate only that one.
        if specific_checkpoint:
            if not os.path.isabs(specific_checkpoint):
                specific_checkpoint = os.path.join(checkpoint_root_dir, specific_checkpoint)
            
            self.sys_logger.info(f"评估指定的checkpoint: {specific_checkpoint}")
            result = evaluator.safe_evaluate_checkpoint(specific_checkpoint)
            self.sys_logger.info(f"评估结果: {result}")
            return [result]

        # Otherwise evaluate all checkpoints, filtered by configuration.
        # Read evaluation parameters from the config.
        eval_config = config.trainer.get("evaluation", {})
        start_from_step = eval_config.get("start_from_step", None)
        end_at_step = eval_config.get("end_at_step", None)
        step_interval = eval_config.get("step_interval", 1)
        
        # Normalize config values: both 0 and null mean "unset".
        if start_from_step == 0:
            start_from_step = None
        if end_at_step == 0:
            end_at_step = None
            
        self.sys_logger.info(f"评估配置:")
        self.sys_logger.info(f"  起始步数: {start_from_step if start_from_step is not None else '从最小开始'}")
        self.sys_logger.info(f"  结束步数: {end_at_step if end_at_step is not None else '到最大结束'}")
        self.sys_logger.info(f"  步数间隔: {step_interval}")
        
        try:
            checkpoint_dirs = find_all_checkpoint_dirs(
                checkpoint_root_dir, 
                start_from_step=start_from_step, 
                end_at_step=end_at_step, 
                step_interval=step_interval
            )
        except Exception as e:
            self.sys_logger.error(f"查找checkpoint失败: {e}")
            return []
        
        if not checkpoint_dirs:
            self.sys_logger.warning(f"在目录 {checkpoint_root_dir} 中没有找到符合条件的checkpoint")
            self.sys_logger.warning(f"请检查：起始步数={start_from_step}, 结束步数={end_at_step}, 步数间隔={step_interval}")
            return []

        self.sys_logger.info(f"找到 {len(checkpoint_dirs)} 个符合条件的checkpoint，将按顺序评估:")
        for i, checkpoint_dir in enumerate(checkpoint_dirs):
            self.sys_logger.info(f"  {i+1}. {os.path.basename(checkpoint_dir)}")

        # Accumulate all evaluation results keyed by checkpoint name.
        all_results = {}
        total_start_time = time.time()
        
        # Evaluate each checkpoint in order — key point: robust exception
        # handling so one failure does not stop the sweep.
        for i, checkpoint_dir in enumerate(checkpoint_dirs):
            checkpoint_name = os.path.basename(checkpoint_dir)
            
            self.sys_logger.info(f"\n进度: [{i+1}/{len(checkpoint_dirs)}] 正在处理 {checkpoint_name}")
            
            # Use the safe evaluation method so a single checkpoint failure
            # does not affect the others.
            try:
                result = evaluator.safe_evaluate_checkpoint(checkpoint_dir)
                all_results[checkpoint_name] = result
                
                if result.get("eval_status") == "success":
                    self.sys_logger.info(f"✓ {checkpoint_name} 评估成功")
                elif result.get("eval_status") == "failed":
                    self.sys_logger.error(f"✗ {checkpoint_name} 评估失败: {result.get('error', 'Unknown error')}")
                else:
                    self.sys_logger.warning(f"⚠ {checkpoint_name} 评估跳过: {result.get('message', 'Unknown reason')}")
                
            except Exception as e:
                # Outermost catch: safe_evaluate_checkpoint should already
                # handle everything, but guard against the unexpected anyway.
                error_msg = str(e)
                error_traceback = traceback.format_exc()
                
                self.sys_logger.error(f"✗ {checkpoint_name} 发生未预期的异常: {error_msg}")
                self.sys_logger.error(f"错误堆栈: {error_traceback}")
                
                # Record the unexpected failure as a result entry.
                all_results[checkpoint_name] = {
                    "checkpoint_name": checkpoint_name,
                    "eval_status": "unexpected_error",
                    "error": error_msg,
                    "error_traceback": error_traceback
                }
        
        # Total wall-clock time for the whole sweep.
        total_duration = time.time() - total_start_time
        
        # Print a summary of all evaluation results.
        self.sys_logger.info(f"\n{'='*80}")
        self.sys_logger.info("所有Checkpoint评估结果汇总:")
        self.sys_logger.info(f"总评估时间: {total_duration:.2f}秒")
        self.sys_logger.info(f"{'='*80}")
        
        # Bucket results by status for the summary statistics.
        successful_evals = []
        failed_evals = []
        skipped_evals = []
        
        for checkpoint_name, result in all_results.items():
            self.sys_logger.info(f"\n{checkpoint_name}:")
            status = result.get("eval_status", "unknown")
            
            if status == "success":
                successful_evals.append((checkpoint_name, result))
                # Print the key numeric metrics.
                for key, value in result.items():
                    if isinstance(value, (int, float)) and key not in ["checkpoint_step", "eval_duration_seconds"]:
                        self.sys_logger.info(f"  {key}: {value:.4f}")
                    elif key in ["eval_duration_seconds"]:
                        self.sys_logger.info(f"  {key}: {value:.2f}")
                self.sys_logger.info(f"  状态: ✓ 成功")
                
            elif status in ["failed", "unexpected_error"]:
                failed_evals.append(checkpoint_name)
                self.sys_logger.info(f"  状态: ✗ 失败 - {result.get('error', 'Unknown error')}")
                
            elif status == "skipped":
                skipped_evals.append(checkpoint_name)
                self.sys_logger.info(f"  状态: ⚠ 跳过 - {result.get('message', 'Unknown reason')}")
                
            else:
                self.sys_logger.info(f"  状态: ? 未知状态 - {status}")
        
        # Record summary statistics to the tracking logger.
        try:
            if evaluator.logger is not None:
                summary_metrics = {
                    "eval_summary/total_checkpoints": len(all_results),
                    "eval_summary/successful_evaluations": len(successful_evals),
                    "eval_summary/failed_evaluations": len(failed_evals),
                    "eval_summary/skipped_evaluations": len(skipped_evals),
                    "eval_summary/success_rate": len(successful_evals) / len(all_results) if all_results else 0,
                    "eval_summary/total_duration_seconds": total_duration,
                }
                
                # Aggregate numeric metrics across the successful evaluations.
                if successful_evals:
                    # Collect every numeric metric from successful results.
                    all_numeric_metrics = {}
                    for _, result in successful_evals:
                        for key, value in result.items():
                            if isinstance(value, (int, float)) and not key.startswith("checkpoint_") and key not in ["eval_duration_seconds"]:
                                if key not in all_numeric_metrics:
                                    all_numeric_metrics[key] = []
                                all_numeric_metrics[key].append(value)
                    
                    # Average / max / min per metric.
                    for key, values in all_numeric_metrics.items():
                        if values:
                            avg_value = sum(values) / len(values)
                            summary_metrics[f"eval_summary/avg_{key}"] = avg_value
                            summary_metrics[f"eval_summary/max_{key}"] = max(values)
                            summary_metrics[f"eval_summary/min_{key}"] = min(values)
                
                # Use the highest checkpoint step as the summary's step index.
                summary_step = max([int(name.split('_')[-1]) for name in all_results.keys() if 'global_step_' in name], default=0)
                evaluator.logger.log(data=summary_metrics, step=summary_step)
                self.sys_logger.info(f"\n✓ 汇总结果已记录到日志系统")
                
        except Exception as e:
            self.sys_logger.error(f"记录汇总结果失败: {e}")
        
        # Print the final statistics.
        self.sys_logger.info(f"\n{'='*80}")
        self.sys_logger.info("最终统计:")
        self.sys_logger.info(f"  总计: {len(all_results)} 个checkpoint")
        self.sys_logger.info(f"  成功: {len(successful_evals)} 个")
        self.sys_logger.info(f"  失败: {len(failed_evals)} 个")
        self.sys_logger.info(f"  跳过: {len(skipped_evals)} 个")
        self.sys_logger.info(f"  成功率: {len(successful_evals) / len(all_results) * 100:.1f}%" if all_results else "N/A")
        self.sys_logger.info(f"  总耗时: {total_duration:.2f}秒 ({total_duration/60:.1f}分钟)")
        if successful_evals:
            avg_time_per_eval = total_duration / len(all_results)
            self.sys_logger.info(f"  平均每个checkpoint耗时: {avg_time_per_eval:.2f}秒")
        self.sys_logger.info(f"{'='*80}")
        
        return all_results


def run_evaluation(config, checkpoint_root_dir=None, specific_checkpoint=None):
    """Top-level evaluation driver — enhanced version with broad error handling.

    Sets GPU visibility, (re)initializes Ray, delegates to
    EvaluationTaskRunner, and always shuts Ray down in the ``finally`` block.

    Args:
        config: Validated configuration object.
        checkpoint_root_dir: Optional checkpoint root directory override.
        specific_checkpoint: Optional single checkpoint to evaluate.

    Returns:
        The results from EvaluationTaskRunner.run (list or dict).
    """
    sys_logger = logging.getLogger(__name__)
    
    # Pin visible GPUs for this process and its children.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.system.CUDA_VISIBLE_DEVICES)
    os.environ["ENSURE_CUDA_VISIBLE_DEVICES"] = os.environ.get('CUDA_VISIBLE_DEVICES', '')

    sys_logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ['CUDA_VISIBLE_DEVICES']}")

    # Start from a fresh Ray instance.
    if ray.is_initialized():
        sys_logger.info("关闭现有的Ray实例...")
        ray.shutdown()

    try:
        sys_logger.info("初始化Ray...")
        ray.init(
            runtime_env={
                'env_vars': {
                    # NOTE(review): workers receive TOKENIZERS_PARALLELISM
                    # ='true' while this module sets 'false' at import time —
                    # confirm the mismatch is intentional.
                    'TOKENIZERS_PARALLELISM': 'true',
                    'NCCL_DEBUG': 'WARN',
                    'VLLM_LOGGING_LEVEL': 'WARN',
                    "RAY_DEBUG": "legacy",
                    "CUDA_VISIBLE_DEVICES": str(config.system.CUDA_VISIBLE_DEVICES)
                }
            },
            ignore_reinit_error=True,
            configure_logging=True,
            logging_level='warning'
        )
        sys_logger.info("✓ Ray初始化完成")

        runner = EvaluationTaskRunner()
        results = runner.run(config, checkpoint_root_dir, specific_checkpoint)
        
        sys_logger.info("✓ 所有评估任务完成")
        return results

    except KeyboardInterrupt:
        sys_logger.warning("用户中断了评估过程")
        raise
    except Exception as e:
        sys_logger.error(f"评估过程中发生严重错误: {e}")
        sys_logger.error(f"错误堆栈: {traceback.format_exc()}")
        raise
    finally:
        # Always shut Ray down, even on failure or interrupt.
        if ray.is_initialized():
            sys_logger.info("正在关闭Ray...")
            try:
                ray.shutdown()
                sys_logger.info("✓ Ray已关闭")
            except Exception as e:
                sys_logger.error(f"关闭Ray时出错: {e}")


@hydra.main(version_base=None, config_path="config", config_name="base")
def main(config, checkpoint_root_dir=None, specific_checkpoint=None):
    """
    Main entry point — enhanced version with full logging.
    
    Args:
        config: Hydra configuration.
        checkpoint_root_dir: Optional checkpoint root directory.
            NOTE(review): Hydra only supplies ``config``; when invoked from
            the CLI the two extra arguments always keep their None defaults.
        specific_checkpoint: Optional single checkpoint to evaluate.
    """
    # Configure file + console logging.
    sys_logger = setup_logging()
    
    sys_logger.info("="*80)
    sys_logger.info("开始模型评估任务")
    sys_logger.info("="*80)
    
    try:
        # Validate and post-process the configuration.
        sys_logger.info("正在验证配置...")
        config = add_dependency_and_validate_config(config)
        sys_logger.info("✓ 配置验证完成")
        
        # Run the evaluation sweep.
        results = run_evaluation(config, checkpoint_root_dir, specific_checkpoint)
        
        sys_logger.info("="*80)
        sys_logger.info("评估任务全部完成！")
        sys_logger.info(f"共处理了 {len(results)} 个checkpoint")
        
        # Final tally (dict results come from the all-checkpoints path).
        if isinstance(results, dict):
            success_count = sum(1 for r in results.values() if r.get("eval_status") == "success")
            failed_count = sum(1 for r in results.values() if r.get("eval_status") in ["failed", "unexpected_error"])
            skipped_count = sum(1 for r in results.values() if r.get("eval_status") == "skipped")
            
            sys_logger.info(f"成功: {success_count}, 失败: {failed_count}, 跳过: {skipped_count}")
        
        sys_logger.info("="*80)
        
        return results
        
    except KeyboardInterrupt:
        sys_logger.warning("用户手动终止了程序")
        sys_logger.info("程序正在安全退出...")
        return None
        
    except Exception as e:
        sys_logger.error(f"主程序发生致命错误: {e}")
        sys_logger.error(f"错误堆栈: {traceback.format_exc()}")
        sys_logger.error("="*80)
        sys_logger.error("程序异常终止")
        sys_logger.error("="*80)
        raise


if __name__ == '__main__':
    # Hydra parses CLI overrides and invokes main(config).
    main()