"""
This is the environment state manager for the LLM agent.
author: Pingyue Zhang
date: 2025-03-30
"""

from dataclasses import dataclass, field
import json
from typing import Dict, List, Optional, Any, Union
import PIL.Image
import hydra
import random
import numpy as np

from ragen.env import REGISTERED_ENVS, REGISTERED_ENV_CONFIGS
from ragen.utils import register_resolvers

register_resolvers()

import multiprocessing
from functools import partial

use_parallel = True


@dataclass
class EnvStatus:
    """Per-environment rollout status.

    Reward histories are capped at MAX_HISTORY_LENGTH entries so that
    long-running training does not grow memory without bound.
    """

    truncated: bool = False   # episode finished without success
    terminated: bool = False  # episode finished successfully
    num_actions: int = 0      # actions executed so far in this trajectory
    process_rewards: List[float] = field(default_factory=list)  # per-turn process rewards
    result_rewards: List[float] = field(default_factory=list)   # per-turn result rewards
    seed: Optional[int] = None  # seed used to reset the underlying env

    # Upper bound on retained reward history (memory safety for long runs).
    MAX_HISTORY_LENGTH: int = 200

    def add_rewards(self, process_reward: float, result_reward: float):
        """Append one turn's rewards, discarding the oldest beyond the cap."""
        self.process_rewards.append(process_reward)
        self.result_rewards.append(result_reward)

        # Keep only the newest MAX_HISTORY_LENGTH entries of each stream.
        if len(self.process_rewards) > self.MAX_HISTORY_LENGTH:
            keep_from = -self.MAX_HISTORY_LENGTH
            self.process_rewards = self.process_rewards[keep_from:]
            self.result_rewards = self.result_rewards[keep_from:]


class EnvStateManager:
    """Environment state manager - owns and coordinates multiple (possibly heterogeneous) envs."""

    def __init__(self, config, mode: str = "train"):
        """Build every environment for `mode` ("train" or "val") from the system config."""
        self.sys_config = config
        self.mode = mode
        # Mode-specific sub-config (es_manager.train or es_manager.val).
        self.config = getattr(self.sys_config.es_manager, mode)
        self.env_groups = int(self.config.env_groups)
        self.group_size = self.config.group_size
        # Base seed for reproducible resets; None means fall back to random seeding.
        self.base_seed = self._get_base_seed()
        self.seed_counter = 0
        self.n_cpu = multiprocessing.cpu_count()
        self._init_envs()
        # Per-env rollout bookkeeping; populated by reset().
        self.rollout_cache = None
    def _get_base_seed(self) -> Optional[int]:
        """获取基础种子"""
        seed_cfg = getattr(self.sys_config, "seed", None)
        return seed_cfg.get(self.mode, None) if seed_cfg else None

    def _validate_env_config(self):
        """验证环境配置"""
        env_configs = self.config.env_configs
        assert (
            sum(env_configs.n_groups) == self.env_groups
        ), f"n_groups总和必须等于env_groups。得到 sum({env_configs.n_groups}) != {self.env_groups}"
        assert len(env_configs.tags) == len(
            env_configs.n_groups
        ), f"tags数量必须等于n_groups数量。得到 {len(env_configs.tags)} != {len(env_configs.n_groups)}"

    def _init_envs(self):
        """初始化环境"""
        self._validate_env_config()

        global use_parallel
        if use_parallel:
            self._init_envs_parallel()
        else:
            self.envs = self._init_env_instances()

    def _init_envs_parallel(self):
        """并行初始化环境"""
        import time
        init_start_time = time.time()
        
        env_configs = self.config.env_configs
        done_groups = 0
        self.envs = []  # 初始化为空列表
        
        # 优化：使用更多进程以提高初始化速度
        # 根据环境数量动态调整进程数：小任务用少进程，大任务用多进程
        total_envs = self.env_groups * self.group_size
        if total_envs <= 16:
            num_processes = min(self.n_cpu // 2, total_envs)
        elif total_envs <= 64:
            num_processes = min(self.n_cpu, total_envs)
        else:
            num_processes = self.n_cpu
        
        print(f"[ESManager] 开始并行初始化 {total_envs} 个环境，使用 {num_processes} 个进程...")
        
        for tag, n_group in zip(env_configs.tags, env_configs.n_groups):
            func = partial(
                self._init_one_env,
                sys_config=self.sys_config,
                group_size=self.group_size,
                tag=tag,
            )
            batch_size = n_group * self.group_size
            actual_processes = min(num_processes, batch_size)
            
            with multiprocessing.Pool(actual_processes) as pool:
                env_batch = pool.map(
                    func,
                    range(
                        done_groups * self.group_size,
                        (done_groups + n_group) * self.group_size,
                    ),
                )
                self.envs.extend(env_batch)  # 追加到列表中
            done_groups += n_group
        
        total_init_time = time.time() - init_start_time
        print(f"[ESManager] 环境初始化完成！总耗时: {total_init_time:.2f}s，平均每个环境: {total_init_time/total_envs:.3f}s")

    def _create_single_env(self, env_id: int, tag: str) -> Dict:
        """Instantiate one environment and wrap it with its bookkeeping dict.

        Args:
            env_id: Global index of the env; its group id is derived from it.
            tag: Key into sys_config.custom_envs selecting the env template.

        Returns:
            Dict holding the live env object, its merged config, a fresh
            EnvStatus, and per-trajectory/turn limits.
        """
        cfg_template = self.sys_config.custom_envs[tag]
        env_config = self._create_env_config(
            cfg_template, cfg_template.env_type, self.sys_config
        )
        env_obj = REGISTERED_ENVS[cfg_template.env_type](env_config)
        return {
            "tag": tag,
            "group_id": env_id // self.group_size,  # envs are laid out group-contiguously
            "env_id": env_id,
            "env": env_obj,
            "config": env_config,
            "status": EnvStatus(),
            "max_actions_per_traj": cfg_template.max_actions_per_traj,
            "max_turn": self.sys_config.agent_proxy.max_turn,
        }

    @staticmethod
    def _init_one_env(env_id, sys_config, group_size, tag):
        """Build one env entry inside a worker process.

        A bare manager shell (created without __init__) carries only the
        attributes _create_single_env reads, so the full manager never has
        to cross the process boundary.
        """
        shell = EnvStateManager.__new__(EnvStateManager)
        shell.sys_config = sys_config
        shell.group_size = group_size
        return shell._create_single_env(env_id, tag)

    def _init_env_instances(self) -> List[Dict]:
        """串行初始化环境实例"""
        env_list = []
        done_groups = 0
        config = self.config
        for tag, n_group in zip(config.env_configs.tags, config.env_configs.n_groups):
            for env_id in range(
                done_groups * self.group_size, (done_groups + n_group) * self.group_size
            ):
                env_list.append(self._create_single_env(env_id, tag))
            done_groups += n_group
        return env_list

    def _calculate_think_content_penalty(self, think_content: str) -> float:
        """计算基于think_content长度的惩罚"""
        # if think_content is None:
        #     return 0.0
        # length = len(think_content)
        # return min(150.0, max(0, 300 - length)) if length < 300 else 0.0
        return 0.0

    @staticmethod
    def _create_env_config(cfg_template, env_class, sys_config):
        """Create an env config by merging envs.yaml template with global config.

        Args:
            cfg_template: Per-env template from sys_config.custom_envs.
            env_class: Key into REGISTERED_ENV_CONFIGS.
            sys_config: Full system config (agent_proxy fields are copied in).

        Returns:
            A constructed config object for the env class.
        """
        # BUG FIX: copy before mutating. The original code updated
        # cfg_template.env_config in place, leaking merged agent_proxy fields
        # back into the shared template across repeated calls.
        params = dict(cfg_template.env_config) if cfg_template.env_config else {}

        # Common turn parameters come from the global agent_proxy config.
        agent_proxy = sys_config.agent_proxy
        params.update(
            {
                "max_turn": agent_proxy.max_turn,
                "mid_turn_ratio": agent_proxy.mid_turn_ratio,
                "max_actions_per_turn": agent_proxy.max_actions_per_turn,
            }
        )

        # Forward env-specific optional fields from the template.
        for attr in [
            "env_instruction",
            "mid_turn_instruction",
            "final_turn_instruction",
            "tools",
            "stage_tools",
            "data_root",
        ]:
            if hasattr(cfg_template, attr):
                params[attr] = getattr(cfg_template, attr)
        return REGISTERED_ENV_CONFIGS[env_class](**params)

    def _execute_actions(self, env, actions: List[str], turn_number: int = 0) -> tuple:
        """Run a list of actions on one env, accumulating rewards until done.

        Returns:
            6-tuple of (accumulated process reward, accumulated result reward,
            merged per-turn info dict, whether the env reported done, the
            actions actually executed, per-action summary strings).
        """
        acc_process_reward, acc_result_reward = 0, 0
        turn_info, turn_done = {}, False
        executed_actions = []
        action_summaries = []  # per-action summary strings, for logging only

        # An empty action list is replaced by a single synthetic "empty"
        # action so the env still advances one step.
        if not actions:
            empty_action = json.dumps({"tool_call": "empty", "args": {}})
            actions = [empty_action]
            turn_info["empty_action"] = True
            print("警告: 传入的动作列表为空，添加empty动作")

        for action in actions:
            action_summary, reward, done, info = env.step(
                action, turn_number=turn_number
            )
            action_summaries.append(action_summary)

            # Reward contract: must be the pair [process_reward, result_reward].
            if isinstance(reward, list) and len(reward) == 2:
                process_reward, result_reward = reward
                acc_process_reward += process_reward
                acc_result_reward += result_reward

                # Attach this action's reward split to its info record.
                info["action_process_reward"] = process_reward
                info["action_result_reward"] = result_reward
            else:
                # Any other reward shape is a contract violation.
                raise ValueError(
                    f"期望奖励格式为[process_reward, result_reward]，但得到: {reward}"
                )

            # Merge this step's info into the turn-level info (later steps win).
            turn_info = {**turn_info, **info}

            # The synthetic empty action is never recorded as executed.
            if not (len(actions) == 1 and turn_info.get("empty_action")):
                executed_actions.append(action)
            if done:
                turn_done = True
                break

        # Attach the per-turn accumulated rewards.
        turn_info["turn_process_reward"] = acc_process_reward
        turn_info["turn_result_reward"] = acc_result_reward

        # Summaries are printed only; they are not stored in turn_info.
        if action_summaries:
            for summary in action_summaries:
                print(f"[动作执行摘要] {summary}")

        return (
            acc_process_reward,
            acc_result_reward,
            turn_info,
            turn_done,
            executed_actions,
            action_summaries,
        )

    def _extract_map_valid_actions(self, entry: Dict, actions: List[str]) -> List[str]:
        """从动作查找表中提取有效动作"""
        action_lookup = getattr(entry["env"].config, "action_lookup", None)
        if action_lookup is None:
            return actions

        rev_action_lookup = {v.lower(): k for k, v in action_lookup.items()}
        return [
            rev_action_lookup[action.lower()]
            for action in actions
            if action.lower() in rev_action_lookup
        ]

    def _handle_mm_state(self, state: Union[str, np.ndarray, list[np.ndarray]]):
        """处理多模态状态"""
        if isinstance(state, str):
            return state
        elif isinstance(state, np.ndarray):
            state = [state]
        return [PIL.Image.fromarray(_state, mode="RGB") for _state in state]

    def _update_cache_history(
        self,
        history: List[Dict],
        next_state,
        actions_left,
        num_actions_info: Optional[Dict] = None,
    ) -> List[Dict]:
        """更新缓存历史"""
        if num_actions_info is not None:
            assert len(history), "History should not be empty"
            history[-1].update(num_actions_info)

        entry = {"actions_left": actions_left}
        if isinstance(next_state, str):
            entry["state"] = next_state
        else:
            entry["state"] = "<images>" * len(next_state)
            entry["images"] = next_state

        history.append(entry)
        return history

    def _log_env_state(
        self,
        status: EnvStatus,
        history: List[Dict],
        cur_obs,
        max_actions_per_traj: int,
        executed_actions: List[str],
        process_reward: float,
        result_reward: float,
        turn_done: bool,
        turn_info: Dict,
        env_input: Dict,
        action_summaries: List[str] = None,
    ) -> tuple:
        """记录环境状态"""
        obs = self._handle_mm_state(cur_obs)
        status.num_actions += len(executed_actions)

        # 【优化】使用add_rewards方法，自动管理历史长度
        status.add_rewards(process_reward, result_reward)

        actions_left = max_actions_per_traj - status.num_actions

        if turn_done:
            status.terminated = True
            status.truncated = not turn_info.get("success", False)
            # 更新最后一步信息
            if history:
                update_data = {
                    "actions": executed_actions,
                    "process_reward": process_reward,  # 过程奖励
                    "result_reward": result_reward,  # 结果奖励
                    "info": turn_info,
                    "llm_response": env_input["llm_response"],
                    "llm_raw_response": env_input["llm_raw_response"],
                }
                # 添加action_summaries到history
                if action_summaries:
                    update_data["action_summaries"] = action_summaries
                history[-1].update(update_data)
        else:
            # 添加新状态
            history_data = {
                "actions": executed_actions,
                "process_reward": process_reward,  # 过程奖励
                "result_reward": result_reward,  # 结果奖励
                "info": turn_info,
                "llm_response": env_input["llm_response"],
                "llm_raw_response": env_input["llm_raw_response"],
            }
            # 添加action_summaries到history
            if action_summaries:
                history_data["action_summaries"] = action_summaries
            history = self._update_cache_history(
                history, obs, actions_left, history_data
            )

        return status, history

    def _process_single_env_step(self, env_input: Dict, entry: Dict) -> tuple:
        """处理单个环境的步进"""
        env_id, env = entry["env_id"], entry["env"]
        actions_left_before = (
            entry["max_actions_per_traj"] - entry["status"].num_actions
        )

        # 检查格式有效性
        if not env_input.get("is_format_valid", True):
            print(f"环境 {env_id}: LLM响应格式不正确，跳过动作执行")
            format_invalid_info = {
                "format_invalid": True,
                "turn_process_reward": -100.0,
                "turn_result_reward": 0.0,
            }
            return -100.0, 0.0, format_invalid_info, True, [], []

        # 执行动作
        valid_actions = self._extract_map_valid_actions(entry, env_input["actions"])
        (
            process_reward,
            result_reward,
            turn_info,
            turn_done,
            executed_actions,
            action_summaries,
        ) = self._execute_actions(
            env, valid_actions[:actions_left_before], env_input.get("turn_number", 0)
        )

        # 检查动作有效性惩罚
        if len(valid_actions) != len(env_input["actions"]) or not valid_actions:
            penalty_key = env_input.get("env_id")
            if penalty_key is not None and hasattr(self, "rollout_cache"):
                self.rollout_cache[penalty_key][
                    "penalty"
                ] += self.sys_config.es_manager.format_penalty

        return (
            process_reward,
            result_reward,
            turn_info,
            turn_done,
            executed_actions,
            action_summaries,
        )

    def _apply_think_content_penalty(
        self,
        env_input: Dict,
        env_id: int,
        turn_info: Dict,
        process_reward: float,
        result_reward: float,
    ) -> tuple:
        """应用思考内容长度惩罚"""
        if (
            "think_content" in env_input
            and env_input["think_content"] is not None
            and env_input.get("is_format_valid", True)
        ):

            think_content = env_input["think_content"]
            think_penalty = self._calculate_think_content_penalty(think_content)

            if think_penalty > 0:
                if hasattr(self, "rollout_cache"):
                    self.rollout_cache[env_id]["penalty"] += think_penalty
                print(
                    f"环境 {env_id}: think_content 长度 {len(think_content)}，惩罚 {think_penalty:.2f}"
                )
                process_reward = -think_penalty  # 思考内容惩罚归为过程惩罚

            turn_info.update(
                {
                    "think_content_length": len(think_content),
                    "think_content_penalty": think_penalty,
                }
            )

        return process_reward, result_reward

    def reset(self, seed: Optional[int] = None, global_step: Optional[int] = None):
        """Reset all environments and build a fresh rollout cache.

        Args:
            seed: Optional explicit seed (highest priority).
            global_step: Optional global training step (for resumable training).

        [BUG FIX] Addresses two issues:
        1. Training resumption: the same global_step always maps to the same seed.
        2. Validation consistency: val mode uses a fixed seed that does not
           depend on how many times reset() has been called.

        Seed derivation:
        - Train mode:
            * global_step given: seed = base_seed + global_step (reproducible)
            * seed given: used as-is (for tests)
            * otherwise: random seed (backward compatible)
        - Val mode:
            * seed given: used as-is
            * otherwise: fixed base_seed, or default 123 (consistency)
        """
        # ============ Train mode: supports resumable training ============
        if self.mode == "train":
            if seed is not None:
                # Explicit seed wins (highest priority, used by tests).
                print(f"[ESManager] TRAIN模式使用显式seed: {seed}")
            elif global_step is not None:
                # [BUG FIX 1] Derive the seed from global_step so resumed runs
                # reproduce the same environments.
                if self.base_seed is not None:
                    seed = self.base_seed + global_step
                else:
                    seed = global_step
                print(f"[ESManager] TRAIN模式使用global_step={global_step}计算seed: {seed}")
            else:
                # Backward compatible path when no global_step is provided.
                if self.base_seed is not None:
                    seed = self.base_seed + self.seed_counter
                    self.seed_counter += 1
                    print(f"[ESManager] TRAIN模式使用配置seed（向后兼容）: {seed}")
                else:
                    seed = random.randint(0, 1000000)
                    print(f"[ESManager] TRAIN模式使用随机seed（向后兼容）: {seed}")

        # ============ Val mode: keep validation envs consistent ============
        else:  # val mode
            if seed is not None:
                # Explicit seed wins.
                print(f"[ESManager] VAL模式使用显式seed: {seed}")
            else:
                # [BUG FIX 2] Fixed seed in val mode (no seed_counter), so an
                # evaluation at step 10 and one at step 20 see the same envs.
                seed = self.base_seed if self.base_seed is not None else 123
                print(f"[ESManager] VAL模式使用固定seed: {seed} (保证一致性)")

        # Fan the seed out to every env: envs within a group share one seed,
        # consecutive groups get consecutive seeds.
        seeds = [[seed + i] * self.group_size for i in range(self.env_groups)]
        seeds = sum(seeds, [])

        # Fresh rollout cache, one record per env.
        self.rollout_cache = [
            {
                "env_id": entry["env_id"],
                "history": [],
                "group_id": entry["group_id"],
                "tag": entry["tag"],
                "penalty": 0,
            }
            for entry in self.envs
        ]

        # Reset every env and seed its cache with the initial observation.
        for seed, entry, cache in zip(seeds, self.envs, self.rollout_cache):
            entry["env"].reset(seed=seed, mode=self.mode)
            entry["status"] = EnvStatus(seed=seed)
            next_state = self._handle_mm_state(entry["env"].render())
            cache["history"] = self._update_cache_history(
                cache["history"], next_state, entry["max_actions_per_traj"]
            )
            # [BUG FIX] Publish env_info right away so tests/debugging can read
            # environment metadata immediately after reset.
            try:
                cache["env_info"] = entry["env"].get_env_info()
            except Exception as e:
                print(f"    [ESManager] 更新env_info失败 (env_id: {entry['env_id']}): {e}")

        return self.rollout_cache

    def _should_use_parallel(self, n_envs: int) -> bool:
        """
        动态判断是否应该使用并行处理
        
        【优化】根据CPU核心数动态调整阈值，小批量也能利用多核
        
        Args:
            n_envs: 环境数量
            
        Returns:
            是否应该使用并行处理
        """
        # 动态阈值：根据CPU核心数
        if self.n_cpu >= 16:
            threshold = 4  # 高性能服务器：4个环境就并行
        elif self.n_cpu >= 8:
            threshold = 8  # 普通服务器：8个环境就并行
        else:
            threshold = 16  # 低配机器：16个环境才并行
        
        global use_parallel
        return use_parallel and n_envs >= threshold
    
    def step(self, all_env_inputs: List[Dict]):
        """环境步进处理
        
        【优化】使用动态阈值判断，小批量也能并行处理
        """
        if self._should_use_parallel(len(all_env_inputs)):
            return self._step_parallel(all_env_inputs)
        return self._step_sequential(all_env_inputs)

    def _get_optimal_pool_size(self, n_tasks: int) -> int:
        """
        获取最优的进程池大小
        
        【优化】动态调整进程数，提升CPU利用率20-40%
        
        Args:
            n_tasks: 任务数量
            
        Returns:
            最优进程数
        """
        # 最大进程数：CPU核心数的75%（留25%给系统）
        max_processes = max(1, int(self.n_cpu * 0.75))
        # 实际进程数：不超过任务数（避免空闲进程）
        optimal = min(max_processes, n_tasks)
        # 最少2个进程（如果有多个任务）
        return max(2, optimal) if n_tasks >= 2 else 1
    
    def _step_parallel(self, all_env_inputs: List[Dict]) -> List[Dict]:
        """Step the given envs in a process pool; return caches of envs still running.

        Each worker receives the env entry plus its rollout cache, mutates
        pickled copies in the child process, and the results are written back
        here (pool.map preserves input order).
        """
        # Bundle each input with its env entry and cache for the worker.
        parallel_env_inputs = [
            {
                **env_input,
                "env": self.envs[env_input["env_id"]],
                "rollout_cache_single": self.rollout_cache[env_input["env_id"]],
            }
            for env_input in all_env_inputs
        ]

        func = partial(self._step_env_static, sys_config=self.sys_config)

        # Pool size derived from core and task counts (not a fixed fraction).
        pool_size = self._get_optimal_pool_size(len(parallel_env_inputs))
        with multiprocessing.Pool(pool_size) as pool:
            results = pool.map(func, parallel_env_inputs)

        env_outputs = []
        for turn_done, env, rollout_cache in results:
            # Write the worker's (round-tripped) env entry and cache back.
            self.envs[env["env_id"]] = env
            self.rollout_cache[env["env_id"]] = rollout_cache
            if not turn_done:
                env_outputs.append(rollout_cache)
        return env_outputs

    def _step_sequential(self, all_env_inputs: List[Dict]) -> List[Dict]:
        """Step each env in-process; return caches of envs still running."""
        env_outputs = []

        for env_input in all_env_inputs:
            entry = self.envs[env_input["env_id"]]
            env_id = entry["env_id"]

            # Execute the parsed actions on this env.
            (
                process_reward,
                result_reward,
                turn_info,
                turn_done,
                executed_actions,
                action_summaries,
            ) = self._process_single_env_step(env_input, entry)

            # Apply the think-content length penalty, if any.
            process_reward, result_reward = self._apply_think_content_penalty(
                env_input, env_id, turn_info, process_reward, result_reward
            )

            # Record the turn (including action summaries) into status/history.
            status, history = self._log_env_state(
                entry["status"],
                self.rollout_cache[env_id]["history"],
                entry["env"].render(),
                entry["max_actions_per_traj"],
                executed_actions,
                process_reward,
                result_reward,
                turn_done,
                turn_info,
                env_input,
                action_summaries,
            )

            # Persist the updated status and history.
            entry["status"] = status

            self.rollout_cache[env_id]["history"] = history
            if not turn_done:
                env_outputs.append(self.rollout_cache[env_id])

        return env_outputs

    @staticmethod
    def _step_env_static(env_input, sys_config):
        """Worker-side step for one env (runs in a multiprocessing child).

        Mirrors _step_sequential for a single env but operates on a minimal
        throwaway manager shell (no rollout_cache), because the real manager
        is not shipped to the worker.

        Returns:
            (turn_done, updated env entry, updated single rollout cache).
        """
        # Minimal manager shell so the instance helpers can be reused.
        manager = EnvStateManager.__new__(EnvStateManager)
        manager.sys_config = sys_config

        entry = env_input["env"]
        env_id, env = entry["env_id"], entry["env"]
        rollout_cache_single = env_input["rollout_cache_single"]

        # Execute the actions.
        (
            process_reward,
            result_reward,
            turn_info,
            turn_done,
            executed_actions,
            action_summaries,
        ) = manager._process_single_env_step(env_input, entry)

        # Think-content penalty, inlined rather than delegated to
        # _apply_think_content_penalty: here the penalty must be added to this
        # worker's rollout_cache_single, which the shell manager lacks.
        if (
            "think_content" in env_input
            and env_input["think_content"] is not None
            and env_input.get("is_format_valid", True)
        ):

            think_content = env_input["think_content"]
            think_penalty = manager._calculate_think_content_penalty(think_content)

            if think_penalty > 0:
                rollout_cache_single["penalty"] += think_penalty
                process_reward = -think_penalty  # classified as a process-level penalty

            turn_info.update(
                {
                    "think_content_length": len(think_content),
                    "think_content_penalty": think_penalty,
                }
            )

        # Record the turn into status/history.
        status, history = manager._log_env_state(
            entry["status"],
            rollout_cache_single["history"],
            entry["env"].render(),
            entry["max_actions_per_traj"],
            executed_actions,
            process_reward,
            result_reward,
            turn_done,
            turn_info,
            env_input,
            action_summaries,
        )

        # Force-finish trajectories that have exhausted their action budget.
        entry["status"] = status
        if (
            entry["status"].num_actions >= entry["max_actions_per_traj"]
            and not turn_done
        ):
            entry["status"].truncated = entry["status"].terminated = True
            turn_done = True

        rollout_cache_single["history"] = history
        return turn_done, entry, rollout_cache_single

    def get_rollout_states(self, control_parameter_u: float = None):
        """获取所有环境的最终输出"""
        # 定义指标类型
        TURN_LVL_METRICS = [
            "action_is_effective",
            "action_is_valid",
            "think_content_length",
            "think_content_penalty",
        ]
        SUM_METRICS = [
            "process_reward",
            "result_reward",
            "total_process_reward",
            "total_result_reward",
            "total_mixed_reward",
        ]
        STATE_METRICS = {
            "top_1": 0.0,
            "top_3": 0.0,
            "top_5": 0.0,
            "root_cause_rank": 999,
            "success": 0.0,
            "turn_number": 0,
        }

        # 处理每个环境的指标
        for entry, cache in zip(self.envs, self.rollout_cache):
            self._update_env_info(entry, cache)
            env_metric = self._calculate_env_metrics(
                entry,
                cache,
                TURN_LVL_METRICS,
                SUM_METRICS,
                STATE_METRICS,
                control_parameter_u,
            )
            cache["metrics"] = {f"{entry['tag']}/{k}": v for k, v in env_metric.items()}

            if entry["tag"] == "MetamathQA":
                cache["correct_answer"] = entry["env"].correct_answer

        # 计算并添加MRR指标
        self._calculate_and_add_mrr()
        return self.rollout_cache

    def _update_env_info(self, entry: Dict, cache: Dict):
        """更新环境信息"""
        try:
            cache["env_info"] = entry["env"].get_env_info()
        except Exception as e:
            print(f"更新env_info失败 (env_id: {entry['env_id']}): {e}")

    def _calculate_env_metrics(
        self,
        entry: Dict,
        cache: Dict,
        turn_lvl_metrics: List,
        sum_metrics: List,
        state_metrics: Dict,
        control_parameter_u: float = None,
    ) -> Dict:
        """计算环境指标"""
        status = entry["status"]
        env_metric = {
            "success": float(status.terminated and not status.truncated),
            "num_actions": status.num_actions,
        }

        # 添加分离的奖励指标
        total_process_reward = 0.0
        total_result_reward = 0.0

        if status.process_rewards:
            total_process_reward = np.sum(status.process_rewards)
            env_metric["total_process_reward"] = total_process_reward
        if status.result_rewards:
            total_result_reward = np.sum(status.result_rewards)
            env_metric["total_result_reward"] = total_result_reward

        # 计算混合奖励（如果提供了控制参数u）
        if control_parameter_u is not None:
            total_mixed_reward = (
                total_process_reward * control_parameter_u
                + total_result_reward * (1.0 - control_parameter_u)
            )
            env_metric["total_mixed_reward"] = total_mixed_reward
            env_metric["control_parameter_u"] = (
                control_parameter_u  # 记录使用的控制参数
            )
            print(f"  🎯 环境 {entry['env_id']}: 混合奖励 = {total_mixed_reward:.4f}")
            print(
                f"     - 过程奖励: {total_process_reward:.4f} × {control_parameter_u:.4f} = {total_process_reward * control_parameter_u:.4f}"
            )
            print(
                f"     - 结果奖励: {total_result_reward:.4f} × {1.0 - control_parameter_u:.4f} = {total_result_reward * (1.0 - control_parameter_u):.4f}"
            )
        else:
            # 如果没有控制参数，则使用过程奖励作为默认值（向后兼容）
            env_metric["total_mixed_reward"] = (
                total_process_reward + total_result_reward
            )

        # 收集自定义指标
        custom_metric = {}
        case_query_stats = {"case_query_calls": 0, "case_query_recall_at_1": 0, "case_query_recall_at_3": 0, "case_query_recall_at_5": 0}

        for turn in cache["history"]:
            for k, v in turn.get("info", {}).items():
                if k != "success":
                    # 跳过列表、字典等复杂类型（如explored_components、checked_components等）
                    if isinstance(v, (list, dict)):
                        continue
                    try:
                        custom_metric.setdefault(k, []).append(float(v))
                    except (ValueError, TypeError):
                        # 如果无法转换为float，跳过该字段
                        continue

            # 从history中收集分离的奖励信息
            if "process_reward" in turn:
                custom_metric.setdefault("process_reward", []).append(
                    float(turn["process_reward"])
                )
            if "result_reward" in turn:
                custom_metric.setdefault("result_reward", []).append(
                    float(turn["result_reward"])
                )

            # 注意：case_query_calls统计已移至统一的case_query_history统计中，避免重复统计

            # 修复BUG：从entry['env']直接获取正确的case_query_history统计
            # 只统计有效记录（有turn信息的记录），避免重复统计
            env_obj = entry['env']
            if hasattr(env_obj, 'get_env_info'):
                try:
                    env_info = env_obj.get_env_info()
                    case_query_history = env_info.get("case_query_history", [])
                    if case_query_history:
                        # 只统计有效记录（有turn信息的记录）
                        valid_records = [record for record in case_query_history if record.get("turn") is not None]

                        # 统计实际的查询次数
                        case_query_stats["case_query_calls"] = len(valid_records)

                        # 从有效记录中获取recall统计
                        for query in valid_records:
                            recall_stats = query.get("recall_at_k", {})
                            case_query_stats["case_query_recall_at_1"] += recall_stats.get("recall_at_1", 0)
                            case_query_stats["case_query_recall_at_3"] += recall_stats.get("recall_at_3", 0)
                            case_query_stats["case_query_recall_at_5"] += recall_stats.get("recall_at_5", 0)
                except Exception as e:
                    # 如果获取失败，跳过统计但不影响整体流程
                    print(f"获取case_query统计失败: {e}")
                    pass

        # 处理指标类型
        for k, v in custom_metric.items():
            if k in turn_lvl_metrics or ("Webshop" in k and k in turn_lvl_metrics):
                env_metric[k] = np.sum(v) / max(1, len(v))
            elif k in sum_metrics or k in [
                "process_reward",
                "result_reward",
            ]:  # 奖励类指标使用求和
                env_metric[k] = np.sum(v)
            elif k in state_metrics:
                env_metric[k] = v[-1] if v else state_metrics[k]
            else:
                env_metric[k] = np.sum(v)

        # 添加缺失的状态指标默认值
        for k, default_val in state_metrics.items():
            if k not in env_metric:
                env_metric[k] = default_val

        # 添加case_query_tool统计指标
        if case_query_stats["case_query_calls"] > 0:
            env_metric[f"{entry['tag']}/case_query_calls"] = case_query_stats["case_query_calls"]
            # 计算平均recall值
            env_metric[f"{entry['tag']}/case_query_avg_recall_at_1"] = (
                case_query_stats["case_query_recall_at_1"] / case_query_stats["case_query_calls"]
            )
            env_metric[f"{entry['tag']}/case_query_avg_recall_at_3"] = (
                case_query_stats["case_query_recall_at_3"] / case_query_stats["case_query_calls"]
            )
            env_metric[f"{entry['tag']}/case_query_avg_recall_at_5"] = (
                case_query_stats["case_query_recall_at_5"] / case_query_stats["case_query_calls"]
            )

        # 更新历史指标
        if cache["history"]:
            cache["history"][-1]["metrics"] = custom_metric

        return env_metric

    def _calculate_and_add_mrr(self):
        """计算并添加MRR指标（按tag分组计算）"""
        # 按tag分组收集reciprocal_ranks
        tag_reciprocal_ranks = {}
        for entry, cache in zip(self.envs, self.rollout_cache):
            tag = entry['tag']
            rank_key = f"{tag}/root_cause_rank"
            if rank_key in cache["metrics"]:
                rank = cache["metrics"][rank_key]
                reciprocal_rank = 1.0 / rank if 0 < rank < 999 else 0.0
                if tag not in tag_reciprocal_ranks:
                    tag_reciprocal_ranks[tag] = []
                tag_reciprocal_ranks[tag].append(reciprocal_rank)

        # 为每个tag分别计算MRR
        tag_mrr_values = {}
        for tag, reciprocal_ranks in tag_reciprocal_ranks.items():
            tag_mrr_values[tag] = np.mean(reciprocal_ranks) if reciprocal_ranks else 0.0

        # 将对应的MRR值赋给对应tag的环境
        for entry, cache in zip(self.envs, self.rollout_cache):
            tag = entry['tag']
            mrr_value = tag_mrr_values.get(tag, 0.0)
            cache["metrics"][f"{tag}/mrr"] = float(mrr_value)

    def render(self):
        """渲染所有环境"""
        return [entry["env"].render() for entry in self.envs]

    def close(self):
        """关闭所有环境"""
        for entry in self.envs:
            entry["env"].close()
